Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--	drivers/crypto/caam/caamalg_qi2.c	96
1 file changed, 46 insertions, 50 deletions
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 1652fa26cf96..06bf32c32cbd 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
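
Note: the new iv_dir argument tells caam_unmap() how the IV was DMA mapped. A minimal sketch of the intended call patterns, based on the call sites updated in the hunks that follow (not part of the patch; argument names mirror the surrounding code):

	/* error path before the IV is mapped: nothing to unmap for it */
	caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
		   0, 0, DMA_NONE, 0, 0);

	/* AEAD: the accelerator only reads the IV */
	caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
		   iv_dma, ivsize, DMA_TO_DEVICE, qm_sg_dma, qm_sg_bytes);

	/* skcipher: the accelerator also writes the updated IV back */
	caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
		   iv_dma, ivsize, DMA_BIDIRECTIONAL, qm_sg_dma, qm_sg_bytes);
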
@@ -485,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_nents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -500,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -524,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
dev_err(dev, "unable to map assoclen\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -546,7 +547,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(dev, "unable to map S/G table\n");
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1101,22 +1102,17 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
dst_sg_idx = qm_sg_ents;
/*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
* HW reads 4 S/G entries at a time; make sure the reads don't go beyond
- * the end of the table by allocating more S/G entries. Logic:
- * if (src != dst && output S/G)
- * pad output S/G, if needed
- * else if (src == dst && S/G)
- * overlapping S/Gs; pad one of them
- * else if (input S/G) ...
- * pad input S/G, if needed
+ * the end of the table by allocating more S/G entries.
*/
- if (mapped_dst_nents > 1)
- qm_sg_ents += pad_sg_nents(mapped_dst_nents);
- else if ((req->src == req->dst) && (mapped_src_nents > 1))
- qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
- 1 + pad_sg_nents(mapped_src_nents));
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
else
- qm_sg_ents = pad_sg_nents(qm_sg_ents);
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
@@ -1124,7 +1120,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
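
Note: the worst-case S/G entry count under the new [IV, src][dst, IV] layout can be written as a standalone sketch (not part of the patch); pad4() stands in for the driver's pad_sg_nents(), assumed to round up to a multiple of 4 so the hardware's 4-entry reads never run past the table:

#include <stdbool.h>

/* stand-in for pad_sg_nents(): round up to a multiple of 4 entries */
static int pad4(int nents)
{
	return (nents + 3) & ~3;
}

/* mirrors the qm_sg_ents computation in skcipher_edesc_alloc() above */
static int skcipher_qm_sg_ents(int mapped_src_nents, int mapped_dst_nents,
			       bool in_place)
{
	int qm_sg_ents = 1 + mapped_src_nents;	/* input table: [IV, src] */

	if (!in_place)
		/* separate, padded output table: [dst, IV] */
		qm_sg_ents += pad4(mapped_dst_nents + 1);
	else
		/* tables overlap from entry 1 on: leading IV + padded shared region */
		qm_sg_ents = 1 + pad4(qm_sg_ents);

	return qm_sg_ents;
}

Example: with 3 mapped source segments and 2 mapped destination segments (src != dst), the input table takes 4 entries and the output table needs 3, padded to 4, so 8 entries (8 * sizeof(struct dpaa2_sg_entry) bytes) are allocated.
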
@@ -1133,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1142,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1157,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
edesc->qm_sg_bytes = qm_sg_bytes;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
-				 dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1176,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
- dpaa2_fl_set_len(out_fle, req->cryptlen);
+ dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- if (req->src == req->dst) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+ if (req->src == req->dst)
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
sizeof(*sg_table));
- } else if (mapped_dst_nents > 1) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ else
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table));
- } else {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- }
return edesc;
}
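
Note: with the IV returned through the output S/G table, out_fle always uses the S/G format and its length covers req->cryptlen + ivsize; only the table address differs between the two cases. A small sketch of that address selection (this helper does not exist in the driver; it assumes the kernel's dma_addr_t and struct dpaa2_sg_entry):

/* where out_fle points inside the DMA-mapped S/G area */
static dma_addr_t skcipher_out_sg_addr(dma_addr_t qm_sg_dma,
				       int dst_sg_idx, bool in_place)
{
	if (in_place)
		/* reuse the input table, skipping its leading IV entry */
		return qm_sg_dma + sizeof(struct dpaa2_sg_entry);

	/* separate [dst, IV] table starts at index dst_sg_idx */
	return qm_sg_dma + dst_sg_idx * sizeof(struct dpaa2_sg_entry);
}
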
@@ -1204,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -1215,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1372,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1407,6 +1403,14 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
}
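
Note: the memcpy() in the two completion callbacks above relies on the allocation-side layout: skcipher_edesc_alloc() places the IV copy immediately after the S/G table in the same qi_cache buffer and maps it DMA_BIDIRECTIONAL, so once skcipher_unmap() has run, the IV written back by the accelerator is readable at that CPU address. Condensed from the allocation path shown earlier:

	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);	/* == (u8 *)sg_table + qm_sg_bytes */
	memcpy(iv, req->iv, ivsize);
	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
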
@@ -1445,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
/* allocate extended descriptor */
@@ -1453,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
-
caam_req->flc = &ctx->flc[DECRYPT];
caam_req->flc_dma = ctx->flc_dma[DECRYPT];
caam_req->cbk = skcipher_decrypt_done;