Diffstat (limited to 'drivers/staging/ccree')
-rw-r--r--  drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt | 27
-rw-r--r--  drivers/staging/ccree/Kconfig | 2
-rw-r--r--  drivers/staging/ccree/Makefile | 8
-rw-r--r--  drivers/staging/ccree/TODO | 22
-rw-r--r--  drivers/staging/ccree/cc_aead.c (renamed from drivers/staging/ccree/ssi_aead.c) | 1119
-rw-r--r--  drivers/staging/ccree/cc_aead.h (renamed from drivers/staging/ccree/ssi_aead.h) | 56
-rw-r--r--  drivers/staging/ccree/cc_buffer_mgr.c (renamed from drivers/staging/ccree/ssi_buffer_mgr.c) | 1085
-rw-r--r--  drivers/staging/ccree/cc_buffer_mgr.h | 74
-rw-r--r--  drivers/staging/ccree/cc_cipher.c (renamed from drivers/staging/ccree/ssi_cipher.c) | 715
-rw-r--r--  drivers/staging/ccree/cc_cipher.h | 74
-rw-r--r--  drivers/staging/ccree/cc_crypto_ctx.h | 38
-rw-r--r--  drivers/staging/ccree/cc_debugfs.c | 101
-rw-r--r--  drivers/staging/ccree/cc_debugfs.h | 32
-rw-r--r--  drivers/staging/ccree/cc_driver.c (renamed from drivers/staging/ccree/ssi_driver.c) | 346
-rw-r--r--  drivers/staging/ccree/cc_driver.h | 194
-rw-r--r--  drivers/staging/ccree/cc_fips.c (renamed from drivers/staging/ccree/ssi_fips.c) | 45
-rw-r--r--  drivers/staging/ccree/cc_fips.h | 37
-rw-r--r--  drivers/staging/ccree/cc_hash.c (renamed from drivers/staging/ccree/ssi_hash.c) | 1817
-rw-r--r--  drivers/staging/ccree/cc_hash.h | 114
-rw-r--r--  drivers/staging/ccree/cc_host_regs.h | 142
-rw-r--r--  drivers/staging/ccree/cc_hw_queue_defs.h | 40
-rw-r--r--  drivers/staging/ccree/cc_ivgen.c (renamed from drivers/staging/ccree/ssi_ivgen.c) | 139
-rw-r--r--  drivers/staging/ccree/cc_ivgen.h | 55
-rw-r--r--  drivers/staging/ccree/cc_kernel_regs.h | 167
-rw-r--r--  drivers/staging/ccree/cc_lli_defs.h | 19
-rw-r--r--  drivers/staging/ccree/cc_pm.c | 122
-rw-r--r--  drivers/staging/ccree/cc_pm.h | 57
-rw-r--r--  drivers/staging/ccree/cc_request_mgr.c | 713
-rw-r--r--  drivers/staging/ccree/cc_request_mgr.h | 51
-rw-r--r--  drivers/staging/ccree/cc_sram_mgr.c (renamed from drivers/staging/ccree/ssi_sram_mgr.c) | 70
-rw-r--r--  drivers/staging/ccree/cc_sram_mgr.h | 65
-rw-r--r--  drivers/staging/ccree/dx_crys_kernel.h | 180
-rw-r--r--  drivers/staging/ccree/dx_host.h | 155
-rw-r--r--  drivers/staging/ccree/dx_reg_common.h | 26
-rw-r--r--  drivers/staging/ccree/hash_defs.h | 36
-rw-r--r--  drivers/staging/ccree/ssi_buffer_mgr.h | 91
-rw-r--r--  drivers/staging/ccree/ssi_cipher.h | 84
-rw-r--r--  drivers/staging/ccree/ssi_config.h | 36
-rw-r--r--  drivers/staging/ccree/ssi_driver.h | 206
-rw-r--r--  drivers/staging/ccree/ssi_fips.h | 49
-rw-r--r--  drivers/staging/ccree/ssi_hash.h | 103
-rw-r--r--  drivers/staging/ccree/ssi_ivgen.h | 71
-rw-r--r--  drivers/staging/ccree/ssi_pm.c | 145
-rw-r--r--  drivers/staging/ccree/ssi_pm.h | 43
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.c | 610
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.h | 60
-rw-r--r--  drivers/staging/ccree/ssi_sram_mgr.h | 79
-rw-r--r--  drivers/staging/ccree/ssi_sysfs.c | 172
-rw-r--r--  drivers/staging/ccree/ssi_sysfs.h | 55
49 files changed, 4336 insertions(+), 5411 deletions(-)
diff --git a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
deleted file mode 100644
index 2ea65175c032..000000000000
--- a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Arm TrustZone CryptoCell cryptographic accelerators
-
-Required properties:
-- compatible: must be "arm,cryptocell-712-ree".
-- reg: shall contain base register location and length.
- Typically length is 0x10000.
-- interrupts: shall contain the interrupt for the device.
-
-Optional properties:
-- interrupt-parent: can designate the interrupt controller the
- device interrupt is connected to, if needed.
-- clocks: may contain the clock handling the device, if needed.
-- power-domains: may contain a reference to the PM domain, if applicable.
-
-
-Examples:
-
-Zynq FPGA device
-----------------
-
- arm_cc7x: arm_cc7x@80000000 {
- compatible = "arm,cryptocell-712-ree";
- interrupt-parent = <&intc>;
- interrupts = < 0 30 4 >;
- reg = < 0x80000000 0x10000 >;
- };
-
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index 0b3092ba2fcb..c94dfe8adb63 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index ae702f3b5369..bdc27970f95f 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
-ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
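
A note on the Makefile change above: cc_pm.o and cc_debugfs.o are now built only when CONFIG_PM / CONFIG_DEBUG_FS are set. The usual kernel pattern that makes this work is to pair each conditional object with inline no-op stubs in its header, so callers compile unchanged either way. A minimal sketch of that pattern, with hypothetical names modeled on cc_pm.h (not copied from the driver):

	#ifdef CONFIG_PM
	int cc_pm_init(struct cc_drvdata *drvdata);	/* real code lives in cc_pm.c */
	void cc_pm_fini(struct cc_drvdata *drvdata);
	#else
	static inline int cc_pm_init(struct cc_drvdata *drvdata) { return 0; }
	static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
	#endif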
diff --git a/drivers/staging/ccree/TODO b/drivers/staging/ccree/TODO
index c9f5754d062d..b8e163d98f91 100644
--- a/drivers/staging/ccree/TODO
+++ b/drivers/staging/ccree/TODO
@@ -6,25 +6,5 @@
* *
*************************************************************************
-ccree specific items
-a.k.a stuff fixing for this driver to move out of staging
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. ???
-1. Move to using Crypto Engine to handle backlog queueing.
-2. Remove synchronous algorithm support leftovers.
-3. Separate platform specific code for FIPS and power management into separate platform modules.
-4. Drop legacy kernel support code.
-5. Move most (all?) #ifdef CONFIG into inline functions.
-6. Remove all unused definitions.
-7. Re-factor to accomediate newer/older HW revisions besides the 712.
-8. Handle the many checkpatch errors.
-9. Implement ahash import/export correctly.
-10. Go through a proper review of DT bindings and sysfs ABI
-11. Sort out FIPS mode: bake tests into testmgr, sort out behaviour on error,
- figure if 3DES weak key check is needed
-
-Kernel infrastructure items
-a.k.a stuff we either neither need to fix in the kernel or understand what we're doing wrong
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. ahash import/export context has a PAGE_SIZE/8 size limit. We need more.
-2. Crypto Engine seems to be built for HW with hardware queue depth of 1, we have 600++.
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/cc_aead.c
index ba0954e4d2e5..b58413172231 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/cc_aead.c
@@ -1,41 +1,19 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
-#include <crypto/sha.h>
-#include <crypto/ctr.h>
#include <crypto/authenc.h>
-#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
-#include <linux/version.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_aead.h"
-#include "ssi_request_mgr.h"
-#include "ssi_hash.h"
-#include "ssi_sysfs.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
#define template_aead template_u.aead
@@ -51,8 +29,8 @@
/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01
-struct ssi_aead_handle {
- ssi_sram_addr_t sram_workspace_addr;
+struct cc_aead_handle {
+ cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
};
@@ -68,8 +46,8 @@ struct cc_xcbc_s {
dma_addr_t xcbc_keys_dma_addr;
};
-struct ssi_aead_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_aead_ctx {
+ struct cc_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
@@ -90,9 +68,9 @@ static inline bool valid_assoclen(struct aead_request *req)
return ((req->assoclen == 16) || (req->assoclen == 20));
}
-static void ssi_aead_exit(struct crypto_aead *tfm)
+static void cc_aead_exit(struct crypto_aead *tfm)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
@@ -100,7 +78,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
/* Unmap enckey buffer */
if (ctx->enckey) {
- dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+ ctx->enckey_dma_addr);
dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
&ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
@@ -143,22 +122,22 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
}
}
-static int ssi_aead_init(struct crypto_aead *tfm)
+static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, aead_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
- ctx->cipher_mode = ssi_alg->cipher_mode;
- ctx->flow_mode = ssi_alg->flow_mode;
- ctx->auth_mode = ssi_alg->auth_mode;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
@@ -221,23 +200,25 @@ static int ssi_aead_init(struct crypto_aead *tfm)
return 0;
init_failed:
- ssi_aead_exit(tfm);
+ cc_aead_exit(tfm);
return -ENOMEM;
}
-static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
- struct aead_request *areq = (struct aead_request *)ssi_req;
+ struct aead_request *areq = (struct aead_request *)cc_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int err = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ssi_buffer_mgr_unmap_aead_request(dev, areq);
+ cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
areq->iv = areq_ctx->backup_iv;
+ if (err)
+ goto done;
+
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
ctx->authsize) != 0) {
@@ -246,36 +227,43 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
/* In case of payload authentication failure, MUST NOT
* revealed the decrypted message --> zero its memory.
*/
- ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
err = -EBADMSG;
}
} else { /*ENCRYPT*/
- if (unlikely(areq_ctx->is_icv_fragmented))
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
- areq->cryptlen + areq_ctx->dst_offset,
- (areq->cryptlen + areq_ctx->dst_offset +
- ctx->authsize),
- SSI_SG_FROM_BUF);
-
- /* If an IV was generated, copy it back to the user provided buffer. */
+ if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+ areq_ctx->dst_sgl, skip,
+ (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
+ }
+
+ /* If an IV was generated, copy it back to the user provided
+ * buffer.
+ */
if (areq_ctx->backup_giv) {
if (ctx->cipher_mode == DRV_CIPHER_CTR)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_IV_SIZE);
else if (ctx->cipher_mode == DRV_CIPHER_CCM)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
}
}
-
+done:
aead_request_complete(areq, err);
}
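
The decrypt path in cc_aead_complete() above compares the MAC computed by the engine against the ICV carried in the ciphertext, and zeroes the output on mismatch so a forged message is never revealed. For reference, a sketch of the same check using crypto_memneq(), the kernel's constant-time comparison helper (the driver itself uses memcmp() here):

	if (crypto_memneq(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
			  ctx->authsize)) {
		cc_zero_sgl(areq->dst, areq_ctx->cryptlen);	/* hide plaintext */
		err = -EBADMSG;
	}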
-static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
/* Load the AES key */
hw_desc_init(&desc[0]);
- /* We are using for the source/user key the same buffer as for the output keys,
- * because after this key loading it is not needed anymore
+ /* We are using for the source/user key the same buffer
+	 * as for the output keys, because after this key loading it

+ * is not needed anymore
*/
set_din_type(&desc[0], DMA_DLLI,
ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
@@ -309,7 +297,7 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return 4;
}
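
xcbc_setkey() above programs the engine to derive the three AES-XCBC subkeys in place over the user key buffer. In RFC 3566 terms these are K1, K2 and K3, each the AES encryption of a fixed constant block. A software sketch of the derivation; aes_encrypt_block() is a hypothetical helper, not a driver function:

	/* RFC 3566: K1 MACs the data, K2/K3 pad the final block */
	static void xcbc_derive_keys(const u8 *key, u8 *k1, u8 *k2, u8 *k3)
	{
		u8 c[16];

		memset(c, 0x01, 16);
		aes_encrypt_block(key, c, k1);	/* K1 = E(K, 0x0101..01) */
		memset(c, 0x02, 16);
		aes_encrypt_block(key, c, k2);	/* K2 = E(K, 0x0202..02) */
		memset(c, 0x03, 16);
		aes_encrypt_block(key, c, k3);	/* K3 = E(K, 0x0303..03) */
	}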
-static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
@@ -328,8 +316,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx],
- ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->auth_mode),
+ cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode),
digest_size);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -378,7 +366,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return idx;
}
-static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -390,9 +378,9 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
case DRV_HASH_SHA256:
break;
case DRV_HASH_XCBC_MAC:
- if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
- (ctx->auth_keylen != AES_KEYSIZE_192) &&
- (ctx->auth_keylen != AES_KEYSIZE_256))
+ if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+ ctx->auth_keylen != AES_KEYSIZE_192 &&
+ ctx->auth_keylen != AES_KEYSIZE_256)
return -ENOTSUPP;
break;
case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
@@ -404,16 +392,16 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
}
/* Check cipher key size */
- if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+ if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
- if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
- (ctx->enc_keylen != AES_KEYSIZE_192) &&
- (ctx->enc_keylen != AES_KEYSIZE_256)) {
+ if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_192 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
dev_err(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
@@ -427,14 +415,14 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
* (copy to intenral buffer or hash in case of key longer than block
*/
static int
-ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->auth_mode);
- struct ssi_crypto_req ssi_req = {};
+ u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ struct cc_crypto_req cc_req = {};
unsigned int blocksize;
unsigned int digestsize;
unsigned int hashmode;
@@ -457,9 +445,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
hashmode = DRV_HASH_HW_SHA256;
}
- if (likely(keylen != 0)) {
- key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+ if (keylen != 0) {
+ key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -537,22 +526,22 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0))
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- if (likely(key_dma_addr != 0))
+ if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
return rc;
}
static int
-ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int seq_len = 0, rc = -EINVAL;
@@ -586,8 +575,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
- CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+ ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
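
cc_aead_setkey() above unpacks the authenc() key blob passed down by the crypto API: an rtattr header carrying the big-endian cipher key length, followed by the authentication key and then the cipher key. A condensed sketch of that parsing (error checks omitted; the rtattr macros come from linux/rtnetlink.h, which the file already includes):

	param = RTA_DATA(rta);			/* struct crypto_authenc_key_param */
	ctx->enc_keylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);		/* skip header + param */
	keylen -= RTA_ALIGN(rta->rta_len);
	ctx->auth_keylen = keylen - ctx->enc_keylen;	/* auth key precedes enc key */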
@@ -597,7 +587,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
rc = validate_keys_sizes(ctx);
- if (unlikely(rc != 0))
+ if (rc)
goto badkey;
/* STAT_PHASE_1: Copy key to ctx */
@@ -609,8 +599,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
- if (rc != 0)
+ rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ if (rc)
goto badkey;
}
@@ -635,8 +625,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
/* STAT_PHASE_3: Submit sequence to HW */
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
- if (unlikely(rc != 0)) {
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
@@ -652,10 +642,10 @@ setkey_error:
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
if (keylen < 3)
return -EINVAL;
@@ -663,20 +653,18 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 3;
memcpy(ctx->ctr_nonce, key + keylen, 3);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-#endif /*SSI_CC_HAS_AES_CCM*/
-static int ssi_aead_setauthsize(
- struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
/* Unsupported auth. sizes */
- if ((authsize == 0) ||
- (authsize > crypto_aead_maxauthsize(authenc))) {
+ if (authsize == 0 ||
+ authsize > crypto_aead_maxauthsize(authenc)) {
return -ENOTSUPP;
}
@@ -686,9 +674,8 @@ static int ssi_aead_setauthsize(
return 0;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 8:
@@ -699,11 +686,11 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -718,46 +705,41 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-static inline void
-ssi_aead_create_assoc_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (assoc_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
- flow_mode);
- if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
- (areq_ctx->cryptlen > 0))
+ areq->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
set_din_not_last_indication(&desc[idx]);
break;
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
dev_dbg(dev, "ASSOC buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
areq_ctx->assoc.mlli_nents, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
- if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
- (areq_ctx->cryptlen > 0))
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
set_din_not_last_indication(&desc[idx]);
break;
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "Invalid ASSOC buffer type\n");
}
@@ -765,23 +747,20 @@ ssi_aead_create_assoc_desc(
*seq_size = (++idx);
}
-static inline void
-ssi_aead_process_authenc_data_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- int direct)
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (data_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
{
struct scatterlist *cipher =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
@@ -798,16 +777,16 @@ ssi_aead_process_authenc_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
}
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
{
/* DOUBLE-PASS flow (as default)
* assoc. + iv + data -compact in one table
* if assoclen is ZERO only IV perform
*/
- ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
mlli_addr = areq_ctx->dst.sram_addr;
mlli_nents = areq_ctx->dst.mlli_nents;
@@ -824,7 +803,7 @@ ssi_aead_process_authenc_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
}
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
}
@@ -832,37 +811,36 @@ ssi_aead_process_authenc_data_desc(
*seq_size = (++idx);
}
-static inline void
-ssi_aead_process_cipher_data_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->cryptlen == 0)
return; /*null processing*/
switch (data_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(areq_ctx->src_sgl) +
- areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
+ areq_ctx->src_offset), areq_ctx->cryptlen,
+ NS_BIT);
set_dout_dlli(&desc[idx],
(sg_dma_address(areq_ctx->dst_sgl) +
areq_ctx->dst_offset),
areq_ctx->cryptlen, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
@@ -871,7 +849,7 @@ ssi_aead_process_cipher_data_desc(
areq_ctx->dst.mlli_nents, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
}
@@ -879,13 +857,12 @@ ssi_aead_process_cipher_data_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_process_digest_result_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -930,13 +907,12 @@ static inline void ssi_aead_process_digest_result_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_setup_cipher_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = req_ctx->hw_iv_size;
unsigned int idx = *seq_size;
@@ -976,11 +952,8 @@ static inline void ssi_aead_setup_cipher_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_cipher(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- unsigned int data_flow_mode)
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
@@ -989,8 +962,8 @@ static inline void ssi_aead_process_cipher(
if (req_ctx->cryptlen == 0)
return; /*null processing*/
- ssi_aead_setup_cipher_desc(req, desc, &idx);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* We must wait for DMA to write all cipher */
hw_desc_init(&desc[idx]);
@@ -1002,13 +975,11 @@ static inline void ssi_aead_process_cipher(
*seq_size = idx;
}
-static inline void ssi_aead_hmac_setup_digest_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1028,10 +999,8 @@ static inline void ssi_aead_hmac_setup_digest_desc(
/* Load init. digest len (64 bytes) */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
- hash_mode),
- HASH_LEN_SIZE);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ HASH_LEN_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1039,13 +1008,11 @@ static inline void ssi_aead_hmac_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_xcbc_setup_digest_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int idx = *seq_size;
/* Loading MAC state */
@@ -1101,28 +1068,26 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_header_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
/* Hash associated data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_scheme_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1161,9 +1126,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
/* Load init. digest len (64 bytes) */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
- hash_mode),
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
HASH_LEN_SIZE);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -1180,20 +1143,17 @@ static inline void ssi_aead_process_digest_scheme_desc(
*seq_size = idx;
}
-static inline void ssi_aead_load_mlli_to_sram(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (unlikely(
- (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
- (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
- !req_ctx->is_single_pass)) {
+ if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1210,54 +1170,52 @@ static inline void ssi_aead_load_mlli_to_sram(
}
}
-static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
- enum drv_crypto_direction direct,
- enum cc_flow_mode setup_flow_mode,
- bool is_single_pass)
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
{
enum cc_flow_mode data_flow_mode;
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
AES_to_HASH_and_DOUT : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
DES_to_HASH_and_DOUT : DIN_DES_DOUT;
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
- AES_and_HASH : DIN_AES_DOUT;
+ data_flow_mode = is_single_pass ?
+ AES_and_HASH : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
- DES_and_HASH : DIN_DES_DOUT;
+ data_flow_mode = is_single_pass ?
+ DES_and_HASH : DIN_DES_DOUT;
}
return data_flow_mode;
}
-static inline void ssi_aead_hmac_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
- direct, ctx->flow_mode, req_ctx->is_single_pass);
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
@@ -1268,49 +1226,48 @@ static inline void ssi_aead_hmac_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
/* decrypt after.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
-static inline void
-ssi_aead_xcbc_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
- direct, ctx->flow_mode, req_ctx->is_single_pass);
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
@@ -1321,25 +1278,25 @@ ssi_aead_xcbc_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
/* decrypt after..*/
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
-static int validate_data_size(struct ssi_aead_ctx *ctx,
+static int validate_data_size(struct cc_aead_ctx *ctx,
enum drv_crypto_direction direct,
struct aead_request *req)
{
@@ -1349,16 +1306,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
- if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- (req->cryptlen < ctx->authsize)))
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
switch (ctx->flow_mode) {
case S_DIN_to_AES:
- if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
- !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1371,15 +1328,15 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
if (!IS_ALIGNED(assoclen, sizeof(u32)))
areq_ctx->is_single_pass = false;
- if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
+ if (ctx->cipher_mode == DRV_CIPHER_CTR &&
!IS_ALIGNED(cipherlen, sizeof(u32)))
areq_ctx->is_single_pass = false;
break;
case S_DIN_to_DES:
- if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
goto data_size_err;
- if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
areq_ctx->is_single_pass = false;
break;
default:
@@ -1393,7 +1350,6 @@ data_size_err:
return -EINVAL;
}
-#if SSI_CC_HAS_AES_CCM
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
unsigned int len = 0;
@@ -1438,13 +1394,11 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
return 0;
}
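
format_ccm_a0() above prefixes the associated data with its length, encoded as RFC 3610 prescribes, while set_msg_len() writes the payload length into the trailing L bytes of B0. The A0 length encoding, as a standalone sketch:

	/* RFC 3610 AAD length encoding (lengths >= 2^32 not handled here) */
	static unsigned int ccm_a0_len(u8 *a0, u32 alen)
	{
		if (alen < 0x10000 - 0x100) {		/* 2 bytes suffice */
			*(__be16 *)a0 = cpu_to_be16(alen);
			return 2;
		}
		a0[0] = 0xff;				/* escape marker */
		a0[1] = 0xfe;
		*(__be32 *)(a0 + 2) = cpu_to_be32(alen);
		return 6;
	}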
-static inline int ssi_aead_ccm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
@@ -1508,7 +1462,7 @@ static inline int ssi_aead_ccm(
/* process assoc data */
if (req->assoclen > 0) {
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -1519,8 +1473,8 @@ static inline int ssi_aead_ccm(
}
/* process the cipher */
- if (req_ctx->cryptlen != 0)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
/* Read temporal MAC */
hw_desc_init(&desc[idx]);
@@ -1565,12 +1519,14 @@ static inline int ssi_aead_ccm(
static int config_ccm_adata(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
unsigned int lp = req->iv[0];
- /* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
+ /* Note: The code assume that req->iv[0] already contains the value
+ * of L' of RFC3610
+ */
unsigned int l = lp + 1; /* This is L' of RFC 3610. */
unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
@@ -1601,7 +1557,7 @@ static int config_ccm_adata(struct aead_request *req)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
- if (rc != 0) {
+ if (rc) {
dev_err(dev, "message len overflow detected");
return rc;
}
@@ -1619,33 +1575,35 @@ static int config_ccm_adata(struct aead_request *req)
return 0;
}
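
config_ccm_adata() above assembles the CCM B0 block piecemeal (the `*b0 |= 64` sets the Adata bit). Per RFC 3610 the flags byte packs three fields and can be written in one expression; a sketch:

	/* RFC 3610 B0 flags: bit 6 = Adata, bits 5..3 = (M-2)/2, bits 2..0 = L-1 */
	static u8 ccm_b0_flags(bool adata, unsigned int m, unsigned int l)
	{
		return (adata ? 64 : 0) | (((m - 2) / 2) << 3) | (l - 1);
	}

where M is the MAC length in bytes and L the width of the message-length field.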
-static void ssi_rfc4309_ccm_process(struct aead_request *req)
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
/* L' */
memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
- areq_ctx->ctr_iv[0] = 3; /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
+ /* For RFC 4309, always use 4 bytes for message length
+ * (at most 2^32-1 bytes).
+ */
+ areq_ctx->ctr_iv[0] = 3;
- /* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
- memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
- memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
+ /* In RFC 4309 there is an 11-bytes nonce+IV part,
+ * that we build here.
+ */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+ CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= CCM_BLOCK_IV_SIZE;
}
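
cc_proc_rfc4309_ccm() above builds the 16-byte CTR block for RFC 4309: flags byte 3 selects a 4-byte length field (L' = 3), and the 11-byte nonce is the 3-byte salt saved at setkey time followed by the 8-byte per-request IV. The resulting layout, assuming CCM_BLOCK_NONCE_OFFSET = 1 and CCM_BLOCK_IV_OFFSET = 4 as in the driver headers:

	/*
	 * byte  0     : flags = 3 (L' = 3, so a 4-byte length field)
	 * bytes 1..3  : salt (last 3 bytes of the setkey key material)
	 * bytes 4..11 : 8-byte IV from the request
	 * bytes 12..15: block counter
	 */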
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-#if SSI_CC_HAS_AES_GCM
-static inline void ssi_aead_gcm_setup_ghash_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1703,7 +1661,9 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- /* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
+ /* Load GHASH initial STATE (which is 0). (for any hash there is an
+ * initial state)
+ */
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
set_dout_no_dma(&desc[idx], 0, 0, 1);
@@ -1717,13 +1677,11 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
*seq_size = idx;
}
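
cc_set_ghash_desc() above seeds the GHASH engine: in GCM the hash subkey H is the block cipher applied to an all-zero block, and the initial GHASH state is zero (that is what the set_din_const(..., 0x0, AES_BLOCK_SIZE) descriptor loads). In software terms, with aes_encrypt_block() again a hypothetical helper:

	u8 zeroes[AES_BLOCK_SIZE] = { 0 };
	u8 hkey[AES_BLOCK_SIZE];

	aes_encrypt_block(key, zeroes, hkey);	/* H = E(K, 0^128) */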
-static inline void ssi_aead_gcm_setup_gctr_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1738,7 +1696,7 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
- if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
+ if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
/* load AES/CTR initial CTR value inc by 2*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
@@ -1755,13 +1713,12 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_gcm_result_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
dma_addr_t mac_result;
unsigned int idx = *seq_size;
@@ -1821,10 +1778,8 @@ static inline void ssi_aead_process_gcm_result_desc(
*seq_size = idx;
}
-static inline int ssi_aead_gcm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cipher_flow_mode;
@@ -1837,77 +1792,33 @@ static inline int ssi_aead_gcm(
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only) {
- ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
// for gcm and rfc4106.
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
- if (req_ctx->cryptlen != 0)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
-#ifdef CC_DEBUG
-static inline void ssi_aead_dump_gcm(
- const char *title,
- struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-
- if (ctx->cipher_mode != DRV_CIPHER_GCTR)
- return;
-
- if (title) {
- dev_dbg(dev, "----------------------------------------------------------------------------------");
- dev_dbg(dev, "%s\n", title);
- }
-
- dev_dbg(dev, "cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
- req->assoclen, req_ctx->cryptlen);
-
- if (ctx->enckey)
- dump_byte_array("mac key", ctx->enckey, 16);
-
- dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
-
- dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
-
- dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
-
- if (req->src && req->cryptlen)
- dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
-
- if (req->dst)
- dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
-}
-#endif
-
static int config_gcm_context(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -1938,10 +1849,14 @@ static int config_gcm_context(struct aead_request *req)
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
- } else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
+ } else {
+ /* rfc4543=> all data(AAD,IV,Plain) are considered additional
+ * data that is nothing is encrypted.
+ */
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+ cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1950,30 +1865,31 @@ static int config_gcm_context(struct aead_request *req)
return 0;
}
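
config_gcm_context() above prepares the final GHASH input block: len(A) and len(C) in bits, each stored as a big-endian 64-bit value. For RFC 4543 (GMAC) everything is authenticated but nothing is encrypted, so len(C) is zero and the IV counts toward len(A), matching the else branch. A sketch of the normal GCM case:

	/* last GHASH block: len(A) || len(C), both in bits, big-endian */
	__be64 len_block[2];

	len_block[0] = cpu_to_be64((u64)assoclen * 8);
	len_block[1] = cpu_to_be64((u64)cryptlen * 8);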
-static void ssi_rfc4_gcm_process(struct aead_request *req)
+static void cc_proc_rfc4_gcm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
- memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+ ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
-#endif /*SSI_CC_HAS_AES_GCM*/
-
-static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
{
int rc = 0;
int seq_len = 0;
struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
@@ -1983,7 +1899,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
- if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
+ if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -1991,8 +1907,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_aead_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_aead_complete;
+ cc_req.user_arg = (void *)req;
/* Setup request context */
areq_ctx->gen_ctx.op_type = direct;
@@ -2005,7 +1921,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* Build CTR IV - Copy nonce from last 4 bytes in
* CTR key to first 4 bytes in CTR IV
*/
- memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+ CTR_RFC3686_NONCE_SIZE);
if (!areq_ctx->backup_giv) /*User none-generated IV*/
memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
req->iv, CTR_RFC3686_IV_SIZE);
@@ -2020,17 +1937,17 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
(ctx->cipher_mode == DRV_CIPHER_GCTR)) {
areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
if (areq_ctx->ctr_iv != req->iv) {
- memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
+ memcpy(areq_ctx->ctr_iv, req->iv,
+ crypto_aead_ivsize(tfm));
req->iv = areq_ctx->ctr_iv;
}
} else {
areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
}
-#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
rc);
goto exit;
@@ -2038,23 +1955,18 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
} else {
areq_ctx->ccm_hdr_size = ccm_header_size_null;
}
-#else
- areq_ctx->ccm_hdr_size = ccm_header_size_null;
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
rc);
goto exit;
}
}
-#endif /*SSI_CC_HAS_AES_GCM*/
- rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
- if (unlikely(rc != 0)) {
+ rc = cc_map_aead_request(ctx->drvdata, req);
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2063,74 +1975,77 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (areq_ctx->backup_giv) {
/* set the DMA mapped IV address*/
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CTR_RFC3686_NONCE_SIZE;
+ cc_req.ivgen_dma_addr_len = 1;
} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
- /* In ccm, the IV needs to exist both inside B0 and inside the counter.
- * It is also copied to iv_dma_addr for other reasons (like returning
- * it to the user).
+ /* In ccm, the IV needs to exist both inside B0 and
+ * inside the counter. It is also copied to iv_dma_addr
+ * for other reasons (like returning it to the user).
* So, using 3 (identical) IV outputs.
*/
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr_len = 3;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[1] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[2] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr_len = 3;
} else {
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
}
/* set the IV size (8/16 B long)*/
- ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
+ cc_req.ivgen_size = crypto_aead_ivsize(tfm);
}
/* STAT_PHASE_2: Create sequence */
/* Load MLLI tables to SRAM if necessary */
- ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
+ cc_mlli_to_sram(req, desc, &seq_len);
/*TODO: move seq len by reference */
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA256:
- ssi_aead_hmac_authenc(req, desc, &seq_len);
+ cc_hmac_authenc(req, desc, &seq_len);
break;
case DRV_HASH_XCBC_MAC:
- ssi_aead_xcbc_authenc(req, desc, &seq_len);
+ cc_xcbc_authenc(req, desc, &seq_len);
break;
-#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
case DRV_HASH_NULL:
-#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM)
- ssi_aead_ccm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
+ cc_ccm(req, desc, &seq_len);
if (ctx->cipher_mode == DRV_CIPHER_GCTR)
- ssi_aead_gcm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_GCM*/
- break;
-#endif
+ cc_gcm(req, desc, &seq_len);
+ break;
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
goto exit;
}
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
}
exit:
return rc;
}
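
One recurring semantic change worth calling out here: cc_send_request() can backlog requests, so from this point on -EBUSY means "accepted onto the backlog", not failure. Every encrypt/decrypt wrapper below follows the same pattern, restated as a condensed sketch (this duplicates the logic of the wrappers, it is not extra driver code; "direction" stands in for the DRV_CRYPTO_DIRECTION_* constant each wrapper passes):

	rc = cc_proc_aead(req, direction);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;	/* real error: undo the IV swap */
	return rc;	/* final status arrives via the completion callback */
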
-static int ssi_aead_encrypt(struct aead_request *req)
+static int cc_aead_encrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2142,21 +2057,20 @@ static int ssi_aead_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = -EINVAL;
@@ -2170,17 +2084,16 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-#endif /* SSI_CC_HAS_AES_CCM */
-static int ssi_aead_decrypt(struct aead_request *req)
+static int cc_aead_decrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2192,18 +2105,17 @@ static int ssi_aead_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2218,22 +2130,20 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-#endif /* SSI_CC_HAS_AES_CCM */
-
-#if SSI_CC_HAS_AES_GCM
-static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2244,12 +2154,13 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2260,11 +2171,11 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -2279,13 +2190,13 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2299,13 +2210,13 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2313,15 +2224,15 @@ static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
if (authsize != 16)
return -EINVAL;
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2337,19 +2248,19 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2361,22 +2272,22 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2392,19 +2303,19 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2416,31 +2327,30 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#endif /* SSI_CC_HAS_AES_GCM */
/* DX Block aead alg */
-static struct ssi_alg_template aead_algs[] = {
+static struct cc_alg_template aead_algs[] = {
{
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2454,12 +2364,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2473,12 +2383,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2492,12 +2402,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2511,12 +2421,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2530,12 +2440,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2549,12 +2459,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2568,12 +2478,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2581,19 +2491,18 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
},
-#if SSI_CC_HAS_AES_CCM
{
.name = "ccm(aes)",
.driver_name = "ccm-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_ccm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2607,12 +2516,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4309_ccm_setkey,
- .setauthsize = ssi_rfc4309_ccm_setauthsize,
- .encrypt = ssi_rfc4309_ccm_encrypt,
- .decrypt = ssi_rfc4309_ccm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CCM_BLOCK_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2620,20 +2529,18 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
},
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
{
.name = "gcm(aes)",
.driver_name = "gcm-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_gcm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = 12,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2647,12 +2554,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4106_gcm_setkey,
- .setauthsize = ssi_rfc4106_gcm_setauthsize,
- .encrypt = ssi_rfc4106_gcm_encrypt,
- .decrypt = ssi_rfc4106_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2666,12 +2573,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4543_gcm_setkey,
- .setauthsize = ssi_rfc4543_gcm_setauthsize,
- .encrypt = ssi_rfc4543_gcm_encrypt,
- .decrypt = ssi_rfc4543_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2679,52 +2586,51 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
},
-#endif /*SSI_CC_HAS_AES_GCM*/
};
-static struct ssi_crypto_alg *ssi_aead_create_alg(
- struct ssi_alg_template *template,
- struct device *dev)
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct aead_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
- alg = &template->template_aead;
+ alg = &tmpl->template_aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ tmpl->driver_name);
alg->base.cra_module = THIS_MODULE;
- alg->base.cra_priority = SSI_CRA_PRIO;
+ alg->base.cra_priority = CC_CRA_PRIO;
- alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- alg->init = ssi_aead_init;
- alg->exit = ssi_aead_exit;
+ tmpl->type;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
t_alg->aead_alg = *alg;
- t_alg->cipher_mode = template->cipher_mode;
- t_alg->flow_mode = template->flow_mode;
- t_alg->auth_mode = template->auth_mode;
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->auth_mode = tmpl->auth_mode;
return t_alg;
}
-int ssi_aead_free(struct ssi_drvdata *drvdata)
+int cc_aead_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
- struct ssi_aead_handle *aead_handle =
- (struct ssi_aead_handle *)drvdata->aead_handle;
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_aead_handle *aead_handle =
+ (struct cc_aead_handle *)drvdata->aead_handle;
if (aead_handle) {
/* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+ entry) {
crypto_unregister_aead(&t_alg->aead_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -2736,10 +2642,10 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
return 0;
}
-int ssi_aead_alloc(struct ssi_drvdata *drvdata)
+int cc_aead_alloc(struct cc_drvdata *drvdata)
{
- struct ssi_aead_handle *aead_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_aead_handle *aead_handle;
+ struct cc_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
struct device *dev = drvdata_to_dev(drvdata);
@@ -2753,8 +2659,9 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
INIT_LIST_HEAD(&aead_handle->aead_list);
drvdata->aead_handle = aead_handle;
- aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
- drvdata, MAX_HMAC_DIGEST_SIZE);
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
@@ -2763,7 +2670,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2772,7 +2679,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
goto fail2;
@@ -2788,7 +2695,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
fail2:
kfree(t_alg);
fail1:
- ssi_aead_free(drvdata);
+ cc_aead_free(drvdata);
fail0:
return rc;
}
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/cc_aead.h
index e85bcd917e7b..5edf3b351fa4 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/cc_aead.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-/* \file ssi_aead.h
+/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
*/
-#ifndef __SSI_AEAD_H__
-#define __SSI_AEAD_H__
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
#include <linux/kernel.h>
#include <crypto/algapi.h>
@@ -28,7 +15,7 @@
/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
-#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8
@@ -74,32 +61,37 @@ struct aead_req_ctx {
} gcm_len_block;
u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
- unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
- u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+ /* HW actual size input */
+ unsigned int hw_iv_size ____cacheline_aligned;
+ /* used to prevent a cache coherence problem */
+ u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
- dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
dma_addr_t icv_dma_addr; /* Phys. address of ICV */
//used in gcm
- dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
- dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc1_dma_addr;
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr;
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
bool is_gcm4543;
u8 *icv_virt_addr; /* Virt. address of ICV */
struct async_gen_req_ctx gen_ctx;
- struct ssi_mlli assoc;
- struct ssi_mlli src;
- struct ssi_mlli dst;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
struct scatterlist *src_sgl;
struct scatterlist *dst_sgl;
unsigned int src_offset;
unsigned int dst_offset;
- enum ssi_req_dma_buf_type assoc_buff_type;
- enum ssi_req_dma_buf_type data_buff_type;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
struct mlli_params mlli_params;
unsigned int cryptlen;
struct scatterlist ccm_adata_sg;
@@ -111,7 +103,7 @@ struct aead_req_ctx {
bool plaintext_authenticate_only; //for gcm_rfc4543
};
-int ssi_aead_alloc(struct ssi_drvdata *drvdata);
-int ssi_aead_free(struct ssi_drvdata *drvdata);
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
-#endif /*__SSI_AEAD_H__*/
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/cc_buffer_mgr.c
index 1f8a225530a8..14b2eabbf70a 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/cc_buffer_mgr.c
@@ -1,42 +1,17 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include <linux/crypto.h>
-#include <linux/version.h>
-#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
-#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include "ssi_buffer_mgr.h"
+#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
-#include "ssi_cipher.h"
-#include "ssi_hash.h"
-#include "ssi_aead.h"
-
-#define GET_DMA_BUFFER_TYPE(buff_type) ( \
- ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
- ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
- ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -64,25 +39,62 @@ struct buffer_array {
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+ switch (type) {
+ case CC_DMA_BUF_NULL:
+ return "BUF_NULL";
+ case CC_DMA_BUF_DLLI:
+ return "BUF_DLLI";
+ case CC_DMA_BUF_MLLI:
+ return "BUF_MLLI";
+ default:
+ return "BUF_INVALID";
+ }
+}
+
/**
- * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum cc_sg_cpy_direct dir)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 skip = req->assoclen + req->cryptlen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (skip - areq_ctx->req_authsize), skip, dir);
+}
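
The new cc_copy_mac() helper centralizes the window arithmetic that the old open-coded call sites repeated: the ICV occupies the last req_authsize bytes of assoclen + cryptlen, shifted by the IV size in the GCM4543 case. A self-contained user-space sketch of that computation (all sizes are made up for illustration; this is not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int assoclen = 16, cryptlen = 64, authsize = 12;
		unsigned int ivsize = 8;	/* only added in the gcm4543 case */
		int is_gcm4543 = 1;
		unsigned int skip = assoclen + cryptlen;

		if (is_gcm4543)
			skip += ivsize;

		/* cc_copy_sg_portion() then copies bytes [skip - authsize, skip) */
		printf("MAC window: bytes %u..%u of req->src\n",
		       skip - authsize, skip - 1);
		return 0;
	}
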
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
*
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the amount of bytes at the last entry
*/
-static unsigned int ssi_buffer_mgr_get_sgl_nents(
- struct device *dev, struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes, bool *is_chained)
+static unsigned int cc_get_sgl_nents(struct device *dev,
+ struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes,
+ bool *is_chained)
{
unsigned int nents = 0;
- while (nbytes != 0) {
- if (sg_list->length != 0) {
+ while (nbytes && sg_list) {
+ if (sg_list->length) {
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -95,11 +107,11 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
}
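
Note that cc_get_sgl_nents() now also guards against a NULL sg_list in the loop condition, counts only non-empty entries, and reports through *lbytes how many payload bytes fall into the last counted entry. A runnable user-space analogue over a plain length array (entry lengths are hypothetical, for illustration only):

	#include <stdio.h>

	/* analogue of cc_get_sgl_nents(): count entries needed to cover
	 * nbytes and report the byte count consumed from the last entry
	 */
	static unsigned int get_nents(const unsigned int *len, unsigned int n,
				      unsigned int nbytes, unsigned int *lbytes)
	{
		unsigned int nents = 0, i;

		for (i = 0; i < n && nbytes; i++) {
			if (!len[i])
				continue;	/* skip zero-length entries */
			nents++;
			*lbytes = nbytes;	/* bytes left entering this entry */
			nbytes -= (len[i] > nbytes) ? nbytes : len[i];
		}
		return nents;
	}

	int main(void)
	{
		unsigned int len[] = { 32, 32, 32 }, lbytes = 0;
		unsigned int nents = get_nents(len, 3, 80, &lbytes);

		printf("nents=%u last-entry bytes=%u\n", nents, lbytes); /* 3, 16 */
		return 0;
	}
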
/**
- * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
+ * cc_zero_sgl() - Zero scatterlist data.
*
* @sgl:
*/
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
struct scatterlist *current_sg = sgl;
int sg_index = 0;
@@ -116,7 +128,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
}
/**
- * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
+ * cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
* @dest:
@@ -125,21 +137,19 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
* @end:
* @direct:
*/
-void ssi_buffer_mgr_copy_scatterlist_portion(
- struct device *dev, u8 *dest,
- struct scatterlist *sg, u32 to_skip,
- u32 end, enum ssi_sg_cpy_direct direct)
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
u32 nents, lbytes;
- nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
- (direct == SSI_SG_TO_BUF));
+ (direct == CC_SG_TO_BUF));
}
-static inline int ssi_buffer_mgr_render_buff_to_mlli(
- struct device *dev, dma_addr_t buff_dma, u32 buff_size,
- u32 *curr_nents, u32 **mlli_entry_pp)
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+ u32 buff_size, u32 *curr_nents,
+ u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
u32 new_nents;
@@ -173,26 +183,25 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
return 0;
}
-static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct device *dev, struct scatterlist *sgl,
- u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
- u32 **mlli_entry_pp)
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
s32 rc = 0;
- for ( ; (curr_sgl) && (sgl_data_len != 0);
+ for ( ; (curr_sgl && sgl_data_len);
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
sg_dma_len(curr_sgl) - sgl_offset :
sgl_data_len;
sgl_data_len -= entry_data_len;
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_dma_address(curr_sgl) + sgl_offset,
- entry_data_len, curr_nents, &mlli_entry_p);
- if (rc != 0)
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
+ if (rc)
return rc;
sgl_offset = 0;
@@ -201,10 +210,8 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
return 0;
}
-static int ssi_buffer_mgr_generate_mlli(
- struct device *dev,
- struct buffer_array *sg_data,
- struct mlli_params *mlli_params)
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+ struct mlli_params *mlli_params, gfp_t flags)
{
u32 *mlli_p;
u32 total_nents = 0, prev_total_nents = 0;
@@ -213,10 +220,10 @@ static int ssi_buffer_mgr_generate_mlli(
dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
/* Allocate memory from the pointed pool */
- mlli_params->mlli_virt_addr = dma_pool_alloc(
- mlli_params->curr_pool, GFP_KERNEL,
- &mlli_params->mlli_dma_addr);
- if (unlikely(!mlli_params->mlli_virt_addr)) {
+ mlli_params->mlli_virt_addr =
+ dma_pool_alloc(mlli_params->curr_pool, flags,
+ &mlli_params->mlli_dma_addr);
+ if (!mlli_params->mlli_virt_addr) {
dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
@@ -225,17 +232,19 @@ static int ssi_buffer_mgr_generate_mlli(
mlli_p = (u32 *)mlli_params->mlli_virt_addr;
/* go over all SG's and link it to one MLLI table */
for (i = 0; i < sg_data->num_of_buffers; i++) {
+ union buffer_array_entry *entry = &sg_data->entry[i];
+ u32 tot_len = sg_data->total_data_len[i];
+ u32 offset = sg_data->offset[i];
+
if (sg_data->type[i] == DMA_SGL_TYPE)
- rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
- dev, sg_data->entry[i].sgl,
- sg_data->total_data_len[i], sg_data->offset[i],
- &total_nents, &mlli_p);
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+ offset, &total_nents,
+ &mlli_p);
else /*DMA_BUFF_TYPE*/
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_data->entry[i].buffer_dma,
- sg_data->total_data_len[i], &total_nents,
- &mlli_p);
- if (rc != 0)
+ rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+ tot_len, &total_nents,
+ &mlli_p);
+ if (rc)
return rc;
/* set last bit in the current table */
@@ -260,10 +269,10 @@ build_mlli_exit:
return rc;
}
-static inline void ssi_buffer_mgr_add_buffer_entry(
- struct device *dev, struct buffer_array *sgl_data,
- dma_addr_t buffer_dma, unsigned int buffer_len,
- bool is_last_entry, u32 *mlli_nents)
+static void cc_add_buffer_entry(struct device *dev,
+ struct buffer_array *sgl_data,
+ dma_addr_t buffer_dma, unsigned int buffer_len,
+ bool is_last_entry, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
@@ -281,15 +290,10 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
sgl_data->num_of_buffers++;
}
-static inline void ssi_buffer_mgr_add_scatterlist_entry(
- struct device *dev,
- struct buffer_array *sgl_data,
- unsigned int nents,
- struct scatterlist *sgl,
- unsigned int data_len,
- unsigned int data_offset,
- bool is_last_table,
- u32 *mlli_nents)
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+ unsigned int nents, struct scatterlist *sgl,
+ unsigned int data_len, unsigned int data_offset,
+ bool is_last_table, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
@@ -307,9 +311,8 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
sgl_data->num_of_buffers++;
}
-static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+ enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
@@ -317,7 +320,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
- if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
@@ -336,17 +339,15 @@ err:
return 0;
}
-static int ssi_buffer_mgr_map_scatterlist(
- struct device *dev, struct scatterlist *sg,
- unsigned int nbytes, int direction,
- u32 *nents, u32 max_sg_nents,
- u32 *lbytes, u32 *mapped_nents)
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
bool is_chained = false;
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
- if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, sg, 1, direction) != 1) {
dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
@@ -357,8 +358,8 @@ static int ssi_buffer_mgr_map_scatterlist(
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+ &is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -370,7 +371,7 @@ static int ssi_buffer_mgr_map_scatterlist(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (unlikely(*mapped_nents == 0)) {
+ if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -379,11 +380,9 @@ static int ssi_buffer_mgr_map_scatterlist(
/*In this case the driver maps entry by entry so it
* must have the same nents before and after map
*/
- *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
- sg,
- *nents,
- direction);
- if (unlikely(*mapped_nents != *nents)) {
+ *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+ direction);
+ if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -394,18 +393,16 @@ static int ssi_buffer_mgr_map_scatterlist(
return 0;
}
-static inline int
-ssi_aead_handle_config_buf(struct device *dev,
- struct aead_req_ctx *areq_ctx,
- u8 *config_data,
- struct buffer_array *sg_data,
- unsigned int assoclen)
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+ u8 *config_data, struct buffer_array *sg_data,
+ unsigned int assoclen)
{
dev_dbg(dev, " handle additional data config set to DLLI\n");
/* create sg for the current buffer */
- sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
- if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+ AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
@@ -416,25 +413,21 @@ ssi_aead_handle_config_buf(struct device *dev,
areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
- &areq_ctx->ccm_adata_sg,
- (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
- 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
}
return 0;
}
-static inline int ssi_ahash_handle_curr_buf(struct device *dev,
- struct ahash_req_ctx *areq_ctx,
- u8 *curr_buff,
- u32 curr_buff_cnt,
- struct buffer_array *sg_data)
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
{
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
- if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
@@ -442,25 +435,22 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
- curr_buff_cnt, 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
return 0;
}
-void ssi_buffer_mgr_unmap_blkcipher_request(
- struct device *dev,
- void *ctx,
- unsigned int ivsize,
- struct scatterlist *src,
- struct scatterlist *dst)
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
- if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
+ if (req_ctx->gen_ctx.iv_dma_addr) {
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
@@ -469,7 +459,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
DMA_TO_DEVICE);
}
/* Release pool */
- if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+ req_ctx->mlli_params.mlli_virt_addr) {
dma_pool_free(req_ctx->mlli_params.curr_pool,
req_ctx->mlli_params.mlli_virt_addr,
req_ctx->mlli_params.mlli_dma_addr);
@@ -484,14 +475,10 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
}
}
-int ssi_buffer_mgr_map_blkcipher_request(
- struct ssi_drvdata *drvdata,
- void *ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- void *info,
- struct scatterlist *src,
- struct scatterlist *dst)
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags)
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
@@ -502,20 +489,19 @@ int ssi_buffer_mgr_map_blkcipher_request(
int rc = 0;
u32 mapped_nents = 0;
- req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
+ req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
/* Map IV buffer */
- if (likely(ivsize != 0)) {
+ if (ivsize) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- req_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
return -ENOMEM;
@@ -527,121 +513,107 @@ int ssi_buffer_mgr_map_blkcipher_request(
}
/* Map the src SGL */
- rc = ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
- req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
- if (unlikely(src == dst)) {
+ if (src == dst) {
/* Handle inplace operation */
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
req_ctx->out_nents = 0;
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
}
} else {
/* Map the dst sg */
- if (unlikely(ssi_buffer_mgr_map_scatterlist(
- dev, dst, nbytes,
- DMA_BIDIRECTIONAL, &req_ctx->out_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents))){
+ if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
- req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
-
- if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->out_nents,
- dst, nbytes, 0,
- true,
- &req_ctx->out_mlli_nents);
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
}
}
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc != 0))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto ablkcipher_exit;
}
dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ cc_dma_buf_type(req_ctx->dma_buf_type));
return 0;
ablkcipher_exit:
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
return rc;
}
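
cc_map_blkcipher_request() now takes a gfp_t so the MLLI pool allocation in cc_generate_mlli() can respect the caller's context instead of hardcoding GFP_KERNEL. A sketch of how a caller might derive the flags from the request (the surrounding variable names are illustrative, though CRYPTO_TFM_REQ_MAY_SLEEP is the standard crypto API flag):

	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		GFP_KERNEL : GFP_ATOMIC;

	rc = cc_map_blkcipher_request(ctx->drvdata, req_ctx, ivsize, nbytes,
				      req->info, req->src, req->dst, flags);
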
-void ssi_buffer_mgr_unmap_aead_request(
- struct device *dev, struct aead_request *req)
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
bool chained;
u32 size_to_unmap = 0;
- if (areq_ctx->mac_buf_dma_addr != 0) {
+ if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
}
-#if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
- if (areq_ctx->hkey_dma_addr != 0) {
+ if (areq_ctx->hkey_dma_addr) {
dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
}
- if (areq_ctx->gcm_block_len_dma_addr != 0) {
+ if (areq_ctx->gcm_block_len_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
- if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
+ if (areq_ctx->gcm_iv_inc1_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
- if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
+ if (areq_ctx->gcm_iv_inc2_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
}
-#endif
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- if (areq_ctx->ccm_iv0_dma_addr != 0) {
+ if (areq_ctx->ccm_iv0_dma_addr) {
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
}
- if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
+ if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
}
@@ -668,46 +640,37 @@ void ssi_buffer_mgr_unmap_aead_request(
size_to_unmap += crypto_aead_ivsize(tfm);
dma_unmap_sg(dev, req->src,
- ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
- if (unlikely(req->src != req->dst)) {
+ if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
- (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst)) {
- u32 size_to_skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- /* copy mac to a temporary location to deal with possible
- * data memory overriding that caused by cache coherence problem.
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst) {
+ /* copy back mac from temporary location to deal with possible
+ * data memory overwriting caused by a cache coherence
+ * problem.
*/
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
+ cc_copy_mac(dev, req, CC_SG_FROM_BUF);
}
}
-static inline int ssi_buffer_mgr_get_aead_icv_nents(
- struct device *dev,
- struct scatterlist *sgl,
- unsigned int sgl_nents,
- unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+ unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size,
+ bool *is_icv_fragmented)
{
unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
+ unsigned int icv_required_size = authsize > last_entry_data_size ?
+ (authsize - last_entry_data_size) :
+ authsize;
unsigned int nents;
unsigned int i;
@@ -726,10 +689,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
icv_max_size = sgl->length;
if (last_entry_data_size > authsize) {
- nents = 0; /* ICV attached to data in last entry (not fragmented!) */
+ /* ICV attached to data in last entry (not fragmented!) */
+ nents = 0;
*is_icv_fragmented = false;
} else if (last_entry_data_size == authsize) {
- nents = 1; /* ICV placed in whole last entry (not fragmented!) */
+ /* ICV placed in whole last entry (not fragmented!) */
+ nents = 1;
*is_icv_fragmented = false;
} else if (icv_max_size > icv_required_size) {
nents = 1;
@@ -748,25 +713,25 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
return nents;
}
-static inline int ssi_buffer_mgr_aead_chain_iv(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last, bool do_chain)
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
- if (unlikely(!req->iv)) {
+ if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+ hw_iv_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
rc = -ENOMEM;
@@ -775,28 +740,27 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
- if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
+ // TODO: what about CTR?? ask Ron
+ if (do_chain && areq_ctx->plaintext_authenticate_only) {
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
/* Chain to given list */
- ssi_buffer_mgr_add_buffer_entry(
- dev, sg_data,
- areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
- iv_size_to_authenc, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ cc_add_buffer_entry(dev, sg_data,
+ (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
chain_iv_exit:
return rc;
}
-static inline int ssi_buffer_mgr_aead_chain_assoc(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last, bool do_chain)
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
@@ -815,12 +779,12 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
goto chain_assoc_exit;
}
- if (unlikely(req->assoclen == 0)) {
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
+ if (req->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -828,12 +792,15 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
//iterate over the sgl to see how many entries are for associated data
//it is assumed that if we reach here , the sgl is already mapped
sg_index = current_sg->length;
- if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
+ //the first entry in the scatter list contains all the associated data
+ if (sg_index > size_of_assoc) {
mapped_nents++;
} else {
while (sg_index <= size_of_assoc) {
current_sg = sg_next(current_sg);
- //if have reached the end of the sgl, then this is unexpected
+ /* if we have reached the end of the sgl, then this is
+ * unexpected
+ */
if (!current_sg) {
dev_err(dev, "reached end of sg list. unexpected\n");
return -EINVAL;
@@ -842,7 +809,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
mapped_nents++;
}
}
- if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
@@ -853,8 +820,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
* ccm header configurations
*/
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- if (unlikely((mapped_nents + 1) >
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
(areq_ctx->assoc.nents + 1),
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -863,227 +829,175 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
}
}
- if (likely(mapped_nents == 1) &&
- (areq_ctx->ccm_hdr_size == ccm_header_size_null))
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
else
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
- if (unlikely((do_chain) ||
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+ if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
- ssi_buffer_mgr_add_scatterlist_entry(
- dev, sg_data, areq_ctx->assoc.nents,
- req->src, req->assoclen, 0, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
chain_assoc_exit:
return rc;
}
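
cc_aead_chain_assoc() walks the already-mapped source SG list to see how many entries the associated data spans. A hedged restatement of that walk as a standalone helper (hypothetical name, standard scatterlist accessors):

	#include <linux/scatterlist.h>

	/* Count how many mapped SG entries the first assoclen bytes span,
	 * failing if the list ends before assoclen is covered.
	 */
	static int sketch_count_assoc_nents(struct scatterlist *sg,
					    unsigned int assoclen)
	{
		unsigned int covered = 0;
		int nents = 0;

		while (sg && covered < assoclen) {
			covered += sg->length;
			nents++;
			sg = sg_next(sg);
		}

		/* ran off the end of the list before covering assoclen */
		if (covered < assoclen)
			return -EINVAL;

		return nents;
	}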
-static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
- struct aead_request *req,
- u32 *src_last_bytes, u32 *dst_last_bytes)
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+ u32 *src_last_bytes, u32 *dst_last_bytes)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
areq_ctx->is_icv_fragmented = false;
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else {
/*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->dst_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dst_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
}
}
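
In the DLLI case the ICV is always contiguous, so all three branches derive its DMA and virtual addresses with the same tail arithmetic. A small sketch of that computation (illustrative helper):

	#include <linux/scatterlist.h>

	/* The ICV occupies the last authsize bytes of the final SG entry,
	 * whose tail holds last_bytes of payload plus ICV.
	 */
	static void sketch_icv_pos(struct scatterlist *last_sg, u32 last_bytes,
				   unsigned int authsize, dma_addr_t *icv_dma,
				   u8 **icv_virt)
	{
		*icv_dma = sg_dma_address(last_sg) + (last_bytes - authsize);
		*icv_virt = (u8 *)sg_virt(last_sg) + (last_bytes - authsize);
	}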
-static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
int rc = 0, icv_nents;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct device *dev = drvdata_to_dev(drvdata);
+ struct scatterlist *sg;
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (unlikely(areq_ctx->is_icv_fragmented)) {
+ if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
- * verification is made by CPU compare in order to simplify
- * MAC verification upon request completion
+			 * verification is done by a CPU compare in order to
+ * simplify MAC verification upon request completion
*/
if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- if (!drvdata->coherent) {
				/* On coherent platforms (e.g. ACP) the
				 * ICV is already copied for any
				 * INPLACE-DECRYPT operation, hence
				 * we must skip this code.
*/
- u32 skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- skip += crypto_aead_ivsize(tfm);
-
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac,
- req->src,
- (skip + req->cryptlen -
- areq_ctx->req_authsize),
- skip + req->cryptlen,
- SSI_SG_TO_BUF);
- }
+ if (!drvdata->coherent)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else {
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
- areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_dma_addr =
+ areq_ctx->mac_buf_dma_addr;
}
} else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case when the sg is not contiguous */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*src_last_bytes - authsize);
}
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (unlikely(areq_ctx->is_icv_fragmented)) {
- /* Backup happens only when ICV is fragmented, ICV
- * verification is made by CPU compare in order to simplify
- * MAC verification upon request completion
- */
- u32 size_to_skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ /* Backup happens only when ICV is fragmented, ICV
+		 * verification is done by a CPU compare in order to simplify
+ * MAC verification upon request completion
+ */
+ if (areq_ctx->is_icv_fragmented) {
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
} else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case when the sg is not contiguous */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*src_last_bytes - authsize);
}
} else {
/*NON-INPLACE and ENCRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize,
- *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (likely(!areq_ctx->is_icv_fragmented)) {
+ if (!areq_ctx->is_icv_fragmented) {
+ sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
/* Contig. ICV */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*dst_last_bytes - authsize);
} else {
areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
@@ -1095,11 +1009,10 @@ prepare_data_mlli_exit:
return rc;
}
-static inline int ssi_buffer_mgr_aead_chain_data(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last_table, bool do_chain)
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(drvdata);
@@ -1109,7 +1022,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
int rc = 0;
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
- unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
+ /* non-inplace mode */
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool chained = false;
@@ -1130,11 +1044,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
- size_for_map,
- &src_last_bytes,
- &chained);
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes, &chained);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1148,7 +1061,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
- if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1160,26 +1073,23 @@ static inline int ssi_buffer_mgr_aead_chain_data(
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
- DMA_BIDIRECTIONAL,
- &areq_ctx->dst.nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dst_last_bytes,
- &dst_mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto chain_data_exit;
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_for_map,
- &dst_last_bytes,
- &chained);
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes, &chained);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1195,45 +1105,43 @@ static inline int ssi_buffer_mgr_aead_chain_data(
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
- if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
areq_ctx->dst_offset = offset;
- if ((src_mapped_nents > 1) ||
- (dst_mapped_nents > 1) ||
+ if (src_mapped_nents > 1 ||
+ dst_mapped_nents > 1 ||
do_chain) {
- areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
- rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
- sg_data,
- &src_last_bytes,
- &dst_last_bytes,
- is_last_table);
+ areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+ rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes,
+ &dst_last_bytes, is_last_table);
} else {
- areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
- ssi_buffer_mgr_prepare_aead_data_dlli(
- req, &src_last_bytes, &dst_last_bytes);
+ areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
}
chain_data_exit:
return rc;
}
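
The final branch condenses the DLLI/MLLI policy: a direct LLI is only possible when both sides map to a single entry and no chaining into a larger table was requested. Restated as a predicate (hypothetical helper):

	#include <linux/types.h>

	/* MLLI (a HW-walked table) is needed as soon as either side is
	 * fragmented, or when the caller asked to chain into a larger table.
	 */
	static bool sketch_needs_mlli(u32 src_nents, u32 dst_nents,
				      bool do_chain)
	{
		return src_nents > 1 || dst_nents > 1 || do_chain;
	}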
-static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
- struct aead_request *req)
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
u32 curr_mlli_size = 0;
- if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
curr_mlli_size = areq_ctx->assoc.mlli_nents *
LLI_ENTRY_BYTE_SIZE;
}
- if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
+ if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
/*Inplace case dst nents equal to src nents*/
if (req->src == req->dst) {
areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
@@ -1272,8 +1180,7 @@ static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
}
}
-int ssi_buffer_mgr_map_aead_request(
- struct ssi_drvdata *drvdata, struct aead_request *req)
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
@@ -1284,30 +1191,22 @@ int ssi_buffer_mgr_map_aead_request(
int rc = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
bool is_gcm4543 = areq_ctx->is_gcm4543;
-
+ dma_addr_t dma_addr;
u32 mapped_nents = 0;
u32 dummy = 0; /*used for the assoc data fragments */
u32 size_to_map = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
+	/* copy the mac to a temporary location to deal with possible
+	 * data memory overwrites caused by cache coherence problems.
+ */
if (drvdata->coherent &&
- (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst)) {
- u32 size_to_skip = req->assoclen;
-
- if (is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- /* copy mac to a temporary location to deal with possible
- * data memory overriding that caused by cache coherence problem.
- */
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
- }
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
	/* calculate the size for the cipher, removing the ICV in decrypt */
areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
@@ -1315,90 +1214,83 @@ int ssi_buffer_mgr_map_aead_request(
req->cryptlen :
(req->cryptlen - authsize);
- areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
- MAX_MAC_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
+ dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->mac_buf_dma_addr = dma_addr;
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
- (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
- if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
+ dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE,
- (areq_ctx->ccm_config +
- CCM_CTR_COUNT_0_OFFSET));
+ AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
- if (ssi_aead_handle_config_buf(dev, areq_ctx,
- areq_ctx->ccm_config, &sg_data,
- req->assoclen) != 0) {
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen)) {
rc = -ENOMEM;
goto aead_map_failure;
}
}
-#if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
- areq_ctx->hkey_dma_addr = dma_map_single(dev,
- areq_ctx->hkey,
- AES_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
+ dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->hkey_dma_addr = dma_addr;
- areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
- &areq_ctx->gcm_len_block,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
+ dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_block_len_dma_addr = dma_addr;
- areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc1,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
- areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc2,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
-#endif /*SSI_CC_HAS_AES_GCM*/
size_to_map = req->cryptlen + req->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
@@ -1406,29 +1298,31 @@ int ssi_buffer_mgr_map_aead_request(
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
- size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->src.nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto aead_map_failure;
}
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
/*
* Create MLLI table for:
* (1) Assoc. data
* (2) Src/Dst SGLs
* Note: IV is contg. buffer (not an SGL)
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
} else { /* DOUBLE-PASS flow */
/*
@@ -1451,27 +1345,28 @@ int ssi_buffer_mgr_map_aead_request(
* (3) MLLI for src
* (4) MLLI for dst
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (rc)
goto aead_map_failure;
}
- /* Mlli support -start building the MLLI according to the above results */
- if (unlikely(
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
- (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
+	/* MLLI support - start building the MLLI according to the above
+ * results
+ */
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc != 0))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto aead_map_failure;
- ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
+ cc_update_aead_mlli_nents(drvdata, req);
dev_dbg(dev, "assoc params mn %d\n",
areq_ctx->assoc.mlli_nents);
dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
@@ -1480,19 +1375,18 @@ int ssi_buffer_mgr_map_aead_request(
return 0;
aead_map_failure:
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
return rc;
}
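
cc_generate_mlli() now takes gfp flags derived from cc_gfp_flags(&req->base), a helper introduced elsewhere in this series whose body is not shown in this diff. Judging from its use here, it presumably selects the allocation mode from the request flags, along these lines (an assumed shape, not the confirmed implementation):

	#include <linux/crypto.h>
	#include <linux/gfp.h>

	/* Assumed: requests flagged as sleepable may allocate with
	 * GFP_KERNEL, all others must fall back to GFP_ATOMIC.
	 */
	static inline gfp_t sketch_gfp_flags(struct crypto_async_request *req)
	{
		return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	}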
-int ssi_buffer_mgr_map_hash_request_final(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
- u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
- areq_ctx->buff0;
- u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
- &areq_ctx->buff0_cnt;
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
@@ -1502,88 +1396,78 @@ int ssi_buffer_mgr_map_hash_request_final(
dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
/* nothing to do */
return 0;
}
	/* TODO: copy data in case the buffer is large enough for the operation */
/* map the previous buffer */
- if (*curr_buff_cnt != 0) {
- if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
return -ENOMEM;
}
}
- if (src && (nbytes > 0) && do_update) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (src && nbytes > 0 && do_update) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
goto unmap_curr_buff;
}
- if (src && (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = nbytes;
areq_ctx->curr_sg = areq_ctx->buff_sg;
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
} else {
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
/*build mlli */
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src, nbytes, 0, true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
- }
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0)
+ if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
-int ssi_buffer_mgr_map_hash_request_update(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
- u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
- areq_ctx->buff0;
- u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
- &areq_ctx->buff0_cnt;
- u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
- areq_ctx->buff1;
- u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
- &areq_ctx->buff1_cnt;
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
unsigned int update_data_len;
u32 total_in_len = nbytes + *curr_buff_cnt;
@@ -1596,18 +1480,17 @@ int ssi_buffer_mgr_map_hash_request_update(
dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
areq_ctx->curr_sg = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(total_in_len < block_size)) {
+ if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
- NULL);
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1623,20 +1506,20 @@ int ssi_buffer_mgr_map_hash_request_update(
*next_buff_cnt, update_data_len);
/* Copy the new residue to next buffer */
- if (*next_buff_cnt != 0) {
+ if (*next_buff_cnt) {
dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
- ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
- (update_data_len - *curr_buff_cnt),
- nbytes, SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
- if (*curr_buff_cnt != 0) {
- if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
return -ENOMEM;
}
/* change the buffer index for next operation */
@@ -1644,42 +1527,33 @@ int ssi_buffer_mgr_map_hash_request_update(
}
if (update_data_len > *curr_buff_cnt) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
goto unmap_curr_buff;
}
- if ((mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = update_data_len;
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
} else {
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src,
- (update_data_len - *curr_buff_cnt),
- 0,
- true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
- }
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1689,18 +1563,17 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0)
+ if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
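
cc_hash_buf()/cc_next_buf() and their _cnt variants replace the open-coded ternaries removed above. Reconstructed from those removed lines, the helpers presumably look like this (names and the struct slice are assumptions based on context):

	#include <linux/types.h>

	/* Minimal assumed slice of the driver's hash request state:
	 * buff_index flips between two staging buffers, so one buffer can
	 * collect the next residue while the other is still mapped for HW.
	 */
	struct sketch_hash_state {
		u8 buff0[128];		/* illustrative size */
		u8 buff1[128];
		u32 buff0_cnt;
		u32 buff1_cnt;
		u8 buff_index;
	};

	static inline u8 *sketch_hash_buf(struct sketch_hash_state *state)
	{
		return state->buff_index ? state->buff1 : state->buff0;
	}

	static inline u32 *sketch_hash_buf_cnt(struct sketch_hash_state *state)
	{
		return state->buff_index ? &state->buff1_cnt :
					   &state->buff0_cnt;
	}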
-void ssi_buffer_mgr_unmap_hash_request(
- struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
- &areq_ctx->buff1_cnt;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
/*In case a pool was set, a table was
*allocated and should be released
@@ -1714,21 +1587,23 @@ void ssi_buffer_mgr_unmap_hash_request(
areq_ctx->mlli_params.mlli_dma_addr);
}
- if ((src) && likely(areq_ctx->in_nents != 0)) {
+ if (src && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
- if (*prev_len != 0) {
+ if (*prev_len) {
dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
- /* clean the previous data length for update operation */
+ /* clean the previous data length for update
+ * operation
+ */
*prev_len = 0;
} else {
areq_ctx->buff_index ^= 1;
@@ -1736,7 +1611,7 @@ void ssi_buffer_mgr_unmap_hash_request(
}
}
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
@@ -1747,23 +1622,23 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
drvdata->buff_mgr_handle = buff_mgr_handle;
- buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
- "dx_single_mlli_tables", dev,
+ buff_mgr_handle->mlli_buffs_pool =
+ dma_pool_create("dx_single_mlli_tables", dev,
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
- if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
+ if (!buff_mgr_handle->mlli_buffs_pool)
goto error;
return 0;
error:
- ssi_buffer_mgr_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
return -ENOMEM;
}
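
cc_buffer_mgr_init() sizes a single DMA pool for a maximally sized MLLI table; requests later borrow tables with dma_pool_alloc() and return them with dma_pool_free(). A compact sketch of that lifecycle (standard dmapool API; the size and alignment constants here are illustrative stand-ins for the driver's):

	#include <linux/dmapool.h>

	static int sketch_mlli_pool_demo(struct device *dev, gfp_t flags)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *virt;

		/* one allocation unit == one full MLLI table */
		pool = dma_pool_create("mlli_tables", dev, 4096, 64, 0);
		if (!pool)
			return -ENOMEM;

		virt = dma_pool_alloc(pool, flags, &dma);
		if (virt)
			dma_pool_free(pool, virt, dma);

		dma_pool_destroy(pool);
		return 0;
	}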
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
diff --git a/drivers/staging/ccree/cc_buffer_mgr.h b/drivers/staging/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000000..99b752aa1077
--- /dev/null
+++ b/drivers/staging/ccree/cc_buffer_mgr.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+ CC_DMA_BUF_NULL = 0,
+ CC_DMA_BUF_DLLI,
+ CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+ CC_SG_TO_BUF = 0,
+ CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+ cc_sram_addr_t sram_addr;
+ unsigned int nents; //sg nents
+	unsigned int mlli_nents; //mlli nents may differ from the above
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ u8 *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize,
+ struct scatterlist *src,
+ struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /* __CC_BUFFER_MGR_H__ */
+
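
Taken together, the declarations above define the mapping lifecycle the rest of the driver is expected to follow: map before building HW descriptors, unmap on the completion path. A hedged sketch of a caller (error handling elided, descriptor submission out of scope here):

	#include "cc_buffer_mgr.h"

	static int sketch_aead_submit(struct cc_drvdata *drvdata,
				      struct aead_request *req)
	{
		int rc;

		/* build the DLLI/MLLI view of src/dst/assoc/IV */
		rc = cc_map_aead_request(drvdata, req);
		if (rc)
			return rc;

		/* ... queue HW descriptors referencing the mapped buffers ... */

		/* the completion path must release every mapping taken above */
		cc_unmap_aead_request(drvdata_to_dev(drvdata), req);
		return 0;
	}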
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/cc_cipher.c
index ee85cbf7c9ae..d4ac0ff2ffcf 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/cc_cipher.c
@@ -1,46 +1,27 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/semaphore.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
+#include "cc_driver.h"
#include "cc_lli_defs.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_cipher.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sysfs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
#define MAX_ABLKCIPHER_SEQ_LEN 6
#define template_ablkcipher template_u.ablkcipher
-#define SSI_MIN_AES_XTS_SIZE 0x10
-#define SSI_MAX_AES_XTS_SIZE 0x2000
-struct ssi_blkcipher_handle {
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
struct list_head blkcipher_alg_list;
};
@@ -54,8 +35,8 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
-struct ssi_ablkcipher_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_cipher_ctx {
+ struct cc_drvdata *drvdata;
int keylen;
int key_round_number;
int cipher_mode;
@@ -67,61 +48,56 @@ struct ssi_ablkcipher_ctx {
struct crypto_shash *shash_tfm;
};
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
- if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
- (ctx_p->cipher_mode != DRV_CIPHER_ESSIV) &&
- (ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)))
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
- if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
- (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
- (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
return 0;
break;
default:
break;
}
case S_DIN_to_DES:
- if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
- return 0;
- break;
-#endif
default:
break;
}
return -EINVAL;
}
-static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size)
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+ unsigned int size)
{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if ((size >= SSI_MIN_AES_XTS_SIZE) &&
- (size <= SSI_MAX_AES_XTS_SIZE) &&
+ if (size >= CC_MIN_AES_XTS_SIZE &&
+ size <= CC_MAX_AES_XTS_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
case DRV_CIPHER_CBC_CTS:
- if (likely(size >= AES_BLOCK_SIZE))
+ if (size >= AES_BLOCK_SIZE)
return 0;
break;
case DRV_CIPHER_OFB:
@@ -131,7 +107,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
default:
@@ -139,23 +115,9 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
}
break;
case S_DIN_to_DES:
- if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
- return 0;
- break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- switch (ctx_p->cipher_mode) {
- case DRV_MULTI2_CBC:
- if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
- return 0;
- break;
- case DRV_MULTI2_OFB:
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
- default:
- break;
- }
break;
-#endif /*SSI_CC_HAS_MULTI2*/
default:
break;
}
@@ -164,36 +126,42 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
static unsigned int get_max_keysize(struct crypto_tfm *tfm)
{
- struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
- return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_ABLKCIPHER)
+ return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
- return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER)
+ return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
return 0;
}
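
get_max_keysize() recovers the driver's wrapper from the generic crypto_alg via container_of(). A generic illustration of that recovery (hypothetical wrapper type, standard kernel macro):

	#include <linux/crypto.h>
	#include <linux/kernel.h>

	struct sketch_alg {
		int cipher_mode;
		struct crypto_alg crypto_alg;	/* embedded; registered with the API */
	};

	/* Given the embedded member the crypto API hands back, recover the
	 * enclosing driver structure and its private fields.
	 */
	static struct sketch_alg *sketch_from_crypto_alg(struct crypto_alg *alg)
	{
		return container_of(alg, struct sketch_alg, crypto_alg);
	}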
-static int ssi_blkcipher_init(struct crypto_tfm *tfm)
+static int cc_cipher_init(struct crypto_tfm *tfm)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, crypto_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, crypto_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
+ struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
- ctx_p->cipher_mode = ssi_alg->cipher_mode;
- ctx_p->flow_mode = ssi_alg->flow_mode;
- ctx_p->drvdata = ssi_alg->drvdata;
+ ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
/* Allocate key buffer, cache line aligned */
- ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
return -ENOMEM;
@@ -224,9 +192,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
return rc;
}
-static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
+static void cc_cipher_exit(struct crypto_tfm *tfm)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int max_key_buf_size = get_max_keysize(tfm);
@@ -262,13 +230,15 @@ static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that tdes keys are not weak.*/
-static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
+static int cc_verify_3des_keys(const u8 *key, unsigned int keylen)
{
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
- if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
- (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+ if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key1)) == 0) ||
+ (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key3)) == 0)) {
return -ENOEXEC;
}
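
The (struct tdes_keys *) cast at the top of cc_verify_3des_keys() implies a layout of three consecutive single-DES keys. The struct itself is not shown in this diff; it is presumably equivalent to:

	#include <crypto/des.h>

	/* Assumed layout: three DES keys back to back. key1 == key2 or
	 * key3 == key2 would degenerate 3DES to single/double DES, which is
	 * what the pairwise memcmp() above rejects.
	 */
	struct sketch_tdes_keys {
		u8 key1[DES_KEY_SIZE];
		u8 key2[DES_KEY_SIZE];
		u8 key3[DES_KEY_SIZE];
	};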
@@ -290,11 +260,11 @@ static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
return END_OF_KEYS;
}
-static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
- const u8 *key,
- unsigned int keylen)
+static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
u32 tmp[DES_EXPKEY_WORDS];
unsigned int max_key_buf_size = get_max_keysize(tfm);
@@ -305,44 +275,39 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* STAT_PHASE_0: Init and sanity checks */
-#if SSI_CC_HAS_MULTI2
- /*last byte of key buffer is round number and should not be a part of key size*/
- if (ctx_p->flow_mode == S_DIN_to_MULTI2)
- keylen -= 1;
-#endif /*SSI_CC_HAS_MULTI2*/
-
- if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
+ if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
/* setting HW key slots */
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
- if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
- if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key1 number (%d)\n",
hki->hw_key1);
return -EINVAL;
}
- if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
- (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
- (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
- if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki->hw_key1 == hki->hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
- ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
- if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+ ctx_p->hw.key2_slot =
+ hw_key_to_cc_hw_key(hki->hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key2 number (%d)\n",
hki->hw_key2);
return -EINVAL;
@@ -350,28 +315,28 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
ctx_p->keylen = keylen;
- dev_dbg(dev, "ssi_is_hw_key ret 0");
+ dev_dbg(dev, "cc_is_hw_key ret 0");
return 0;
}
// verify weak keys
if (ctx_p->flow_mode == S_DIN_to_DES) {
- if (unlikely(!des_ekey(tmp, key)) &&
+ if (!des_ekey(tmp, key) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
return -EINVAL;
}
}
- if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
- xts_check_key(tfm, key, keylen) != 0) {
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+ xts_check_key(tfm, key, keylen)) {
dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
- if ((ctx_p->flow_mode == S_DIN_to_DES) &&
- (keylen == DES3_EDE_KEY_SIZE) &&
- ssi_verify_3des_keys(key, keylen) != 0) {
+ if (ctx_p->flow_mode == S_DIN_to_DES &&
+ keylen == DES3_EDE_KEY_SIZE &&
+ cc_verify_3des_keys(key, keylen)) {
dev_dbg(dev, "weak 3DES key");
return -EINVAL;
}
@@ -380,34 +345,24 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
- if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
-#if SSI_CC_HAS_MULTI2
- memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
- ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
- if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
- ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- dev_dbg(dev, "SSI_CC_HAS_MULTI2 einval");
- return -EINVAL;
-#endif /*SSI_CC_HAS_MULTI2*/
- } else {
- memcpy(ctx_p->user.key, key, keylen);
- if (keylen == 24)
- memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
-
- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
- /* sha256 for key2 - use sw implementation */
- int key_len = keylen >> 1;
- int err;
- SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
- desc->tfm = ctx_p->shash_tfm;
-
- err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
- if (err) {
- dev_err(dev, "Failed to hash ESSIV key.\n");
- return err;
- }
+ memcpy(ctx_p->user.key, key, keylen);
+ if (keylen == 24)
+ memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int key_len = keylen >> 1;
+ int err;
+
+ SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+ desc->tfm = ctx_p->shash_tfm;
+
+ err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
+ if (err) {
+ dev_err(dev, "Failed to hash ESSIV key.\n");
+ return err;
}
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
@@ -418,16 +373,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
return 0;
}
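
Because the key buffer is a streaming DMA mapping, the CPU writes in setkey are bracketed by dma_sync_single_for_cpu()/dma_sync_single_for_device() so the device never observes a stale cache line. The bracket, restated minimally (illustrative helper):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static void sketch_update_dma_buf(struct device *dev, dma_addr_t dma,
					  void *virt, const void *src,
					  size_t len)
	{
		/* give the buffer to the CPU ... */
		dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
		memcpy(virt, src, len);
		/* ... and hand it back before the device may read it */
		dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
	}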
-static inline void
-ssi_blkcipher_create_setup_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
@@ -437,11 +389,14 @@ ssi_blkcipher_create_setup_desc(
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
- struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_512)
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ CRYPTO_ALG_BULK_DU_512)
du_size = 512;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_4096)
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ CRYPTO_ALG_BULK_DU_4096)
du_size = 4096;
switch (cipher_mode) {
@@ -456,8 +411,8 @@ ssi_blkcipher_create_setup_desc(
set_cipher_config0(&desc[*seq_size], direction);
set_flow_mode(&desc[*seq_size], flow_mode);
set_cipher_mode(&desc[*seq_size], cipher_mode);
- if ((cipher_mode == DRV_CIPHER_CTR) ||
- (cipher_mode == DRV_CIPHER_OFB)) {
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
} else {
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
@@ -470,7 +425,7 @@ ssi_blkcipher_create_setup_desc(
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
if (flow_mode == S_DIN_to_AES) {
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
} else {
@@ -497,7 +452,7 @@ ssi_blkcipher_create_setup_desc(
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
} else {
@@ -513,7 +468,7 @@ ssi_blkcipher_create_setup_desc(
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -543,62 +498,14 @@ ssi_blkcipher_create_setup_desc(
}
}
-#if SSI_CC_HAS_MULTI2
-static inline void ssi_blkcipher_create_multi2_setup_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- unsigned int ivsize,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
-{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
- int direction = req_ctx->gen_ctx.op_type;
- /* Load system key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
- CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
- /* load data key */
- hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI,
- (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
- CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
- set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
- (*seq_size)++;
-
- /* Set state */
- hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, NS_BIT);
- set_cipher_config0(&desc[*seq_size], direction);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
- (*seq_size)++;
-}
-#endif /*SSI_CC_HAS_MULTI2*/
-
-static inline void
-ssi_blkcipher_create_data_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes,
- void *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ void *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int flow_mode = ctx_p->flow_mode;
@@ -609,17 +516,12 @@ ssi_blkcipher_create_data_desc(
case S_DIN_to_DES:
flow_mode = DIN_DES_DOUT;
break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- flow_mode = DIN_MULTI2_DOUT;
- break;
-#endif /*SSI_CC_HAS_MULTI2*/
default:
dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
- if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
&sg_dma_address(src), nbytes);
dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -682,68 +584,63 @@ ssi_blkcipher_create_data_desc(
}
}
-static int ssi_blkcipher_complete(struct device *dev,
- struct ssi_ablkcipher_ctx *ctx_p,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int ivsize,
- void *areq,
- void __iomem *cc_base)
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
- int completion_error = 0;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
+ struct scatterlist *dst = areq->dst;
+ struct scatterlist *src = areq->src;
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
kfree(req_ctx->iv);
- if (areq) {
- /*
- * The crypto API expects us to set the req->info to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->info, req_ctx->backup_info, ivsize);
- kfree(req_ctx->backup_info);
- } else {
- scatterwalk_map_and_copy(req->info, req->dst,
- (req->nbytes - ivsize),
- ivsize, 0);
- }
-
- ablkcipher_request_complete(areq, completion_error);
- return 0;
+ /*
+ * The crypto API expects us to set the req->info to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->info, req_ctx->backup_info, ivsize);
+ kfree(req_ctx->backup_info);
+ } else if (!err) {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ (req->nbytes - ivsize), ivsize, 0);
}
- return completion_error;
+
+ ablkcipher_request_complete(areq, err);
}
-static int ssi_blkcipher_process(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes,
- void *info, //req info
- unsigned int ivsize,
- void *areq,
- enum drv_crypto_direction direction)
+static int cc_cipher_process(struct ablkcipher_request *req,
+ enum drv_crypto_direction direction)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ void *info = req->info;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
int rc, seq_len = 0, cts_restore_flag = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "%s areq=%p info=%p nbytes=%d\n",
+ dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- "Encrypt" : "Decrypt"), areq, info, nbytes);
+ "Encrypt" : "Decrypt"), req, info, nbytes);
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
- if (unlikely(validate_data_size(ctx_p, nbytes))) {
+ if (validate_data_size(ctx_p, nbytes)) {
dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
@@ -758,7 +655,7 @@ static int ssi_blkcipher_process(
	/* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
- req_ctx->iv = kmalloc(ivsize, GFP_KERNEL);
+ req_ctx->iv = kmalloc(ivsize, flags);
if (!req_ctx->iv) {
rc = -ENOMEM;
goto exit_process;
@@ -766,17 +663,18 @@ static int ssi_blkcipher_process(
memcpy(req_ctx->iv, info, ivsize);
/*For CTS in case of data size aligned to 16 use CBC mode*/
- if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
+ if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+ ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
ctx_p->cipher_mode = DRV_CIPHER_CBC;
cts_restore_flag = 1;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_ablkcipher_complete;
- ssi_req.user_arg = (void *)areq;
+ cc_req.user_cb = (void *)cc_cipher_complete;
+ cc_req.user_arg = (void *)req;
#ifdef ENABLE_CYCLE_COUNT
- ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
#endif
@@ -786,10 +684,9 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_1: Map buffers */
- rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
- ivsize, nbytes, req_ctx->iv,
- src, dst);
- if (unlikely(rc != 0)) {
+ rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst, flags);
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -797,50 +694,35 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_2: Create sequence */
/* Setup processing */
-#if SSI_CC_HAS_MULTI2
- if (ctx_p->flow_mode == S_DIN_to_MULTI2)
- ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
- desc, &seq_len);
- else
-#endif /*SSI_CC_HAS_MULTI2*/
- ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
- desc, &seq_len);
+ cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
- ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq,
- desc, &seq_len);
+ cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+ &seq_len);
/* do we need to generate IV? */
if (req_ctx->is_giv) {
- ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
		/* set the IV size (8/16 B long) */
- ssi_req.ivgen_size = ivsize;
+ cc_req.ivgen_size = ivsize;
}
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
- if (areq) {
- if (unlikely(rc != -EINPROGRESS)) {
- /* Failed to send the request or request completed synchronously */
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- }
-
- } else {
- if (rc != 0) {
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- } else {
- rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
- src, ivsize, NULL,
- ctx_p->drvdata->cc_base);
- }
+ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+ &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ /* Failed to send the request or request completed
+ * synchronously
+ */
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
}
exit_process:
- if (cts_restore_flag != 0)
+ if (cts_restore_flag)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
kfree(req_ctx->backup_info);
kfree(req_ctx->iv);
}
@@ -848,60 +730,28 @@ exit_process:
return rc;
}
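A note on the reworked return handling above: with the switch to cc_send_request() taking the crypto_async_request, the request manager can backlog requests, so -EBUSY now means "queued on the backlog" (when CRYPTO_TFM_REQ_MAY_BACKLOG is set) rather than failure, and is treated like -EINPROGRESS. A minimal caller-side sketch of this convention (wait_async() is an illustrative helper, not part of this driver):

#include <linux/completion.h>
#include <linux/errno.h>

static int wait_async(int rc, struct completion *done)
{
	/* -EINPROGRESS: queued; -EBUSY: backlogged (MAY_BACKLOG set) */
	if (rc == -EINPROGRESS || rc == -EBUSY) {
		wait_for_completion(done);
		rc = 0;	/* final status arrives via the completion callback */
	}
	return rc;	/* anything else is a real submission error */
}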
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
-{
- struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
- struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
- struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-
- ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
- ivsize, areq, cc_base);
-}
-
-/* Async wrap functions */
-
-static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
-{
- struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
-
- ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
-
- return ssi_blkcipher_init(tfm);
-}
-
-static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key,
- unsigned int keylen)
-{
- return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
-}
-
-static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int cc_cipher_encrypt(struct ablkcipher_request *req)
{
- struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
req_ctx->is_giv = false;
+ req_ctx->backup_info = NULL;
- return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
-static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int cc_cipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
/*
* Allocate and save the last IV sized bytes of the source, which will
* be lost in case of in-place decryption and might be needed for CTS.
*/
- req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
+ req_ctx->backup_info = kmalloc(ivsize, flags);
if (!req_ctx->backup_info)
return -ENOMEM;
@@ -909,22 +759,20 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
(req->nbytes - ivsize), ivsize, 0);
req_ctx->is_giv = false;
- return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
/* DX Block cipher alg */
-static struct ssi_alg_template blkcipher_algs[] = {
-/* Async template */
-#if SSI_CC_HAS_AES_XTS
+static struct cc_alg_template blkcipher_algs[] = {
{
.name = "xts(aes)",
.driver_name = "xts-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -939,9 +787,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -955,9 +803,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -965,17 +813,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_XTS*/
-#if SSI_CC_HAS_AES_ESSIV
{
.name = "essiv(aes)",
.driver_name = "essiv-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -989,9 +835,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1005,9 +851,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1015,17 +861,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_ESSIV*/
-#if SSI_CC_HAS_AES_BITLOCKER
{
.name = "bitlocker(aes)",
.driver_name = "bitlocker-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1039,9 +883,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1055,9 +899,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1065,16 +909,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_BITLOCKER*/
{
.name = "ecb(aes)",
.driver_name = "ecb-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = 0,
@@ -1088,9 +931,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1104,9 +947,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1114,16 +957,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
},
-#if SSI_CC_HAS_AES_CTS
{
.name = "cts1(cbc(aes))",
.driver_name = "cts1-cbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1131,16 +973,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
},
-#endif
{
.name = "ctr(aes)",
.driver_name = "ctr-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1154,9 +995,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -1170,9 +1011,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = 0,
@@ -1186,9 +1027,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1202,9 +1043,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = 0,
@@ -1212,47 +1053,13 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
},
-#if SSI_CC_HAS_MULTI2
- {
- .name = "cbc(multi2)",
- .driver_name = "cbc-multi2-dx",
- .blocksize = CC_MULTI2_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
- .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .ivsize = CC_MULTI2_IV_SIZE,
- },
- .cipher_mode = DRV_MULTI2_CBC,
- .flow_mode = S_DIN_to_MULTI2,
- },
- {
- .name = "ofb(multi2)",
- .driver_name = "ofb-multi2-dx",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_encrypt,
- .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .ivsize = CC_MULTI2_IV_SIZE,
- },
- .cipher_mode = DRV_MULTI2_OFB,
- .flow_mode = S_DIN_to_MULTI2,
- },
-#endif /*SSI_CC_HAS_MULTI2*/
};
static
-struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
- *template, struct device *dev)
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
@@ -1265,13 +1072,13 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
alg->cra_module = THIS_MODULE;
- alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
+ alg->cra_ctxsize = sizeof(struct cc_cipher_ctx);
- alg->cra_init = ssi_ablkcipher_init;
- alg->cra_exit = ssi_blkcipher_exit;
+ alg->cra_init = cc_cipher_init;
+ alg->cra_exit = cc_cipher_exit;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
@@ -1283,11 +1090,11 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
return t_alg;
}
-int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
+int cc_cipher_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
- struct ssi_blkcipher_handle *blkcipher_handle =
- drvdata->blkcipher_handle;
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_cipher_handle *blkcipher_handle = drvdata->blkcipher_handle;
+
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
@@ -1303,10 +1110,10 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
return 0;
}
-int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
- struct ssi_blkcipher_handle *ablkcipher_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_cipher_handle *ablkcipher_handle;
+ struct cc_crypto_alg *t_alg;
struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
@@ -1323,7 +1130,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
ARRAY_SIZE(blkcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
- t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg], dev);
+ t_alg = cc_cipher_create_alg(&blkcipher_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -1337,7 +1144,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
rc = crypto_register_alg(&t_alg->crypto_alg);
dev_dbg(dev, "%s alg registration rc = %x\n",
t_alg->crypto_alg.cra_driver_name, rc);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
@@ -1352,6 +1159,6 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
return 0;
fail0:
- ssi_ablkcipher_free(drvdata);
+ cc_cipher_free(drvdata);
return rc;
}
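The decrypt path above must save the last ciphertext block before the hardware runs: the crypto API expects req->info to hold that block on completion, but an in-place operation destroys the source. A minimal sketch of the idea using linear buffers (memcpy() stands in for scatterwalk_map_and_copy(); the helper names are illustrative):

#include <linux/slab.h>
#include <linux/string.h>

/* Before submission: preserve the block req->info must hold afterwards */
static u8 *save_last_block(const u8 *src, unsigned int nbytes,
			   unsigned int ivsize, gfp_t flags)
{
	u8 *backup = kmalloc(ivsize, flags);

	if (backup)
		memcpy(backup, src + nbytes - ivsize, ivsize);
	return backup;
}

/* On completion: hand the saved block back as the chaining IV */
static void restore_last_block(u8 *iv_out, u8 *backup, unsigned int ivsize)
{
	memcpy(iv_out, backup, ivsize);
	kfree(backup);
}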
diff --git a/drivers/staging/ccree/cc_cipher.h b/drivers/staging/ccree/cc_cipher.h
new file mode 100644
index 000000000000..4c181c721723
--- /dev/null
+++ b/drivers/staging/ccree/cc_cipher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct blkcipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum cc_req_dma_buf_type dma_buf_type;
+ u32 in_nents;
+ u32 in_mlli_nents;
+ u32 out_nents;
+ u32 out_mlli_nents;
+	u8 *backup_info; /* store iv for generated IV flow */
+ u8 *iv;
+ bool is_giv;
+ struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
+#define CRYPTO_ALG_BULK_DU_512 0x00002000
+#define CRYPTO_ALG_BULK_DU_4096 0x00004000
+#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
+ CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+ int hw_key1;
+ int hw_key2;
+};
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return false;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+#endif /*__CC_CIPHER_H__*/
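A usage sketch of the cc_is_hw_key() fallback above: on kernels that lack CRYPTO_TFM_REQ_HW_KEY the inline is constant false, so hardware-key-only branches become dead code the compiler drops. The two helpers below are hypothetical stand-ins for the descriptor setup paths:

/* Hypothetical helpers, for illustration only */
static int load_hw_slot(struct crypto_tfm *tfm) { return 0; }
static int load_user_key(struct crypto_tfm *tfm) { return 0; }

static int demo_load_key(struct crypto_tfm *tfm)
{
	/* Constant false without HW-key support: branch is eliminated */
	if (cc_is_hw_key(tfm))
		return load_hw_slot(tfm);

	return load_user_key(tfm);
}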
diff --git a/drivers/staging/ccree/cc_crypto_ctx.h b/drivers/staging/ccree/cc_crypto_ctx.h
index 591f6fdadc59..eb16842d7db9 100644
--- a/drivers/staging/ccree/cc_crypto_ctx.h
+++ b/drivers/staging/ccree/cc_crypto_ctx.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -21,7 +8,7 @@
/* context size */
#ifndef CC_CTX_SIZE_LOG2
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
#define CC_CTX_SIZE_LOG2 8
#else
#define CC_CTX_SIZE_LOG2 7
@@ -72,7 +59,7 @@
#define CC_SHA384_BLOCK_SIZE 128
#define CC_SHA512_BLOCK_SIZE 128
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
#else /* Only up to SHA256 */
@@ -82,15 +69,6 @@
#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
-#define CC_MULTI2_SYSTEM_KEY_SIZE 32
-#define CC_MULTI2_DATA_KEY_SIZE 8
-#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE \
- (CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
-#define CC_MULTI2_BLOCK_SIZE 8
-#define CC_MULTI2_IV_SIZE 8
-#define CC_MULTI2_MIN_NUM_ROUNDS 8
-#define CC_MULTI2_MAX_NUM_ROUNDS 128
-
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
enum drv_engine_type {
@@ -168,14 +146,6 @@ enum drv_hash_hw_mode {
DRV_HASH_HW_RESERVE32B = S32_MAX
};
-enum drv_multi2_mode {
- DRV_MULTI2_NULL = -1,
- DRV_MULTI2_ECB = 0,
- DRV_MULTI2_CBC = 1,
- DRV_MULTI2_OFB = 2,
- DRV_MULTI2_RESERVE32B = S32_MAX
-};
-
/* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
/* drv_crypto_key_type[2] is mapped to cipher_config2 */
enum drv_crypto_key_type {
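The CC_DEV_SHA_MAX knob that replaces CC_SUPPORT_SHA in the #if blocks above is defined to 512 in cc_driver.h (later in this diff) just before cc_crypto_ctx.h is included, which is what selects the SHA-512 limits. A condensed view of the effect, with values taken from this header:

#define CC_DEV_SHA_MAX 512	/* set in cc_driver.h before inclusion */

#if (CC_DEV_SHA_MAX > 256)
#define CC_CTX_SIZE_LOG2 8	/* 256-byte contexts */
#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
#else
#define CC_CTX_SIZE_LOG2 7	/* 128-byte contexts */
#define CC_DIGEST_SIZE_MAX CC_SHA256_DIGEST_SIZE
#endif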
diff --git a/drivers/staging/ccree/cc_debugfs.c b/drivers/staging/ccree/cc_debugfs.c
new file mode 100644
index 000000000000..08f8db489cf0
--- /dev/null
+++ b/drivers/staging/ccree/cc_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+ struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) { \
+ .name = __stringify(_X),\
+ .offset = CC_REG(_X) \
+ }
+
+/*
+ * This is a global var for the dentry of the
+ * debugfs ccree/ dir. It is not tied down to
+ * a specific instance of ccree, hence it is
+ * global.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+ CC_DEBUG_REG(HOST_SIGNATURE),
+ CC_DEBUG_REG(HOST_IRR),
+ CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+ CC_DEBUG_REG(AXIM_MON_ERR),
+ CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+ CC_DEBUG_REG(HOST_IMR),
+ CC_DEBUG_REG(AXIM_CFG),
+ CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+ CC_DEBUG_REG(HOST_VERSION),
+ CC_DEBUG_REG(GPR_HOST),
+ CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+int __init cc_debugfs_global_init(void)
+{
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+ return !cc_debugfs_dir;
+}
+
+void __exit cc_debugfs_global_fini(void)
+{
+ debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_debugfs_ctx *ctx;
+ struct debugfs_regset32 *regset;
+ struct dentry *file;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = debug_regs;
+ regset->nregs = ARRAY_SIZE(debug_regs);
+ regset->base = drvdata->cc_base;
+
+ ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ if (!ctx->dir)
+ return -ENFILE;
+
+ file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ if (!file) {
+ debugfs_remove(ctx->dir);
+ return -ENFILE;
+ }
+
+ file = debugfs_create_bool("coherent", 0400, ctx->dir,
+ &drvdata->coherent);
+
+ if (!file) {
+ debugfs_remove_recursive(ctx->dir);
+ return -ENFILE;
+ }
+
+ drvdata->debugfs = ctx;
+
+ return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+ debugfs_remove_recursive(ctx->dir);
+}
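For reference, what one CC_DEBUG_REG() table entry above expands to once the CC_REG() name-mangling macro from cc_driver.h (later in this diff) is applied:

/* CC_REG(HOST_IRR) -> CC_HOST_IRR_REG_OFFSET, so CC_DEBUG_REG(HOST_IRR) is: */
static struct debugfs_reg32 example_entry = {
	.name	= "HOST_IRR",			/* __stringify(HOST_IRR) */
	.offset	= CC_HOST_IRR_REG_OFFSET,	/* CC_REG(HOST_IRR) */
};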
diff --git a/drivers/staging/ccree/cc_debugfs.h b/drivers/staging/ccree/cc_debugfs.h
new file mode 100644
index 000000000000..5b5320eca7d2
--- /dev/null
+++ b/drivers/staging/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_DEBUGFS_H__*/
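The !CONFIG_DEBUG_FS stubs above let every caller stay unconditional: probe code such as init_cc_resources() in cc_driver.c (below) calls cc_debugfs_init() with no #ifdef, and the call collapses to "return 0" when debugfs is compiled out. A sketch of the calling pattern (demo_probe() is illustrative):

static int demo_probe(struct cc_drvdata *drvdata)
{
	int rc;

	rc = cc_debugfs_init(drvdata);	/* no-op stub without DEBUG_FS */
	if (rc)
		return rc;

	/* ... rest of probe ... */
	return 0;
}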
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/cc_driver.c
index 1a3c481fa92a..3a1cb0c98648 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/cc_driver.c
@@ -1,96 +1,56 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
-#include <crypto/sha.h>
-#include <crypto/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
-
-#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/random.h>
-#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/fcntl.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/sysctl.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
#include <linux/platform_device.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pm.h>
-
-/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
-#include <linux/cache.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/random.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_request_mgr.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_cipher.h"
-#include "ssi_aead.h"
-#include "ssi_hash.h"
-#include "ssi_ivgen.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_pm.h"
-#include "ssi_fips.h"
-
-#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *buf, size_t len)
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
- char prefix[NAME_LEN];
+ char prefix[64];
if (!buf)
return;
- snprintf(prefix, sizeof(prefix), "%s[%lu]: ", name, len);
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
- print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, len,
- false);
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
}
-#endif
static irqreturn_t cc_isr(int irq, void *dev_id)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
struct device *dev = drvdata_to_dev(drvdata);
u32 irr;
u32 imr;
@@ -100,7 +60,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (unlikely(irr == 0)) { /* Probably shared interrupt line */
+ if (irr == 0) { /* Probably shared interrupt line */
dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
@@ -111,23 +71,27 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
- /* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_COMP_IRQ_MASK);
- irr &= ~SSI_COMP_IRQ_MASK;
+ if (irr & CC_COMP_IRQ_MASK) {
+ /* Mask AXI completion interrupt - will be unmasked in
+ * Deferred service handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+ irr &= ~CC_COMP_IRQ_MASK;
complete_request(drvdata);
}
-#ifdef CC_SUPPORT_FIPS
+#ifdef CONFIG_CRYPTO_FIPS
/* TEE FIPS interrupt */
- if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
- /* Mask interrupt - will be unmasked in Deferred service handler */
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
- irr &= ~SSI_GPR0_IRQ_MASK;
+ if (irr & CC_GPR0_IRQ_MASK) {
+ /* Mask interrupt - will be unmasked in Deferred service
+ * handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+ irr &= ~CC_GPR0_IRQ_MASK;
fips_handler(drvdata);
}
#endif
/* AXI error interrupt */
- if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
+ if (irr & CC_AXI_ERR_IRQ_MASK) {
u32 axi_err;
/* Read the AXI error ID */
@@ -135,10 +99,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
axi_err);
- irr &= ~SSI_AXI_ERR_IRQ_MASK;
+ irr &= ~CC_AXI_ERR_IRQ_MASK;
}
- if (unlikely(irr != 0)) {
+ if (irr) {
dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
irr);
/* Just warning */
@@ -147,14 +111,14 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
/* Unmask all AXI interrupt sources AXI_CFG1 register */
val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
dev_dbg(dev, "AXIM_CFG=0x%08X\n",
cc_ioread(drvdata, CC_REG(AXIM_CFG)));
@@ -164,21 +128,10 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = (unsigned int)(~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK |
- SSI_GPR0_IRQ_MASK));
+ val = (unsigned int)(~(CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK |
+ CC_GPR0_IRQ_MASK));
cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
-#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
-#ifdef DX_IRQ_DELAY
- /* Set CC IRQ delay */
- cc_iowrite(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL), DX_IRQ_DELAY);
-#endif
- if (cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)) > 0) {
- dev_dbg(dev, "irq_delay=%d CC cycles\n",
- cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)));
- }
-#endif
-
cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
@@ -199,12 +152,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
- void __iomem *cc_base = NULL;
- struct ssi_drvdata *new_drvdata;
+ struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
- dma_addr_t dma_mask;
+ u64 dma_mask;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -222,18 +174,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
- if (IS_ERR(new_drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
+ if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
- }
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
&req_mem_cc_regs->start, new_drvdata->cc_base);
- cc_base = new_drvdata->cc_base;
-
/* Then IRQ */
new_drvdata->irq = platform_get_irq(plat_dev, 0);
if (new_drvdata->irq < 0) {
@@ -250,10 +198,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ init_completion(&new_drvdata->hw_queue_avail);
+
if (!plat_dev->dev.dma_mask)
plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
- dma_mask = (dma_addr_t)(DMA_BIT_MASK(DMA_BIT_MASK_LEN));
+ dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
while (dma_mask > 0x7fffffffUL) {
if (dma_supported(&plat_dev->dev, dma_mask)) {
rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
@@ -264,8 +214,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
if (rc) {
- dev_err(dev, "Failed in dma_set_mask, mask=%par\n",
- &dma_mask);
+ dev_err(dev, "Failed in dma_set_mask, mask=%par\n", &dma_mask);
return rc;
}
@@ -277,9 +226,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Verify correct mapping */
signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
- if (signature_val != DX_DEV_SIGNATURE) {
+ if (signature_val != CC_DEV_SIGNATURE) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ signature_val, (u32)CC_DEV_SIGNATURE);
rc = -EINVAL;
goto post_clk_err;
}
@@ -287,84 +236,82 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- SSI_DEV_NAME_STR,
+ CC_DEV_NAME_STR,
cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "init_cc_regs failed\n");
goto post_clk_err;
}
-#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "init_stat_db failed\n");
+ rc = cc_debugfs_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed registering debugfs interface\n");
goto post_regs_err;
}
-#endif
- rc = ssi_fips_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
- goto post_sysfs_err;
+ rc = cc_fips_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ goto post_debugfs_err;
}
- rc = ssi_sram_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_sram_mgr_init failed\n");
+ rc = cc_sram_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_sram_mgr_init failed\n");
goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
- ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
- if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
- rc = request_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "request_mgr_init failed\n");
+ rc = cc_req_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_req_mgr_init failed\n");
goto post_sram_mgr_err;
}
- rc = ssi_buffer_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_buffer_mgr_init(new_drvdata);
+ if (rc) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
- rc = ssi_power_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_pm_init(new_drvdata);
+ if (rc) {
dev_err(dev, "ssi_power_mgr_init failed\n");
goto post_buf_mgr_err;
}
- rc = ssi_ivgen_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_ivgen_init failed\n");
+ rc = cc_ivgen_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_ivgen_init failed\n");
goto post_power_mgr_err;
}
/* Allocate crypto algs */
- rc = ssi_ablkcipher_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_ablkcipher_alloc failed\n");
+ rc = cc_cipher_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_cipher_alloc failed\n");
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
- rc = ssi_hash_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_hash_alloc failed\n");
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
goto post_cipher_err;
}
- rc = ssi_aead_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_aead_alloc failed\n");
+ rc = cc_aead_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_aead_alloc failed\n");
goto post_hash_err;
}
@@ -377,25 +324,23 @@ static int init_cc_resources(struct platform_device *plat_dev)
return 0;
post_hash_err:
- ssi_hash_free(new_drvdata);
+ cc_hash_free(new_drvdata);
post_cipher_err:
- ssi_ablkcipher_free(new_drvdata);
+ cc_cipher_free(new_drvdata);
post_ivgen_err:
- ssi_ivgen_fini(new_drvdata);
+ cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
- ssi_power_mgr_fini(new_drvdata);
+ cc_pm_fini(new_drvdata);
post_buf_mgr_err:
- ssi_buffer_mgr_fini(new_drvdata);
+ cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
- request_mgr_fini(new_drvdata);
+ cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
- ssi_sram_mgr_fini(new_drvdata);
+ cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
- ssi_fips_fini(new_drvdata);
-post_sysfs_err:
-#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
-#endif
+ cc_fips_fini(new_drvdata);
+post_debugfs_err:
+ cc_debugfs_fini(new_drvdata);
post_regs_err:
fini_cc_regs(new_drvdata);
post_clk_err:
@@ -403,7 +348,7 @@ post_clk_err:
return rc;
}
-void fini_cc_regs(struct ssi_drvdata *drvdata)
+void fini_cc_regs(struct cc_drvdata *drvdata)
{
/* Mask all interrupts */
cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
@@ -411,26 +356,24 @@ void fini_cc_regs(struct ssi_drvdata *drvdata)
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)platform_get_drvdata(plat_dev);
-
- ssi_aead_free(drvdata);
- ssi_hash_free(drvdata);
- ssi_ablkcipher_free(drvdata);
- ssi_ivgen_fini(drvdata);
- ssi_power_mgr_fini(drvdata);
- ssi_buffer_mgr_fini(drvdata);
- request_mgr_fini(drvdata);
- ssi_sram_mgr_fini(drvdata);
- ssi_fips_fini(drvdata);
-#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
-#endif
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+ cc_aead_free(drvdata);
+ cc_hash_free(drvdata);
+ cc_cipher_free(drvdata);
+ cc_ivgen_fini(drvdata);
+ cc_pm_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
+ cc_req_mgr_fini(drvdata);
+ cc_sram_mgr_fini(drvdata);
+ cc_fips_fini(drvdata);
+ cc_debugfs_fini(drvdata);
fini_cc_regs(drvdata);
cc_clk_off(drvdata);
}
-int cc_clk_on(struct ssi_drvdata *drvdata)
+int cc_clk_on(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
int rc;
@@ -446,7 +389,7 @@ int cc_clk_on(struct ssi_drvdata *drvdata)
return 0;
}
-void cc_clk_off(struct ssi_drvdata *drvdata)
+void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
@@ -461,23 +404,10 @@ static int cc7x_probe(struct platform_device *plat_dev)
{
int rc;
struct device *dev = &plat_dev->dev;
-#if defined(CONFIG_ARM) && defined(CC_DEBUG)
- u32 ctr, cacheline_size;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
- dev_dbg(dev, "CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
-
- asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- dev_dbg(dev, "Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
- (ctr >> 20) & 0xF, ctr & 0xF);
-#endif
/* Map registers space */
rc = init_cc_resources(plat_dev);
- if (rc != 0)
+ if (rc)
return rc;
dev_info(dev, "ARM ccree device initialized\n");
@@ -498,38 +428,44 @@ static int cc7x_remove(struct platform_device *plat_dev)
return 0;
}
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-static const struct dev_pm_ops arm_cc7x_driver_pm = {
- SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
-};
-#endif
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-#define DX_DRIVER_RUNTIME_PM (&arm_cc7x_driver_pm)
-#else
-#define DX_DRIVER_RUNTIME_PM NULL
-#endif
-
-#ifdef CONFIG_OF
static const struct of_device_id arm_cc7x_dev_of_match[] = {
{.compatible = "arm,cryptocell-712-ree"},
{}
};
MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
-#endif
static struct platform_driver cc7x_driver = {
.driver = {
.name = "cc7xree",
-#ifdef CONFIG_OF
.of_match_table = arm_cc7x_dev_of_match,
+#ifdef CONFIG_PM
+ .pm = &ccree_pm,
#endif
- .pm = DX_DRIVER_RUNTIME_PM,
},
.probe = cc7x_probe,
.remove = cc7x_remove,
};
-module_platform_driver(cc7x_driver);
+
+static int __init ccree_init(void)
+{
+ int ret;
+
+ cc_hash_global_init();
+
+ ret = cc_debugfs_global_init();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cc7x_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+ platform_driver_unregister(&cc7x_driver);
+ cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
diff --git a/drivers/staging/ccree/cc_driver.h b/drivers/staging/ccree/cc_driver.h
new file mode 100644
index 000000000000..773ac591c45c
--- /dev/null
+++ b/drivers/staging/ccree/cc_driver.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "3.0"
+
+#define CC_DEV_NAME_STR "cc715ree"
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_DEV_SIGNATURE 0xDCC71200UL
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 3000
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not actually the AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES 3
+struct cc_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, int err);
+ void *user_arg;
+ dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+	/* For the first 'ivgen_dma_addr_len' addresses of this array,
+	 * the generated IV is placed there by send_request().
+	 * The same generated IV is used for all addresses!
+ */
+ /* Amount of 'ivgen_dma_addr' elements to be filled. */
+ unsigned int ivgen_dma_addr_len;
+ /* The generated IV size required, 8/16 B allowed. */
+ unsigned int ivgen_size;
+ struct completion seq_compl; /* request completion */
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: device IRQ number
+ * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver: SeP loaded firmware version
+ */
+struct cc_drvdata {
+ void __iomem *cc_base;
+ int irq;
+ u32 irq_mask;
+ u32 fw_ver;
+ struct completion hw_queue_avail; /* wait for HW queue availability */
+ struct platform_device *plat_dev;
+ cc_sram_addr_t mlli_sram_addr;
+ void *buff_mgr_handle;
+ void *hash_handle;
+ void *aead_handle;
+ void *blkcipher_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ void *ivgen_handle;
+ void *sram_mgr_handle;
+ void *debugfs;
+ struct clk *clk;
+ bool coherent;
+};
+
+struct cc_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct cc_drvdata *drvdata;
+ struct crypto_alg crypto_alg;
+ struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ size_t size)
+{
+ if (cc_dump_bytes)
+ __dump_byte_array(name, the_array, size);
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+ return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+}
+
+#endif /*__CC_DRIVER_H__*/
+
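cc_gfp_flags() above is why several kmalloc(..., GFP_KERNEL) calls in this series became kmalloc(..., flags): a crypto request may be submitted from a context that cannot sleep, and only CRYPTO_TFM_REQ_MAY_SLEEP in the request flags says which allocation mode is safe. Minimal sketch (alloc_req_buf() is illustrative):

#include <linux/slab.h>

static void *alloc_req_buf(struct crypto_async_request *base, size_t len)
{
	gfp_t flags = cc_gfp_flags(base);	/* GFP_KERNEL or GFP_ATOMIC */

	return kmalloc(len, flags);
}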
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/cc_fips.c
index 4aea99fa129f..de08af976b7f 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/cc_fips.c
@@ -1,36 +1,22 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/fips.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_fips.h"
+#include "cc_driver.h"
+#include "cc_fips.h"
static void fips_dsr(unsigned long devarg);
-struct ssi_fips_handle {
+struct cc_fips_handle {
struct tasklet_struct tasklet;
};
/* The function called once at driver entry point to check
 * whether a TEE FIPS error occurred.
*/
-static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
{
u32 reg;
@@ -42,7 +28,7 @@ static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
* This function should push the FIPS REE library status towards the TEE library
 * by writing the error state to the HOST_GPR0 register.
*/
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
{
int val = CC_FIPS_SYNC_REE_STATUS;
@@ -51,9 +37,9 @@ void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
+void cc_fips_fini(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_h = drvdata->fips_handle;
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
if (!fips_h)
return; /* Not allocated */
@@ -65,10 +51,9 @@ void ssi_fips_fini(struct ssi_drvdata *drvdata)
drvdata->fips_handle = NULL;
}
-void fips_handler(struct ssi_drvdata *drvdata)
+void fips_handler(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_handle_ptr =
- drvdata->fips_handle;
+ struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
tasklet_schedule(&fips_handle_ptr->tasklet);
}
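fips_handler() is the interrupt-side half: it only schedules a tasklet, and the real work runs later in fips_dsr() in softirq context. A sketch of that tasklet lifecycle with hypothetical my_* names (the driver's own init and teardown live in cc_fips_init() and cc_fips_fini()):

#include <linux/interrupt.h>

static void my_dsr(unsigned long data)
{
	/* deferred half: runs in softirq context, must not sleep */
}

static struct tasklet_struct my_tasklet;

static void my_probe(unsigned long arg)
{
	tasklet_init(&my_tasklet, my_dsr, arg);	/* once, at probe time */
}

static void my_irq(void)
{
	tasklet_schedule(&my_tasklet);		/* from the IRQ handler */
}

static void my_remove(void)
{
	tasklet_kill(&my_tasklet);		/* wait it out on teardown */
}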
@@ -84,11 +69,11 @@ static inline void tee_fips_error(struct device *dev)
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
- irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
+ irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
state = cc_ioread(drvdata, CC_REG(GPR_HOST));
@@ -105,9 +90,9 @@ static void fips_dsr(unsigned long devarg)
}
/* The function called once at driver entry point .*/
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+int cc_fips_init(struct cc_drvdata *p_drvdata)
{
- struct ssi_fips_handle *fips_h;
+ struct cc_fips_handle *fips_h;
struct device *dev = drvdata_to_dev(p_drvdata);
fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
diff --git a/drivers/staging/ccree/cc_fips.h b/drivers/staging/ccree/cc_fips.h
new file mode 100644
index 000000000000..0d520030095b
--- /dev/null
+++ b/drivers/staging/ccree/cc_fips.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif /*__CC_FIPS_H__*/
+
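The new header uses the standard kernel compile-out idiom: real prototypes when CONFIG_CRYPTO_FIPS is enabled, static inline no-ops otherwise, so call sites stay free of #ifdef clutter. The same shape, sketched for a hypothetical CONFIG_MYDRV_STATS option and mydrv_stats_* API:

struct mydrv_data;

#ifdef CONFIG_MYDRV_STATS
int mydrv_stats_init(struct mydrv_data *d);
void mydrv_stats_fini(struct mydrv_data *d);
#else
/* disabled: compile to nothing, keep call sites unconditional */
static inline int mydrv_stats_init(struct mydrv_data *d) { return 0; }
static inline void mydrv_stats_fini(struct mydrv_data *d) {}
#endif /* CONFIG_MYDRV_STATS */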
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/cc_hash.c
index d79090ed7f9c..8afc39f10bb3 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/cc_hash.c
@@ -1,44 +1,26 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_request_mgr.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_hash.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
-#define SSI_MAX_AHASH_SEQ_LEN 12
-#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
-struct ssi_hash_handle {
- ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
- ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+struct cc_hash_handle {
+ cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
+ cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
- struct completion init_comp;
};
static const u32 digest_len_init[] = {
@@ -53,32 +35,31 @@ static const u32 sha224_init[] = {
static const u32 sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const u64 sha384_init[] = {
+static u64 sha384_init[] = {
SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static const u64 sha512_init[] = {
+static u64 sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
-static void ssi_hash_create_xcbc_setup(
- struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size);
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size);
+static const void *cc_larval_digest(struct device *dev, u32 mode);
-struct ssi_hash_alg {
+struct cc_hash_alg {
struct list_head entry;
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
struct ahash_alg ahash_alg;
};
@@ -88,13 +69,13 @@ struct hash_key_req_ctx {
};
/* hash per-session context */
-struct ssi_hash_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
/* holds the origin digest; the digest after "setkey" if HMAC,*
* the initial digest if HASH.
*/
- u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
- u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
dma_addr_t digest_buff_dma_addr;
@@ -107,33 +88,27 @@ struct ssi_hash_ctx {
bool is_hmac;
};
-static void ssi_hash_create_data_desc(
- struct ahash_req_ctx *areq_ctx,
- struct ssi_hash_ctx *ctx,
- unsigned int flow_mode, struct cc_hw_desc desc[],
- bool is_not_last_data,
- unsigned int *seq_size);
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
-static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
- if (unlikely((mode == DRV_HASH_MD5) ||
- (mode == DRV_HASH_SHA384) ||
- (mode == DRV_HASH_SHA512))) {
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
}
}
-static int ssi_hash_map_result(struct device *dev,
- struct ahash_req_ctx *state,
- unsigned int digestsize)
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
{
state->digest_result_dma_addr =
- dma_map_single(dev, (void *)state->digest_result_buff,
- digestsize,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
digestsize);
return -ENOMEM;
@@ -145,302 +120,331 @@ static int ssi_hash_map_result(struct device *dev,
return 0;
}
-static int ssi_hash_map_request(struct device *dev,
- struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx)
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
- ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc;
- int rc = -ENOMEM;
-
- state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff0)
- goto fail0;
-
- state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff1)
- goto fail_buff0;
-
- state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_result_buff)
- goto fail_buff1;
-
- state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->digest_buff)
- goto fail_digest_result_buff;
-
- dev_dbg(dev, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
- state->digest_buff);
- if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
- state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_bytes_len)
- goto fail1;
- dev_dbg(dev, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
- state->digest_bytes_len);
- } else {
- state->digest_bytes_len = NULL;
- }
-
- state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->opad_digest_buff)
- goto fail2;
-
- dev_dbg(dev, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
- state->opad_digest_buff);
-
- state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->digest_buff);
- goto fail3;
- }
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->digest_buff,
- &state->digest_buff_dma_addr);
+ memset(state, 0, sizeof(*state));
if (is_hmac) {
- dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
- memset(state->digest_buff, 0, ctx->inter_digestsize);
- } else { /*sha*/
- memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
-#if (DX_DEV_SHA_MAX > 256)
- if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
- memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+#if (CC_DEV_SHA_MAX > 256)
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ digest_len_sha512_init, HASH_LEN_SIZE);
else
- memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ memcpy(state->digest_bytes_len,
+ digest_len_init, HASH_LEN_SIZE);
#else
- memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ memcpy(state->digest_bytes_len, digest_len_init,
+ HASH_LEN_SIZE);
#endif
}
- dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (ctx->hash_mode != DRV_HASH_NULL) {
- dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
}
} else { /*hash*/
- /* Copy the initial digests if hash flow. The SRAM contains the
- * initial digests in the expected order for all SHA*
- */
- hw_desc_init(&desc);
- set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
- set_dout_dlli(&desc, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT, 0);
- set_flow_mode(&desc, BYPASS);
-
- rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto fail4;
- }
+		/* Copy the initial digest for the hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
}
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
- state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
HASH_LEN_SIZE, state->digest_bytes_len);
- goto fail4;
+ goto unmap_digest_buf;
}
dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
HASH_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
- } else {
- state->digest_bytes_len_dma_addr = 0;
}
if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
- state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
- goto fail5;
+ goto unmap_digest_len;
}
dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
- } else {
- state->opad_digest_dma_addr = 0;
}
- state->buff0_cnt = 0;
- state->buff1_cnt = 0;
- state->buff_index = 0;
- state->mlli_params.curr_pool = NULL;
return 0;
-fail5:
- if (state->digest_bytes_len_dma_addr != 0) {
- dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
state->digest_bytes_len_dma_addr = 0;
}
-fail4:
- if (state->digest_buff_dma_addr != 0) {
- dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
state->digest_buff_dma_addr = 0;
}
-fail3:
- kfree(state->opad_digest_buff);
-fail2:
- kfree(state->digest_bytes_len);
-fail1:
- kfree(state->digest_buff);
-fail_digest_result_buff:
- kfree(state->digest_result_buff);
- state->digest_result_buff = NULL;
-fail_buff1:
- kfree(state->buff1);
- state->buff1 = NULL;
-fail_buff0:
- kfree(state->buff0);
- state->buff0 = NULL;
-fail0:
- return rc;
+
+ return -EINVAL;
}
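cc_map_req() follows the usual map/check/unwind shape: each dma_map_single() is checked with dma_mapping_error(), and a later failure jumps to labels that unmap everything mapped before it. The idiom in isolation, as a hypothetical two-buffer helper (kernel context):

#include <linux/dma-mapping.h>

static int map_two(struct device *dev, void *a, size_t alen,
		   void *b, size_t blen,
		   dma_addr_t *da, dma_addr_t *db)
{
	*da = dma_map_single(dev, a, alen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *da))
		return -EINVAL;

	*db = dma_map_single(dev, b, blen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *db))
		goto unmap_a;

	return 0;

unmap_a:
	/* undo the first mapping so nothing leaks on the error path */
	dma_unmap_single(dev, *da, alen, DMA_BIDIRECTIONAL);
	*da = 0;
	return -EINVAL;
}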
-static void ssi_hash_unmap_request(struct device *dev,
- struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx)
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
{
- if (state->digest_buff_dma_addr != 0) {
+ if (state->digest_buff_dma_addr) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
- if (state->digest_bytes_len_dma_addr != 0) {
+ if (state->digest_bytes_len_dma_addr) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
&state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
- if (state->opad_digest_dma_addr != 0) {
+ if (state->opad_digest_dma_addr) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
&state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
-
- kfree(state->opad_digest_buff);
- kfree(state->digest_bytes_len);
- kfree(state->digest_buff);
- kfree(state->digest_result_buff);
- kfree(state->buff1);
- kfree(state->buff0);
}
-static void ssi_hash_unmap_result(struct device *dev,
- struct ahash_req_ctx *state,
- unsigned int digestsize, u8 *result)
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
{
- if (state->digest_result_dma_addr != 0) {
- dma_unmap_single(dev,
- state->digest_result_dma_addr,
- digestsize,
- DMA_BIDIRECTIONAL);
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
- memcpy(result,
- state->digest_result_buff,
- digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
}
state->digest_result_dma_addr = 0;
}
-static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
}
-static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
}
-static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ HASH_LEN_SIZE);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
}
-static int ssi_hash_digest(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes, u8 *result,
- void *async_req)
+static int cc_hash_digest(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_digest_addr =
+ cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
int idx = 0;
int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_digest_complete;
- ssi_req.user_arg = (void *)async_req;
- }
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
- /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
+ /* If HMAC then load hash IPAD xor key, if HASH then load initial
+ * digest
+ */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
if (is_hmac) {
@@ -464,7 +468,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
NS_BIT);
} else {
set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- if (likely(nbytes != 0))
+ if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
set_cipher_do(&desc[idx], DO_PAD);
@@ -473,7 +477,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
if (is_hmac) {
/* HW last hash block padding (aka. "DO_PAD") */
@@ -486,98 +490,62 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
set_cipher_do(&desc[idx], DO_PAD);
idx++;
- /* store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash opad xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ idx = cc_fin_hmac(desc, req, idx);
+ }
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
+ return rc;
+}
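Every entry point in the rewritten file is now purely asynchronous: it wires a completion callback through cc_req.user_cb/user_arg and treats -EINPROGRESS (queued) and -EBUSY (backlogged) from cc_send_request() as success; any other return means the request never reached the queue, the callback will not fire, and the submitter must unmap its own buffers. The wiring in miniature, with hypothetical my_* names:

static void my_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = cc_req;

	/* undo whatever the submit path mapped, then notify the API */
	req->base.complete(&req->base, err);
}

static int my_submit(struct cc_drvdata *drvdata, struct ahash_request *req,
		     struct cc_hw_desc *desc, int len)
{
	struct cc_crypto_req cc_req = {};
	int rc;

	cc_req.user_cb = my_complete;
	cc_req.user_arg = req;

	rc = cc_send_request(drvdata, &cc_req, desc, len, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		/* hard failure: callback will not run, clean up here */
	}
	return rc;	/* -EINPROGRESS/-EBUSY propagate to the crypto API */
}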
- /* Get final MAC result */
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- /* TODO */
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- }
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
- return rc;
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
}
-static int ssi_hash_update(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int block_size,
- struct scatterlist *src,
- unsigned int nbytes,
- void *async_req)
+static int cc_hash_update(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
"hmac" : "hash", nbytes);
@@ -587,8 +555,9 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
return 0;
}
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
- if (unlikely(rc)) {
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
nbytes);
@@ -599,30 +568,17 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_update_complete;
- ssi_req.user_arg = async_req;
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
}
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ idx = cc_restore_hash(desc, ctx, state, idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
@@ -637,220 +593,124 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
+ HASH_LEN_SIZE, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
idx++;
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_hash_finup(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes,
- u8 *result,
- void *async_req)
+static int cc_hash_finup(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = async_req;
- }
-
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
-
- if (is_hmac) {
- /* Store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash OPAD xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ idx = cc_restore_hash(desc, ctx, state, idx);
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Perform HASH update on last digest */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
- }
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- /* TODO */
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_hash_final(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes,
- u8 *result,
- void *async_req)
+static int cc_hash_final(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = async_req;
- }
-
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ idx = cc_restore_hash(desc, ctx, state, idx);
/* "DO-PAD" must be enabled only when writing current length to HW */
hw_desc_init(&desc[idx]);
@@ -862,121 +722,56 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
- if (is_hmac) {
- /* Store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash OPAD xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Perform HASH update on last digest */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
- }
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
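From the consumer side, the init/update/final/finup/digest handlers above are reached through the regular ahash API. A self-contained sketch of such a caller; demo_sha256() is hypothetical, and any registered sha256 ahash provider (not only this driver) would back it:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* waits out the -EINPROGRESS/-EBUSY returned by async drivers */
	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}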
-static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
+static int cc_hash_init(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- state->xcbc_count = 0;
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
- ssi_hash_map_request(dev, state, ctx);
+ cc_init_req(dev, state, ctx);
return 0;
}
-static int ssi_hash_setkey(void *hash,
- const u8 *key,
- unsigned int keylen,
- bool synchronize)
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
- struct ssi_crypto_req ssi_req = {};
- struct ssi_hash_ctx *ctx = NULL;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
int blocksize = 0;
int digestsize = 0;
int i, idx = 0, rc = 0;
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- ssi_sram_addr_t larval_addr;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_addr;
struct device *dev;
- ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+ ctx = crypto_ahash_ctx(ahash);
dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "start keylen: %d", keylen);
- blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
- digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
- larval_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
	/* A keylen of ZERO bytes selects the HASH flow; any
	 * NON-ZERO value selects the HMAC flow
@@ -985,12 +780,10 @@ static int ssi_hash_setkey(void *hash,
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
- if (keylen != 0) {
- ctx->key_params.key_dma_addr = dma_map_single(
- dev, (void *)key,
- keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- ctx->key_params.key_dma_addr))) {
+ if (keylen) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1032,14 +825,15 @@ static int ssi_hash_setkey(void *hash,
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0, (blocksize - digestsize));
set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- digestsize),
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
(blocksize - digestsize), NS_BIT, 0);
idx++;
} else {
@@ -1052,7 +846,7 @@ static int ssi_hash_setkey(void *hash,
keylen, NS_BIT, 0);
idx++;
- if ((blocksize - keylen) != 0) {
+		if (blocksize - keylen) {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0,
(blocksize - keylen));
@@ -1073,8 +867,8 @@ static int ssi_hash_setkey(void *hash,
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1114,7 +908,9 @@ static int ssi_hash_setkey(void *hash,
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
- /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+ * of the first HASH "update" state)
+ */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
if (i > 0) /* Not first iteration */
@@ -1128,11 +924,11 @@ static int ssi_hash_setkey(void *hash,
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
if (rc)
- crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
@@ -1143,14 +939,14 @@ out:
return rc;
}
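The hmac_pad_const pair used above (HMAC_IPAD_CONST and HMAC_OPAD_CONST, i.e. repeated 0x36 and 0x5c bytes) is the standard RFC 2104 derivation: once the key has been hashed or zero-padded to the block size, as the descriptor sequence arranges, it is XORed with each constant, and the two resulting blocks are hashed once to yield the cached IPAD/OPAD intermediate digests. The same derivation in plain software C, for illustration only:

#include <string.h>
#include <stddef.h>

/* RFC 2104: derive the ipad/opad blocks from a block-sized key. */
static void hmac_pads(const unsigned char *key, size_t blocksize,
		      unsigned char *ipad, unsigned char *opad)
{
	size_t i;

	for (i = 0; i < blocksize; i++) {
		ipad[i] = key[i] ^ 0x36;	/* HMAC_IPAD_CONST byte */
		opad[i] = key[i] ^ 0x5c;	/* HMAC_OPAD_CONST byte */
	}
	/* the driver then hashes ipad/opad once and caches the two
	 * intermediate digests instead of the raw key
	 */
}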
-static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
{
- struct ssi_crypto_req ssi_req = {};
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int idx = 0, rc = 0;
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
@@ -1165,10 +961,9 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
- ctx->key_params.key_dma_addr = dma_map_single(
- dev, (void *)key,
- keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1191,30 +986,30 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K2_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K3_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc != 0)
+ if (rc)
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
@@ -1225,11 +1020,10 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
return rc;
}
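The three set_din_const() descriptors above perform the AES-XCBC-MAC (RFC 3566) subkey derivation: K1, K2 and K3 are AES encryptions of the constant blocks 0x01..01, 0x02..02 and 0x03..03 under the user key, parked at XCBC_MAC_K1/K2/K3_OFFSET in the opad_tmp_keys buffer. A software sketch under an assumed one-block primitive; aes_encrypt_block() is hypothetical, not a real kernel API:

#include <string.h>

/* Assumed primitive, provided elsewhere: out = AES-128-Encrypt(key, in). */
void aes_encrypt_block(const unsigned char key[16],
		       const unsigned char in[16], unsigned char out[16]);

/* RFC 3566: derive the three XCBC subkeys from the user key. */
static void xcbc_derive(const unsigned char key[16],
			unsigned char k1[16], unsigned char k2[16],
			unsigned char k3[16])
{
	unsigned char c[16];

	memset(c, 0x01, sizeof(c));
	aes_encrypt_block(key, c, k1);	/* K1 = E(K, 0x0101..01) */
	memset(c, 0x02, sizeof(c));
	aes_encrypt_block(key, c, k2);	/* K2 = E(K, 0x0202..02) */
	memset(c, 0x03, sizeof(c));
	aes_encrypt_block(key, c, k3);	/* K3 = E(K, 0x0303..03) */
}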
-#if SSI_CC_HAS_CMAC
-static int ssi_cmac_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
{
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
@@ -1253,8 +1047,10 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
- if (keylen == 24)
- memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
@@ -1263,20 +1059,19 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
return 0;
}
-#endif
-static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (ctx->digest_buff_dma_addr != 0) {
+ if (ctx->digest_buff_dma_addr) {
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
- if (ctx->opad_tmp_keys_dma_addr != 0) {
+ if (ctx->opad_tmp_keys_dma_addr) {
dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
@@ -1288,13 +1083,15 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
ctx->key_params.keylen = 0;
}
-static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
ctx->key_params.keylen = 0;
- ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, (void *)ctx->digest_buff,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
@@ -1304,7 +1101,10 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
- ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
@@ -1319,51 +1119,52 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
return 0;
fail:
- ssi_hash_free_ctx(ctx);
+ cc_free_ctx(ctx);
return -ENOMEM;
}
-static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
+static int cc_cra_init(struct crypto_tfm *tfm)
{
- struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct hash_alg_common *hash_alg_common =
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
container_of(hash_alg_common, struct ahash_alg, halg);
- struct ssi_hash_alg *ssi_alg =
- container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
- ctx->hash_mode = ssi_alg->hash_mode;
- ctx->hw_mode = ssi_alg->hw_mode;
- ctx->inter_digestsize = ssi_alg->inter_digestsize;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
- return ssi_hash_alloc_ctx(ctx);
+ return cc_alloc_ctx(ctx);
}
-static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
+static void cc_cra_exit(struct crypto_tfm *tfm)
{
- struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- dev_dbg(dev, "ssi_hash_cra_exit");
- ssi_hash_free_ctx(ctx);
+	dev_dbg(dev, "cc_cra_exit\n");
+ cc_free_ctx(ctx);
}
-static int ssi_mac_update(struct ahash_request *req)
+static int cc_mac_update(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int rc;
u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
if (req->nbytes == 0) {
/* no real updates required */
@@ -1372,8 +1173,9 @@ static int ssi_mac_update(struct ahash_request *req)
state->xcbc_count++;
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
- if (unlikely(rc)) {
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, "data size does not require HW update %x\n",
req->nbytes);
@@ -1384,12 +1186,17 @@ static int ssi_mac_update(struct ahash_request *req)
return -ENOMEM;
}
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
else
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
@@ -1402,32 +1209,32 @@ static int ssi_mac_update(struct ahash_request *req)
idx++;
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_update_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_final(struct ahash_request *req)
+static int cc_mac_final(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_size, key_len;
u32 digestsize = crypto_ahash_digestsize(tfm);
-
- u32 rem_cnt = state->buff_index ? state->buff1_cnt :
- state->buff0_cnt;
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_size = CC_AES_128_BIT_KEY_SIZE;
@@ -1440,34 +1247,45 @@ static int ssi_mac_final(struct ahash_request *req)
dev_dbg(dev, "==== final xcbc remainder (%d) ====\n", rem_cnt);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
- if (state->xcbc_count && (rem_cnt == 0)) {
+ if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
set_din_type(&desc[idx], DMA_DLLI,
- (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
set_key_size_aes(&desc[idx], key_len);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- /* Initiate decryption of block state to previous block_state-XOR-M[n] */
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
@@ -1484,9 +1302,9 @@ static int ssi_mac_final(struct ahash_request *req)
}
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
else
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
@@ -1496,7 +1314,7 @@ static int ssi_mac_final(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else if (rem_cnt > 0) {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
@@ -1515,53 +1333,64 @@ static int ssi_mac_final(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_finup(struct ahash_request *req)
+static int cc_mac_finup(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_len = 0;
u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "==== finup xcbc (%d) ====\n", req->nbytes);
if (state->xcbc_count > 0 && req->nbytes == 0) {
dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
- return ssi_mac_final(req);
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
@@ -1572,7 +1401,7 @@ static int ssi_mac_finup(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
@@ -1586,54 +1415,61 @@ static int ssi_mac_finup(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_digest(struct ahash_request *req)
+static int cc_mac_digest(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 key_len;
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "==== digest mac (%d) ====\n", req->nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_digest_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
@@ -1644,7 +1480,7 @@ static int ssi_mac_digest(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
@@ -1658,96 +1494,32 @@ static int ssi_mac_digest(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-//ahash wrap functions
-static int ssi_ahash_digest(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_update(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
-
- return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
-}
-
-static int ssi_ahash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_final(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_init(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
-
- dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
-
- return ssi_hash_init(state, ctx);
-}
-
-static int ssi_ahash_export(struct ahash_request *req, void *out)
+static int cc_hash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct ahash_req_ctx *state = ahash_request_ctx(req);
- u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
- u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
- state->buff0_cnt;
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
const u32 tmp = CC_EXPORT_MAGIC;
memcpy(out, &tmp, sizeof(u32));
out += sizeof(u32);
- dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
- if (state->digest_bytes_len_dma_addr) {
- dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
- } else {
- /* Poison the unused exported digest len field. */
- memset(out, 0x5F, HASH_LEN_SIZE);
- }
+ memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
out += HASH_LEN_SIZE;
memcpy(out, &curr_buff_cnt, sizeof(u32));
@@ -1755,77 +1527,43 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
memcpy(out, curr_buff, curr_buff_cnt);
- /* No sync for device ineeded since we did not change the data,
- * we only copy it
- */
-
return 0;
}
-static int ssi_ahash_import(struct ahash_request *req, const void *in)
+static int cc_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
- int rc;
memcpy(&tmp, in, sizeof(u32));
- if (tmp != CC_EXPORT_MAGIC) {
- rc = -EINVAL;
- goto out;
- }
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
in += sizeof(u32);
- rc = ssi_hash_init(state, ctx);
- if (rc)
- goto out;
+ cc_init_req(dev, state, ctx);
- dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
- if (state->digest_bytes_len_dma_addr) {
- dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
- }
+ memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
in += HASH_LEN_SIZE;
- dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
-
- if (state->digest_bytes_len_dma_addr)
- dma_sync_single_for_device(dev,
- state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
-
- state->buff_index = 0;
-
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
- if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
- rc = -EINVAL;
- goto out;
- }
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
in += sizeof(u32);
- state->buff0_cnt = tmp;
- memcpy(state->buff0, in, state->buff0_cnt);
-
-out:
- return rc;
-}
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
-static int ssi_ahash_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
-{
- return ssi_hash_setkey((void *)ahash, key, keylen, false);
+ return 0;
}
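
The export format above is fixed: a 32-bit magic, the intermediate digest, the running byte length, then the staging-buffer count and its live bytes; the CC_STATE_SIZE() macro just below accounts for exactly these pieces. A minimal userspace sketch of that layout, assuming the SHA-256 build configuration (the helper name and buffer handling are illustrative, not driver code):

    #include <stdint.h>
    #include <string.h>

    #define CC_EXPORT_MAGIC 0xC2EE1070U
    #define HASH_LEN_SIZE   8    /* CC_DEV_SHA_MAX <= 256 build */
    #define MAX_BLOCK       64   /* SHA256_BLOCK_SIZE */

    /* Mirrors CC_STATE_SIZE(): digest + len + block buffer + 2 x u32 */
    #define STATE_SIZE(d) ((d) + HASH_LEN_SIZE + MAX_BLOCK + 2 * sizeof(uint32_t))

    /* Pack a partial state the way cc_hash_export() lays it out. */
    static void export_state(uint8_t *out, const uint8_t *digest, size_t dsize,
                             const uint8_t *len, const uint8_t *buf, uint32_t cnt)
    {
            uint32_t magic = CC_EXPORT_MAGIC;

            memcpy(out, &magic, sizeof(magic));
            out += sizeof(magic);
            memcpy(out, digest, dsize);
            out += dsize;
            memcpy(out, len, HASH_LEN_SIZE);
            out += HASH_LEN_SIZE;
            memcpy(out, &cnt, sizeof(cnt));
            out += sizeof(cnt);
            memcpy(out, buf, cnt); /* only the live bytes are copied */
    }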
-struct ssi_hash_template {
+struct cc_hash_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
@@ -1836,14 +1574,14 @@ struct ssi_hash_template {
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
};
#define CC_STATE_SIZE(_x) \
- ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+ ((_x) + HASH_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
/* hash descriptors */
-static struct ssi_hash_template driver_hash[] = {
+static struct cc_hash_template driver_hash[] = {
//Asynchronous hash templates
{
.name = "sha1",
@@ -1853,14 +1591,14 @@ static struct ssi_hash_template driver_hash[] = {
.blocksize = SHA1_BLOCK_SIZE,
.synchronize = false,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
@@ -1877,14 +1615,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha256-dx",
.blocksize = SHA256_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
@@ -1901,14 +1639,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha224-dx",
.blocksize = SHA224_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
@@ -1918,7 +1656,7 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
},
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
{
.name = "sha384",
.driver_name = "sha384-dx",
@@ -1926,14 +1664,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha384-dx",
.blocksize = SHA384_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
@@ -1950,14 +1688,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha512-dx",
.blocksize = SHA512_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
@@ -1975,14 +1713,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-md5-dx",
.blocksize = MD5_HMAC_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
@@ -1997,14 +1735,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "xcbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_mac_update,
- .final = ssi_mac_final,
- .finup = ssi_mac_finup,
- .digest = ssi_mac_digest,
- .setkey = ssi_xcbc_setkey,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
@@ -2014,20 +1752,19 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
},
-#if SSI_CC_HAS_CMAC
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_mac_update,
- .final = ssi_mac_final,
- .finup = ssi_mac_finup,
- .digest = ssi_mac_digest,
- .setkey = ssi_cmac_setkey,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
@@ -2037,15 +1774,12 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
},
-#endif
-
};
-static struct ssi_hash_alg *
-ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
- bool keyed)
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
{
- struct ssi_hash_alg *t_crypto_alg;
+ struct cc_hash_alg *t_crypto_alg;
struct crypto_alg *alg;
struct ahash_alg *halg;
@@ -2053,7 +1787,6 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
-
t_crypto_alg->ahash_alg = template->template_ahash;
halg = &t_crypto_alg->ahash_alg;
alg = &halg->halg.base;
@@ -2071,13 +1804,13 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
template->driver_name);
}
alg->cra_module = THIS_MODULE;
- alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
- alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_exit = ssi_hash_cra_exit;
+ alg->cra_exit = cc_cra_exit;
- alg->cra_init = ssi_ahash_cra_init;
+ alg->cra_init = cc_cra_init;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_type = &crypto_ahash_type;
@@ -2089,36 +1822,32 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
return t_crypto_alg;
}
-int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
- struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
- ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
- struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
-#if (DX_DEV_SHA_MAX > 256)
- int i;
-#endif
/* Copy-to-sram digest-len */
- ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_init);
larval_seq_len = 0;
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
/* Copy-to-sram digest-len for sha384/512 */
- ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_sha512_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_sha512_init);
@@ -2129,88 +1858,89 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
- ARRAY_SIZE(md5_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(md5_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
- ARRAY_SIZE(sha1_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha1_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
- ARRAY_SIZE(sha224_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha224_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
- ARRAY_SIZE(sha256_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
-#if (DX_DEV_SHA_MAX > 256)
- /* We are forced to swap each double-word larval before copying to sram */
- for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
- const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
- const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
-
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- }
+#if (CC_DEV_SHA_MAX > 256)
+ cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc = %d)\n", rc);
+ if (rc)
goto init_digest_const_err;
- }
+ sram_buff_ofs += sizeof(sha384_init);
larval_seq_len = 0;
- for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
- const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
- const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
-
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- }
+ cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc = %d)\n", rc);
+ if (rc)
goto init_digest_const_err;
- }
#endif
init_digest_const_err:
return rc;
}
-int ssi_hash_alloc(struct ssi_drvdata *drvdata)
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
{
- struct ssi_hash_handle *hash_handle;
- ssi_sram_addr_t sram_buff;
+ int i;
+ u32 tmp;
+
+ for (i = 0; i < size; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i + 1];
+ buf[i + 1] = tmp;
+ }
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+ cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+ cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
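
cc_swap_dwords() runs once at module init: the SHA-384/512 larval constants are stored as 64-bit words, but the engine consumes them as swapped 32-bit pairs, so the tables are permuted in place before any copy to SRAM. A standalone sketch of the same permutation (the sample constant is just the first SHA-512 larval word):

    #include <stdint.h>
    #include <stdio.h>

    /* size is counted in 32-bit words, as in the driver (ARRAY_SIZE * 2) */
    static void swap_dwords(uint32_t *buf, unsigned long size)
    {
            for (unsigned long i = 0; i < size; i += 2) {
                    uint32_t tmp = buf[i];

                    buf[i] = buf[i + 1];
                    buf[i + 1] = tmp;
            }
    }

    int main(void)
    {
            uint64_t larval[1] = { 0x6a09e667f3bcc908ULL };

            swap_dwords((uint32_t *)larval, 2);
            printf("%016llx\n", (unsigned long long)larval[0]);
            return 0;
    }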
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ cc_sram_addr_t sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
@@ -2224,7 +1954,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
drvdata->hash_handle = hash_handle;
sram_size_to_alloc = sizeof(digest_len_init) +
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
sizeof(digest_len_sha512_init) +
sizeof(sha384_init) +
sizeof(sha512_init) +
@@ -2234,7 +1964,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
sizeof(sha224_init) +
sizeof(sha256_init);
- sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
@@ -2245,19 +1975,19 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
hash_handle->digest_len_sram_addr = sram_buff;
/* must be set before the alg registration as it is used there */
- rc = ssi_hash_init_sram_digest_consts(drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
- struct ssi_hash_alg *t_alg;
+ struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
/* register hmac version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2267,22 +1997,21 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
- list_add_tail(&t_alg->entry,
- &hash_handle->hash_list);
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
- if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
- (hw_mode == DRV_CIPHER_CMAC))
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
continue;
/* register hash version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2292,7 +2021,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2310,13 +2039,14 @@ fail:
return rc;
}
-int ssi_hash_free(struct ssi_drvdata *drvdata)
+int cc_hash_free(struct cc_drvdata *drvdata)
{
- struct ssi_hash_alg *t_hash_alg, *hash_n;
- struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
if (hash_handle) {
- list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
+ list_for_each_entry_safe(t_hash_alg, hash_n,
+ &hash_handle->hash_list, entry) {
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
list_del(&t_hash_alg->entry);
kfree(t_hash_alg);
@@ -2328,14 +2058,13 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
return 0;
}
-static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
/* Setup XCBC MAC K1 */
hw_desc_init(&desc[idx]);
@@ -2351,8 +2080,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K2 */
hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K2_OFFSET),
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
@@ -2363,8 +2092,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K3 */
hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K3_OFFSET),
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
@@ -2386,14 +2115,13 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
*seq_size = idx;
}
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
/* Setup CMAC Key */
hw_desc_init(&desc[idx]);
@@ -2420,17 +2148,15 @@ static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
*seq_size = idx;
}
-static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
- struct ssi_hash_ctx *ctx,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- bool is_not_last_data,
- unsigned int *seq_size)
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
@@ -2438,7 +2164,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
set_flow_mode(&desc[idx], flow_mode);
idx++;
} else {
- if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
dev_dbg(dev, " NULL mode\n");
/* nothing to build */
return;
@@ -2466,6 +2192,29 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
*seq_size = idx;
}
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return md5_init;
+ case DRV_HASH_SHA1:
+ return sha1_init;
+ case DRV_HASH_SHA224:
+ return sha224_init;
+ case DRV_HASH_SHA256:
+ return sha256_init;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ return sha384_init;
+ case DRV_HASH_SHA512:
+ return sha512_init;
+#endif
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return md5_init;
+ }
+}
+
/*!
* Gets the address of the initial digest in SRAM
* according to the given hash mode
@@ -2473,12 +2222,12 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
* \param drvdata
 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 The address of the inital digest in SRAM
+ * \return u32 The address of the initial digest in SRAM
*/
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
- struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
switch (mode) {
@@ -2498,7 +2247,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init));
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
return (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
@@ -2521,12 +2270,12 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
return hash_handle->larval_digest_sram_addr;
}
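
The switch above mirrors the copy order used in cc_init_hash_sram(): each case returns the pool base plus the cumulative size of the digests copied before it. A small model of that arithmetic, assuming the usual larval-state widths (the lookup helper is illustrative):

    #include <stdint.h>

    /* md5, sha1, sha224 (full 256-bit state), sha256, in copy order */
    static const uint32_t larval_sizes[] = { 16, 20, 32, 32 };

    static uint32_t larval_offset(unsigned int mode_idx)
    {
            uint32_t ofs = 0;

            for (unsigned int i = 0; i < mode_idx; i++)
                    ofs += larval_sizes[i];
            return ofs; /* add to hash_handle->larval_digest_sram_addr */
    }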
-ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
- struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
- ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
@@ -2534,7 +2283,7 @@ ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(digest_len_init);
diff --git a/drivers/staging/ccree/cc_hash.h b/drivers/staging/ccree/cc_hash.h
new file mode 100644
index 000000000000..aa42b8f4348d
--- /dev/null
+++ b/drivers/staging/ccree/cc_hash.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#if (CC_DEV_SHA_MAX > 256)
+#define HASH_LEN_SIZE 16
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+#else
+#define HASH_LEN_SIZE 8
+#define CC_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
+#endif
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+	u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
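+
These four helpers implement a ping-pong staging area: the current slot holds the sub-block tail carried over from the previous update, and the `^ 1` slot receives the new tail while the old one is fed to the engine; the index flip itself happens in the buffer manager, not in this header. A minimal model, assuming the flip-and-reset step looks roughly like this:

    #include <stdint.h>

    struct two_buf {
            uint8_t  buffers[2][64];
            uint32_t buf_cnt[2];
            uint32_t buff_index; /* 0 or 1 */
    };

    static uint8_t *cur_buf(struct two_buf *s)
    {
            return s->buffers[s->buff_index];
    }

    static uint8_t *next_buf(struct two_buf *s)
    {
            return s->buffers[s->buff_index ^ 1];
    }

    /* After the pending bytes are queued to HW, swap roles (illustrative). */
    static void flip_bufs(struct two_buf *s)
    {
            s->buff_index ^= 1;
            s->buf_cnt[s->buff_index ^ 1] = 0; /* retired slot starts empty */
    }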
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the SRAM address of the initial digest length
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
+
diff --git a/drivers/staging/ccree/cc_host_regs.h b/drivers/staging/ccree/cc_host_regs.h
new file mode 100644
index 000000000000..69ef2fa0cb9b
--- /dev/null
+++ b/drivers/staging/ccree/cc_host_regs.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REG_OFFSET 0xA04UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_REG_OFFSET 0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define CC_GPR_HOST_REG_OFFSET 0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__CC_HOST_H__
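
Each field in this header is described by a SHIFT/SIZE pair rather than a ready-made mask, so consumers derive the mask themselves. A minimal sketch of the usual pattern, using one IRR field from above (the mask macro and helper are illustrative, not part of the header):

    #include <stdint.h>

    #define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
    #define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE  0x1UL

    /* Build a field mask from a SHIFT/SIZE pair. */
    #define CC_FIELD_MASK(name) \
            (((1UL << name##_BIT_SIZE) - 1) << name##_BIT_SHIFT)

    static inline int irr_axi_err(uint32_t irr)
    {
            return !!(irr & CC_FIELD_MASK(CC_HOST_IRR_AXI_ERR_INT));
    }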
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index 2ae0f655e7a0..a79f28cec5ae 100644
--- a/drivers/staging/ccree/cc_hw_queue_defs.h
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
#include <linux/types.h>
-#include "dx_crys_kernel.h"
+#include "cc_kernel_regs.h"
#include <linux/bitfield.h>
/******************************************************************************
@@ -30,14 +17,12 @@
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15
-#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name
-
#define CC_REG_LOW(word, name) \
- (DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+ (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
#define CC_REG_HIGH(word, name) \
(CC_REG_LOW(word, name) + \
- DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+ CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
#define CC_GENMASK(word, name) \
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
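
CC_REG_LOW/CC_REG_HIGH turn the generated WORDn SHIFT/SIZE constants into a GENMASK() range, which then pairs with FIELD_PREP() from <linux/bitfield.h> when descriptor words are packed. A hedged sketch of that use (the DIN_DMA_MODE field name is assumed to exist in the generated cc_kernel_regs.h; this helper is illustrative, not a hunk from this patch):

    #define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)

    static inline void set_din_mode(u32 *word1, u32 mode)
    {
            *word1 &= ~WORD1_DIN_DMA_MODE;                  /* clear field */
            *word1 |= FIELD_PREP(WORD1_DIN_DMA_MODE, mode); /* pack value */
    }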
@@ -122,7 +107,6 @@ enum cc_flow_mode {
AES_to_AES_to_HASH_and_DOUT = 13,
AES_to_AES_to_HASH = 14,
AES_to_HASH_and_AES = 15,
- DIN_MULTI2_DOUT = 16,
DIN_AES_AESMAC = 17,
HASH_to_DOUT = 18,
/* setup flows */
@@ -130,7 +114,6 @@ enum cc_flow_mode {
S_DIN_to_AES2 = 33,
S_DIN_to_DES = 34,
S_DIN_to_RC4 = 35,
- S_DIN_to_MULTI2 = 36,
S_DIN_to_HASH = 37,
S_AES_to_DOUT = 38,
S_AES2_to_DOUT = 39,
@@ -203,6 +186,19 @@ enum cc_hw_des_key_size {
END_OF_DES_KEYS = S32_MAX,
};
+enum cc_hash_conf_pad {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/cc_ivgen.c
index 3f082f41ae8f..c47f419b277b 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/cc_ivgen.c
@@ -1,38 +1,23 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include <linux/platform_device.h>
#include <crypto/ctr.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_ivgen.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_buffer_mgr.h"
+#include "cc_driver.h"
+#include "cc_ivgen.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_buffer_mgr.h"
/* The max. size of pool *MUST* be <= SRAM total size */
-#define SSI_IVPOOL_SIZE 1024
+#define CC_IVPOOL_SIZE 1024
/* The first 32B fraction of the pool is dedicated to the
* next encryption "key" & "IV" for pool regeneration
*/
-#define SSI_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
-#define SSI_IVPOOL_GEN_SEQ_LEN 4
+#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define CC_IVPOOL_GEN_SEQ_LEN 4
/**
- * struct ssi_ivgen_ctx -IV pool generation context
+ * struct cc_ivgen_ctx -IV pool generation context
+ * struct cc_ivgen_ctx - IV pool generation context
* @pool: the start address of the iv-pool resides in internal RAM
* @ctr_key_dma: address of pool's encryption key material in internal RAM
* @ctr_iv_dma: address of pool's counter iv in internal RAM
@@ -40,31 +25,29 @@
* @pool_meta: virt. address of the initial enc. key/IV
* @pool_meta_dma: phys. address of the initial enc. key/IV
*/
-struct ssi_ivgen_ctx {
- ssi_sram_addr_t pool;
- ssi_sram_addr_t ctr_key;
- ssi_sram_addr_t ctr_iv;
+struct cc_ivgen_ctx {
+ cc_sram_addr_t pool;
+ cc_sram_addr_t ctr_key;
+ cc_sram_addr_t ctr_iv;
u32 next_iv_ofs;
u8 *pool_meta;
dma_addr_t pool_meta_dma;
};
/*!
- * Generates SSI_IVPOOL_SIZE of random bytes by
+ * Generates CC_IVPOOL_SIZE of random bytes by
* encrypting 0's using AES128-CTR.
*
* \param ivgen iv-pool context
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*/
-static int ssi_ivgen_generate_pool(
- struct ssi_ivgen_ctx *ivgen_ctx,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len)
+static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
unsigned int idx = *iv_seq_len;
- if ((*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
+ if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
@@ -97,15 +80,15 @@ static int ssi_ivgen_generate_pool(
/* Generate IV pool */
hw_desc_init(&iv_seq[idx]);
- set_din_const(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
- set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
+ set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
idx++;
*iv_seq_len = idx; /* Update sequence length */
/* queue ordering assures pool readiness */
- ivgen_ctx->next_iv_ofs = SSI_IVPOOL_META_SIZE;
+ ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
return 0;
}
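A capacity check, derived only from the constants above: each regeneration yields (CC_IVPOOL_SIZE - CC_IVPOOL_META_SIZE) / CC_AES_IV_SIZE usable IVs before cc_gen_iv_pool() must run again.

    /* Sketch: usable IVs per pool generation (CC_AES_IV_SIZE is the
     * 16-byte AES block size).
     *
     *   (1024 - 32) / 16 == 62 IVs before the pool is regenerated
     */
    #define POOL_IVS ((CC_IVPOOL_SIZE - CC_IVPOOL_META_SIZE) / CC_AES_IV_SIZE)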
@@ -118,15 +101,15 @@ static int ssi_ivgen_generate_pool(
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
unsigned int iv_seq_len = 0;
int rc;
/* Generate initial enc. key/iv */
- get_random_bytes(ivgen_ctx->pool_meta, SSI_IVPOOL_META_SIZE);
+ get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
/* The first 32B reserved for the enc. Key/IV */
ivgen_ctx->ctr_key = ivgen_ctx->pool;
@@ -135,15 +118,15 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/* Copy initial enc. key and IV to SRAM at a single descriptor */
hw_desc_init(&iv_seq[iv_seq_len]);
set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
- SSI_IVPOOL_META_SIZE, NS_BIT);
+ CC_IVPOOL_META_SIZE, NS_BIT);
set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
- SSI_IVPOOL_META_SIZE);
+ CC_IVPOOL_META_SIZE);
set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
iv_seq_len++;
/* Generate initial pool */
- rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
- if (unlikely(rc != 0))
+ rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+ if (rc)
return rc;
/* Fire-and-forget */
@@ -155,17 +138,17 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
*
* \param drvdata
*/
-void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
struct device *device = &drvdata->plat_dev->dev;
if (!ivgen_ctx)
return;
if (ivgen_ctx->pool_meta) {
- memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
- dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
+ memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
+ dma_free_coherent(device, CC_IVPOOL_META_SIZE,
ivgen_ctx->pool_meta,
ivgen_ctx->pool_meta_dma);
}
@@ -184,42 +167,41 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_init(struct ssi_drvdata *drvdata)
+int cc_ivgen_init(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx;
+ struct cc_ivgen_ctx *ivgen_ctx;
struct device *device = &drvdata->plat_dev->dev;
int rc;
/* Allocate "this" context */
- drvdata->ivgen_handle = kzalloc(sizeof(*drvdata->ivgen_handle),
- GFP_KERNEL);
- if (!drvdata->ivgen_handle)
+ ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ if (!ivgen_ctx)
return -ENOMEM;
- ivgen_ctx = drvdata->ivgen_handle;
+ drvdata->ivgen_handle = ivgen_ctx;
- /* Allocate pool's header for intial enc. key/IV */
- ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
+ /* Allocate pool's header for initial enc. key/IV */
+ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
GFP_KERNEL);
if (!ivgen_ctx->pool_meta) {
dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
- SSI_IVPOOL_META_SIZE);
+ CC_IVPOOL_META_SIZE);
rc = -ENOMEM;
goto out;
}
/* Allocate IV pool in SRAM */
- ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
+ ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
dev_err(device, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto out;
}
- return ssi_ivgen_init_sram_pool(drvdata);
+ return cc_init_iv_sram(drvdata);
out:
- ssi_ivgen_fini(drvdata);
+ cc_ivgen_fini(drvdata);
return rc;
}
@@ -228,37 +210,36 @@ out:
*
* \param drvdata Driver private context
* \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignore)
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements
+ *			of iv_out_dma array are ignored)
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_getiv(
- struct ssi_drvdata *drvdata,
- dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len,
- unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len)
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
unsigned int idx = *iv_seq_len;
struct device *dev = drvdata_to_dev(drvdata);
unsigned int t;
- if ((iv_out_size != CC_AES_IV_SIZE) &&
- (iv_out_size != CTR_RFC3686_IV_SIZE)) {
+ if (iv_out_size != CC_AES_IV_SIZE &&
+ iv_out_size != CTR_RFC3686_IV_SIZE) {
return -EINVAL;
}
- if ((iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
+ if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
- //check that number of generated IV is limited to max dma address iv buffer size
- if (iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
+	/* check that the number of IVs to generate does not exceed the max
+	 * number of DMA addresses the IV out buffer can hold
+ */
+ if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
@@ -288,10 +269,10 @@ int ssi_ivgen_getiv(
/* Update iv index */
ivgen_ctx->next_iv_ofs += iv_out_size;
- if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+ if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
/* pool is drained -regenerate it! */
- return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);
+ return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
}
return 0;
diff --git a/drivers/staging/ccree/cc_ivgen.h b/drivers/staging/ccree/cc_ivgen.h
new file mode 100644
index 000000000000..b6ac16903dda
--- /dev/null
+++ b/drivers/staging/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_IVGEN_H__
+#define __CC_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+#define CC_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
+ *			iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
+
+#endif /*__CC_IVGEN_H__*/
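For context, a hedged caller-side sketch of the renamed API: the caller pulls an IV into a local descriptor sequence and then pushes that sequence through the request manager. drvdata and the DMA address are assumed to come from the surrounding driver.

    /* Sketch only: acquire one 16 B IV into a local sequence. */
    struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
    unsigned int iv_seq_len = 0;
    dma_addr_t iv_dma[1] = { my_iv_dma };	/* hypothetical DMA address */
    int rc;

    rc = cc_get_iv(drvdata, iv_dma, 1, CC_AES_IV_SIZE, iv_seq, &iv_seq_len);
    if (rc)
    	return rc;
    /* iv_seq[0..iv_seq_len - 1] now holds descriptors to enqueue */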
diff --git a/drivers/staging/ccree/cc_kernel_regs.h b/drivers/staging/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000000..fa994406d610
--- /dev/null
+++ b/drivers/staging/ccree/cc_kernel_regs.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __CC_CRYS_KERNEL_H__
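These offsets are not referenced directly; the driver composes register names through a CC_REG() helper (defined in cc_driver.h, outside this hunk), as seen in cc_request_mgr.c below. The assumed expansion:

    /* Assumed helper from cc_driver.h (not shown in this patch hunk): */
    #define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

    /* CC_REG(DSCRPTR_QUEUE_CONTENT)
     *   -> CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET == 0xE9CUL
     */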
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index a9c417b07b04..64b15ac9f1d3 100644
--- a/drivers/staging/ccree/cc_lli_defs.h
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -20,7 +7,7 @@
#include <linux/types.h>
/* Max DLLI size
- * AKA DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
*/
#define DLLI_SIZE_BIT_SIZE 0x18
diff --git a/drivers/staging/ccree/cc_pm.c b/drivers/staging/ccree/cc_pm.c
new file mode 100644
index 000000000000..d990f472e89f
--- /dev/null
+++ b/drivers/staging/ccree/cc_pm.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
+int cc_pm_suspend(struct device *dev)
+{
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ rc = cc_suspend_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
+ return rc;
+ }
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+ return 0;
+}
+
+int cc_pm_resume(struct device *dev)
+{
+ int rc;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
+ rc = cc_clk_on(drvdata);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ rc = init_cc_regs(drvdata, false);
+ if (rc) {
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
+ return rc;
+ }
+
+ rc = cc_resume_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
+ return rc;
+ }
+
+	/* must be done after the queue is resumed as it uses the HW queue */
+ cc_init_hash_sram(drvdata);
+
+ cc_init_iv_sram(drvdata);
+ return 0;
+}
+
+int cc_pm_get(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (cc_req_queue_suspended(drvdata))
+ rc = pm_runtime_get_sync(dev);
+ else
+ pm_runtime_get_noresume(dev);
+
+ return rc;
+}
+
+int cc_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (!cc_req_queue_suspended(drvdata)) {
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ } else {
+		/* Something went wrong */
+ dev_err(dev, "request to suspend already suspended queue");
+ rc = -EBUSY;
+ }
+ return rc;
+}
+
+int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ int rc = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+	/* must be done before enabling PM to avoid redundant suspends */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* activate the PM module */
+ rc = pm_runtime_set_active(dev);
+ if (rc)
+ return rc;
+	/* enable the PM module */
+ pm_runtime_enable(dev);
+
+ return rc;
+}
+
+void cc_pm_fini(struct cc_drvdata *drvdata)
+{
+ pm_runtime_disable(drvdata_to_dev(drvdata));
+}
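A hedged sketch of how ccree_pm is expected to be wired up; the actual registration lives in cc_driver.c, outside this hunk, and the probe/remove names here are illustrative.

    /* Sketch (hypothetical names) of hooking the PM ops into the driver: */
    static struct platform_driver ccree_driver = {
    	.driver = {
    		.name = "ccree",
    		.pm = &ccree_pm,	/* runtime PM ops from cc_pm.c */
    	},
    	.probe = ccree_probe,		/* hypothetical */
    	.remove = ccree_remove,		/* hypothetical */
    };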
diff --git a/drivers/staging/ccree/cc_pm.h b/drivers/staging/ccree/cc_pm.h
new file mode 100644
index 000000000000..aac8190fea38
--- /dev/null
+++ b/drivers/staging/ccree/cc_pm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
+int cc_pm_suspend(struct device *dev);
+int cc_pm_resume(struct device *dev);
+int cc_pm_get(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+static inline int cc_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_get(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_put_suspend(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
+
diff --git a/drivers/staging/ccree/cc_request_mgr.c b/drivers/staging/ccree/cc_request_mgr.c
new file mode 100644
index 000000000000..8a7f83407410
--- /dev/null
+++ b/drivers/staging/ccree/cc_request_mgr.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER 10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN 23
+
+struct cc_req_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
+ /* This lock protects access to HW register
+ * that must be single request at a time
+ */
+ spinlock_t hw_lock;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len;
+ spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+ bool is_runtime_suspended;
+};
+
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!req_mgr_h)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma) {
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots));
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ flush_workqueue(req_mgr_h->workq);
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ memset(req_mgr_h, 0, sizeof(struct cc_req_mgr_handle));
+ kfree(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+ if (!req_mgr_h) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+ dev_dbg(dev, "Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
+ if (!req_mgr_h->workq) {
+ dev_err(dev, "Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ dev_dbg(dev, "Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler,
+ (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff =
+ dma_alloc_coherent(dev, sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ /* Init. "dummy" completion descriptor */
+ hw_desc_init(&req_mgr_h->compl_desc);
+ set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+ set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+ sizeof(u32), NS_BIT, 1);
+ set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+ set_queue_last_ind(&req_mgr_h->compl_desc);
+
+ return 0;
+
+req_mgr_init_err:
+ cc_req_mgr_fini(drvdata);
+ return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+ unsigned int seq_len)
+{
+ int i, w;
+ void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /*
+ * We do indeed write all 6 command words to the same
+ * register. The HW supports this.
+ */
+
+ for (i = 0; i < seq_len; i++) {
+ for (w = 0; w <= 5; w++)
+ writel_relaxed(seq[i].word[w], reg);
+
+ if (cc_dump_desc)
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1],
+ seq[i].word[2], seq[i].word[3],
+ seq[i].word[4], seq[i].word[5]);
+ }
+}
+
+/*!
+ * Completion will take place if and only if user requested completion
+ * by cc_send_sync_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
+{
+ struct completion *this_compl = dx_compl_h;
+
+ complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+ struct cc_req_mgr_handle *req_mgr_h,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* SW queue is checked only once as it will not
+	 * be changed during the poll because the spinlock_bh
+ * is held by the thread
+ */
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -ENOSPC;
+ }
+
+ if (req_mgr_h->q_free_slots >= total_seq_len)
+ return 0;
+
+ /* Wait for space in HW queue. Poll constant num of iterations. */
+	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
+			/* If there is enough room, return */
+ return 0;
+ }
+
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+	/* No room in the HW queue, try again later */
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
+ return -ENOSPC;
+}
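The SW queue is a power-of-two ring, so the head/tail arithmetic reduces to masking; a minimal sketch of the math used here and again in cc_do_send_request() below:

    /* Sketch: ring occupancy when MAX_REQUEST_QUEUE_SIZE is a power of 2. */
    u32 head = req_mgr_h->req_queue_head, tail = req_mgr_h->req_queue_tail;
    unsigned int used = (head - tail) & (MAX_REQUEST_QUEUE_SIZE - 1);
    bool full = ((head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == tail;
    /* one slot is kept unused so that "full" and "empty" differ */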
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ * Need to be called with HW lock held and PM running
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param add_comp If "true": add an artificial dout DMA to mark completion
+ *
+ * \return int Returns -EINPROGRESS or error code
+ */
+static int cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp, bool ivgen)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int iv_seq_len = 0;
+	unsigned int total_seq_len = len; /* initial sequence length */
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ if (ivgen) {
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ cc_req->ivgen_dma_addr_len,
+ &cc_req->ivgen_dma_addr[0],
+ &cc_req->ivgen_dma_addr[1],
+ &cc_req->ivgen_dma_addr[2],
+ cc_req->ivgen_size);
+
+ /* Acquire IV from pool */
+ rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+ cc_req->ivgen_dma_addr_len,
+ cc_req->ivgen_size, iv_seq, &iv_seq_len);
+
+ if (rc) {
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
+ return rc;
+ }
+
+ total_seq_len += iv_seq_len;
+ }
+
+ used_sw_slots = ((req_mgr_h->req_queue_head -
+ req_mgr_h->req_queue_tail) &
+ (MAX_REQUEST_QUEUE_SIZE - 1));
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+ /* Enqueue request - must be locked with HW lock*/
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1);
+ /* TODO: Use circ_buf.h ? */
+
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+ /*
+	 * We are about to push a command to the HW via the command registers
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
+ */
+ wmb();
+
+ /* STAT_PHASE_4: Push sequence */
+ if (ivgen)
+ enqueue_seq(drvdata, iv_seq, iv_seq_len);
+
+ enqueue_seq(drvdata, desc, len);
+
+ if (add_comp) {
+ enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+ total_seq_len++;
+ }
+
+ if (req_mgr_h->q_free_slots < total_seq_len) {
+ /* This situation should never occur. Maybe indicating problem
+ * with resuming power. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+ }
+
+ /* Operation still in process */
+ return -EINPROGRESS;
+}
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ struct crypto_async_request *req;
+ bool ivgen;
+ unsigned int total_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = (struct crypto_async_request *)creq->user_arg;
+
+ /*
+ * Notify the request we're moving out of the backlog
+ * but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ req->complete(req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ ivgen = !!creq->ivgen_dma_addr_len;
+ total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, total_len);
+ if (rc) {
+ /*
+ * There is still not room in the FIFO for
+ * this request. Bail out. We'll return here
+ * on the next completion irq.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+ bli->len, false, ivgen);
+
+ spin_unlock(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ creq->user_cb(dev, req, rc);
+ }
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req)
+{
+ int rc;
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ bool ivgen = !!cc_req->ivgen_dma_addr_len;
+ unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+ struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, total_len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
+ cc_pm_put_suspend(dev);
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+
+ if (!rc)
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+ ivgen);
+
+ spin_unlock_bh(&mgr->hw_lock);
+ return rc;
+}
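For callers, a hedged sketch of what the return contract means (standard crypto API semantics; the surrounding names are illustrative):

    /* Sketch: a tfm-level caller submitting through cc_send_request(). */
    rc = cc_send_request(drvdata, &cc_req, desc, seq_len, &req->base);
    if (rc != -EINPROGRESS && rc != -EBUSY) {
    	/* hard failure: unmap buffers and report the error */
    }
    /* -EINPROGRESS: queued to HW; -EBUSY: backlogged (MAY_BACKLOG set) */
    return rc;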
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ int rc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ while (true) {
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len + 1);
+
+ if (!rc)
+ break;
+
+ spin_unlock_bh(&mgr->hw_lock);
+ if (rc != -EAGAIN) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+ wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+ reinit_completion(&drvdata->hw_queue_avail);
+ }
+
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+ spin_unlock_bh(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+
+ wait_for_completion(&cc_req->seq_compl);
+ return 0;
+}
+
+/*!
+ * Enqueue caller request to crypto hardware during init process.
+ * assume this function is not called in middle of a flow,
+ * since we set QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+	unsigned int total_seq_len = len; /* initial sequence length */
+ int rc = 0;
+
+	/* Wait for space in the HW and SW queues. cc_queues_status() polls
+	 * the HW queue for up to CC_MAX_POLL_ITER iterations.
+	 */
+ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+ if (rc)
+ return rc;
+
+ set_queue_last_ind(&desc[(len - 1)]);
+
+ /*
+	 * We are about to push a command to the HW via the command registers
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
+ */
+ wmb();
+ enqueue_seq(drvdata, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq,
+ &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_req *cc_req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ unsigned int *tail = &request_mgr_handle->req_queue_tail;
+ unsigned int *head = &request_mgr_handle->req_queue_head;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (*head == *tail) {
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ *head);
+ break;
+ }
+
+ cc_req = &request_mgr_handle->req_queue[*tail];
+
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg, 0);
+ *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
+ cc_pm_put_suspend(dev);
+ }
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+ return FIELD_GET(AXIM_MON_COMP_VALUE,
+ cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ u32 irq;
+
+ irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+
+ if (irq & CC_COMP_IRQ_MASK) {
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
+ request_mgr_handle->axi_completed =
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ CC_COMP_IRQ_MASK);
+
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+ }
+ }
+	/* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+ cc_proc_backlog(drvdata);
+}
+
+/*
+ * resume the queue configuration - no need to take the lock as this happens
+ * inside the spin lock protection
+ */
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ request_mgr_handle->is_runtime_suspended = false;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+/*
+ * suspend the queue configuration. Since it is used for runtime suspend
+ * only, verify that the queue can be suspended.
+ */
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ /* lock the send_request */
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ if (request_mgr_handle->req_queue_head !=
+ request_mgr_handle->req_queue_tail) {
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+ return -EBUSY;
+ }
+ request_mgr_handle->is_runtime_suspended = true;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ return request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
+
diff --git a/drivers/staging/ccree/cc_request_mgr.h b/drivers/staging/ccree/cc_request_mgr.h
new file mode 100644
index 000000000000..573cb97af085
--- /dev/null
+++ b/drivers/staging/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param req The crypto async request context; its flags determine
+ *	      whether the request may be backlogged when the queues
+ *	      are full and which GFP flags are used for allocations.
+ *
+ * \return int Returns -EINPROGRESS or error
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req);
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
+
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/cc_sram_mgr.c
index 07260d168c91..d1f8a9cc1c0f 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/cc_sram_mgr.c
@@ -1,62 +1,47 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include "ssi_driver.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
/**
- * struct ssi_sram_mgr_ctx -Internal RAM context manager
+ * struct cc_sram_ctx -Internal RAM context manager
* @sram_free_offset: the offset to the non-allocated area
*/
-struct ssi_sram_mgr_ctx {
- ssi_sram_addr_t sram_free_offset;
+struct cc_sram_ctx {
+ cc_sram_addr_t sram_free_offset;
};
/**
- * ssi_sram_mgr_fini() - Cleanup SRAM pool.
+ * cc_sram_mgr_fini() - Cleanup SRAM pool.
*
* @drvdata: Associated device driver context
*/
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
-
/* Free "this" context */
- if (smgr_ctx) {
- memset(smgr_ctx, 0, sizeof(struct ssi_sram_mgr_ctx));
- kfree(smgr_ctx);
- }
+ kfree(drvdata->sram_mgr_handle);
}
/**
- * ssi_sram_mgr_init() - Initializes SRAM pool.
+ * cc_sram_mgr_init() - Initializes SRAM pool.
* The pool starts right at the beginning of SRAM.
* Returns zero for success, negative value otherwise.
*
* @drvdata: Associated device driver context
*/
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
{
+ struct cc_sram_ctx *ctx;
+
/* Allocate "this" context */
- drvdata->sram_mgr_handle = kzalloc(sizeof(struct ssi_sram_mgr_ctx),
- GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!drvdata->sram_mgr_handle)
+ if (!ctx)
return -ENOMEM;
+ drvdata->sram_mgr_handle = ctx;
+
return 0;
}
@@ -69,18 +54,18 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
- struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- ssi_sram_addr_t p;
+ cc_sram_addr_t p;
- if (unlikely((size & 0x3) != 0)) {
+ if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
- if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
+ if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
@@ -93,7 +78,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
}
/**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create const descriptors sequence to
* set values in given array into SRAM.
* Note: each const value can't exceed word size.
*
@@ -103,10 +88,9 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void ssi_sram_mgr_const2sram_desc(
- const u32 *src, ssi_sram_addr_t dst,
- unsigned int nelement,
- struct cc_hw_desc *seq, unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len)
{
u32 i;
unsigned int idx = *seq_len;
diff --git a/drivers/staging/ccree/cc_sram_mgr.h b/drivers/staging/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000000..d48649fb3323
--- /dev/null
+++ b/drivers/staging/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef u64 cc_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Uninits SRAM pool.
+ *
+ * \param drvdata
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Allocated buffer from SRAM pool.
+ * Note: Caller is responsible to free the LAST allocated buffer.
+ * This function does not taking care of any fragmentation may occur
+ * by the order of calls to alloc/free.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create a const-value descriptor sequence to
+ * set the values of the given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/
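A hedged usage sketch tying the two SRAM helpers together, along the lines of what cc_init_hash_sram() does at probe time; the constants and surrounding names are illustrative.

    /* Sketch: load a small constant table into SRAM at init time. */
    static const u32 consts[4] = { 0x01, 0x02, 0x04, 0x08 }; /* illustrative */
    struct cc_hw_desc seq[4];
    unsigned int seq_len = 0;
    cc_sram_addr_t addr;

    addr = cc_sram_alloc(drvdata, sizeof(consts));
    if (addr == NULL_SRAM_ADDR)
    	return -ENOMEM;

    cc_set_sram_desc(consts, addr, ARRAY_SIZE(consts), seq, &seq_len);
    /* seq[] is then pushed via send_request_init() during probe */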
diff --git a/drivers/staging/ccree/dx_crys_kernel.h b/drivers/staging/ccree/dx_crys_kernel.h
deleted file mode 100644
index 219603030344..000000000000
--- a/drivers/staging/ccree/dx_crys_kernel.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_CRYS_KERNEL_H__
-#define __DX_CRYS_KERNEL_H__
-
-// --------------------------------------
-// BLOCK: DSCRPTR
-// --------------------------------------
-#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
-#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
-// --------------------------------------
-// BLOCK: AXI_P
-// --------------------------------------
-#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
-#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
-#define DX_AXIM_MON_COMP_REG_OFFSET 0xB80UL
-#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
-#define DX_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
-#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
-#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
-#define DX_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
-#define DX_AXIM_CFG_REG_OFFSET 0xBE8UL
-#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
-#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
-#endif // __DX_CRYS_KERNEL_H__
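[Editor's note] The paired _BIT_SHIFT/_BIT_SIZE constants above describe hardware register fields. As a minimal sketch (not part of this patch; the CC_FIELD_MASK helper name is invented, and the standard <linux/bitfield.h> helpers are assumed), a field can be extracted like so:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Build a field mask from a shift/size pair as defined above. */
#define CC_FIELD_MASK(name) \
	GENMASK(name##_BIT_SHIFT + name##_BIT_SIZE - 1, name##_BIT_SHIFT)

static inline u32 cc_sram_dscrptr_count(u32 reg_val)
{
	/* NUM_OF_DSCRPTR occupies bits [9:0] of DSCRPTR_QUEUE_SRAM_SIZE */
	return FIELD_GET(CC_FIELD_MASK(DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR),
			 reg_val);
}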
diff --git a/drivers/staging/ccree/dx_host.h b/drivers/staging/ccree/dx_host.h
deleted file mode 100644
index 863c2670d826..000000000000
--- a/drivers/staging/ccree/dx_host.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_HOST_H__
-#define __DX_HOST_H__
-
-// --------------------------------------
-// BLOCK: HOST_P
-// --------------------------------------
-#define DX_HOST_IRR_REG_OFFSET 0xA00UL
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
-#define DX_HOST_IRR_GPR0_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_REG_OFFSET 0xA04UL
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
-#define DX_HOST_IMR_GPR0_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_REG_OFFSET 0xA08UL
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL
-#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_BOOT_REG_OFFSET 0xA28UL
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_VERSION_REG_OFFSET 0xA40UL
-#define DX_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_GPR0_REG_OFFSET 0xA70UL
-#define DX_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
-#define DX_GPR_HOST_REG_OFFSET 0xA74UL
-#define DX_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
-#define DX_GPR_HOST_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
-// --------------------------------------
-// BLOCK: HOST_SRAM
-// --------------------------------------
-#define DX_SRAM_DATA_REG_OFFSET 0xF00UL
-#define DX_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
-#define DX_SRAM_ADDR_REG_OFFSET 0xF04UL
-#define DX_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
-#define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL
-#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
-
-#endif //__DX_HOST_H__
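[Editor's note] A hedged sketch of how the IRR/ICR offsets above pair up in practice; cc_ioread()/cc_iowrite() are the driver accessors defined in ssi_driver.h further below, while the helper name here is hypothetical. The clear-via-ICR pattern mirrors what comp_handler() in ssi_request_mgr.c does.

static void cc_ack_axim_comp(struct ssi_drvdata *drvdata)
{
	u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));

	/* Writing the bit to ICR clears the corresponding IRR bit */
	if (irr & BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT))
		cc_iowrite(drvdata, CC_REG(HOST_ICR),
			   BIT(DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT));
}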
diff --git a/drivers/staging/ccree/dx_reg_common.h b/drivers/staging/ccree/dx_reg_common.h
deleted file mode 100644
index d5132ffaf6e6..000000000000
--- a/drivers/staging/ccree/dx_reg_common.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_REG_COMMON_H__
-#define __DX_REG_COMMON_H__
-
-#define DX_DEV_SIGNATURE 0xDCC71200UL
-
-#define CC_HW_VERSION 0xef840015UL
-
-#define DX_DEV_SHA_MAX 512
-
-#endif /*__DX_REG_COMMON_H__*/
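[Editor's note] A minimal sketch of how DX_DEV_SIGNATURE is typically consumed at probe time; the surrounding function is hypothetical, and the register read uses the HOST_SIGNATURE offset from dx_host.h above.

static int cc_check_signature(struct ssi_drvdata *drvdata)
{
	u32 sig = cc_ioread(drvdata, CC_REG(HOST_SIGNATURE));

	if (sig != DX_DEV_SIGNATURE) {
		dev_err(drvdata_to_dev(drvdata),
			"Invalid CC signature: 0x%08X != 0x%08lX\n",
			sig, DX_DEV_SIGNATURE);
		return -ENODEV;
	}
	return 0;
}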
diff --git a/drivers/staging/ccree/hash_defs.h b/drivers/staging/ccree/hash_defs.h
deleted file mode 100644
index f52656f5a3ea..000000000000
--- a/drivers/staging/ccree/hash_defs.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HASH_DEFS_H_
-#define _HASH_DEFS_H_
-
-#include "cc_crypto_ctx.h"
-
-enum cc_hash_conf_pad {
- HASH_PADDING_DISABLED = 0,
- HASH_PADDING_ENABLED = 1,
- HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
- HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
-};
-
-enum cc_hash_cipher_pad {
- DO_NOT_PAD = 0,
- DO_PAD = 1,
- HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
-};
-
-#endif /*_HASH_DEFS_H_*/
-
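[Editor's note] The *_RESERVE32 = S32_MAX sentinels above force the compiler to pick a 32-bit underlying type for these enums so their values pack predictably into hardware descriptor words. A hedged compile-time check of that assumption (the function name is invented):

#include <linux/build_bug.h>

static inline void cc_check_hash_enum_width(void)
{
	/* The S32_MAX sentinel guarantees a 32-bit representation */
	BUILD_BUG_ON(sizeof(enum cc_hash_conf_pad) != sizeof(u32));
	BUILD_BUG_ON(sizeof(enum cc_hash_cipher_pad) != sizeof(u32));
}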
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
deleted file mode 100644
index 1032f25edcab..000000000000
--- a/drivers/staging/ccree/ssi_buffer_mgr.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file buffer_mgr.h
- * Buffer Manager
- */
-
-#ifndef __SSI_BUFFER_MGR_H__
-#define __SSI_BUFFER_MGR_H__
-
-#include <crypto/algapi.h>
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-
-enum ssi_req_dma_buf_type {
- SSI_DMA_BUF_NULL = 0,
- SSI_DMA_BUF_DLLI,
- SSI_DMA_BUF_MLLI
-};
-
-enum ssi_sg_cpy_direct {
- SSI_SG_TO_BUF = 0,
- SSI_SG_FROM_BUF = 1
-};
-
-struct ssi_mlli {
- ssi_sram_addr_t sram_addr;
- unsigned int nents; //sg nents
- unsigned int mlli_nents; //mlli nents might be different from the above
-};
-
-struct mlli_params {
- struct dma_pool *curr_pool;
- u8 *mlli_virt_addr;
- dma_addr_t mlli_dma_addr;
- u32 mlli_len;
-};
-
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
-
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);
-
-int ssi_buffer_mgr_map_blkcipher_request(
- struct ssi_drvdata *drvdata,
- void *ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- void *info,
- struct scatterlist *src,
- struct scatterlist *dst);
-
-void ssi_buffer_mgr_unmap_blkcipher_request(
- struct device *dev,
- void *ctx,
- unsigned int ivsize,
- struct scatterlist *src,
- struct scatterlist *dst);
-
-int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
-
-void ssi_buffer_mgr_unmap_aead_request(struct device *dev, struct aead_request *req);
-
-int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
-
-int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
-
-void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
-
-void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
- struct scatterlist *sg,
- u32 to_skip, u32 end,
- enum ssi_sg_cpy_direct direct);
-
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
-
-#endif /*__BUFFER_MGR_H__*/
-
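[Editor's note] A rough usage sketch of how the hash map/unmap entry points above pair around a descriptor submission; the caller function is hypothetical and error handling is elided.

static int cc_hash_update_sketch(struct ssi_drvdata *drvdata, void *ctx,
				 struct scatterlist *src, unsigned int nbytes,
				 unsigned int block_size)
{
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	rc = ssi_buffer_mgr_map_hash_request_update(drvdata, ctx, src,
						    nbytes, block_size);
	if (rc < 0)
		return rc;

	/* ... build the HW descriptor sequence and send_request() here ... */

	ssi_buffer_mgr_unmap_hash_request(dev, ctx, src, false);
	return 0;
}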
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
deleted file mode 100644
index 25e6335c0d94..000000000000
--- a/drivers/staging/ccree/ssi_cipher.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_cipher.h
- * ARM CryptoCell Cipher Crypto API
- */
-
-#ifndef __SSI_CIPHER_H__
-#define __SSI_CIPHER_H__
-
-#include <linux/kernel.h>
-#include <crypto/algapi.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-
-/* Crypto cipher flags */
-#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
-#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
-#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
-#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
-#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
-
-#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
-
-struct blkcipher_req_ctx {
- struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type dma_buf_type;
- u32 in_nents;
- u32 in_mlli_nents;
- u32 out_nents;
- u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
- u8 *iv;
- bool is_giv;
- struct mlli_params mlli_params;
-};
-
-int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata);
-
-int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
-
-#ifndef CRYPTO_ALG_BULK_MASK
-
-#define CRYPTO_ALG_BULK_DU_512 0x00002000
-#define CRYPTO_ALG_BULK_DU_4096 0x00004000
-#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
- CRYPTO_ALG_BULK_DU_4096)
-#endif /* CRYPTO_ALG_BULK_MASK */
-
-#ifdef CRYPTO_TFM_REQ_HW_KEY
-
-static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
-{
- return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
-}
-
-#else
-
-struct arm_hw_key_info {
- int hw_key1;
- int hw_key2;
-};
-
-static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
-{
- return false;
-}
-
-#endif /* CRYPTO_TFM_REQ_HW_KEY */
-
-#endif /*__SSI_CIPHER_H__*/
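[Editor's note] A one-line sketch of how the KFDE flag bits above might be tested; the helper name is invented for illustration.

static inline bool cc_key_is_kfde(u32 cipher_flags)
{
	/* True if any of the four kernel key-FDE HW key slots is selected */
	return !!(cipher_flags & CC_CRYPTO_CIPHER_KEY_KFDE_MASK);
}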
diff --git a/drivers/staging/ccree/ssi_config.h b/drivers/staging/ccree/ssi_config.h
deleted file mode 100644
index ff7597c2d77e..000000000000
--- a/drivers/staging/ccree/ssi_config.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_config.h
- * Definitions for ARM CryptoCell Linux Crypto Driver
- */
-
-#ifndef __SSI_CONFIG_H__
-#define __SSI_CONFIG_H__
-
-#include <linux/version.h>
-
-//#define FLUSH_CACHE_ALL
-//#define COMPLETION_DELAY
-//#define DX_DUMP_DESCS
-// #define DX_DUMP_BYTES
-// #define CC_DEBUG
-#define ENABLE_CC_SYSFS /* Enable sysfs interface for debugging REE driver */
-//#define DX_IRQ_DELAY 100000
-#define DMA_BIT_MASK_LEN 48 /* was 32 bits, but enlarged to 48 bits for the Juno platform */
-
-#endif /*__DX_CONFIG_H__*/
-
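[Editor's note] A minimal sketch of how DMA_BIT_MASK_LEN is typically consumed during probe, assuming the standard dma_set_mask_and_coherent() API; the wrapper function is hypothetical.

#include <linux/dma-mapping.h>

static int cc_setup_dma_mask(struct device *dev)
{
	/* 48-bit mask per DMA_BIT_MASK_LEN above (enlarged for Juno) */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(DMA_BIT_MASK_LEN));
}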
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
deleted file mode 100644
index 94c755cafb47..000000000000
--- a/drivers/staging/ccree/ssi_driver.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_driver.h
- * ARM CryptoCell Linux Crypto Driver
- */
-
-#ifndef __SSI_DRIVER_H__
-#define __SSI_DRIVER_H__
-
-#include "ssi_config.h"
-#ifdef COMP_IN_WQ
-#include <linux/workqueue.h>
-#else
-#include <linux/interrupt.h>
-#endif
-#include <linux/dma-mapping.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/sha.h>
-#include <crypto/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/hash.h>
-#include <linux/version.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-/* Registers definitions from shared/hw/ree_include */
-#include "dx_host.h"
-#include "dx_reg_common.h"
-#define CC_SUPPORT_SHA DX_DEV_SHA_MAX
-#include "cc_crypto_ctx.h"
-#include "ssi_sysfs.h"
-#include "hash_defs.h"
-#include "cc_hw_queue_defs.h"
-#include "ssi_sram_mgr.h"
-
-#define DRV_MODULE_VERSION "3.0"
-
-#define SSI_DEV_NAME_STR "cc715ree"
-#define CC_COHERENT_CACHE_PARAMS 0xEEE
-
-#define SSI_CC_HAS_AES_CCM 1
-#define SSI_CC_HAS_AES_GCM 1
-#define SSI_CC_HAS_AES_XTS 1
-#define SSI_CC_HAS_AES_ESSIV 1
-#define SSI_CC_HAS_AES_BITLOCKER 1
-#define SSI_CC_HAS_AES_CTS 1
-#define SSI_CC_HAS_MULTI2 0
-#define SSI_CC_HAS_CMAC 1
-
-#define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
- (1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT))
-
-#define SSI_AXI_ERR_IRQ_MASK BIT(DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
-
-#define SSI_COMP_IRQ_MASK BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
-
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-/* Register name mangling macro */
-#define CC_REG(reg_name) DX_ ## reg_name ## _REG_OFFSET
-
-/* TEE FIPS status interrupt */
-#define SSI_GPR0_IRQ_MASK BIT(DX_HOST_IRR_GPR0_BIT_SHIFT)
-
-#define SSI_CRA_PRIO 3000
-
-#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
-
-#define MAX_REQUEST_QUEUE_SIZE 4096
-#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
-
-/* Definitions for HW descriptors DIN/DOUT fields */
-#define NS_BIT 1
-#define AXI_ID 0
-/* AXI_ID is not actually the AXI ID of the transaction but the value of the
- * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
- */
-
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-
-#define SSI_MAX_IVGEN_DMA_ADDRESSES 3
-struct ssi_crypto_req {
- void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base);
- void *user_arg;
- dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES];
- /* For the first 'ivgen_dma_addr_len' addresses of this array,
- * the generated IV will be placed there by send_request().
- * The same generated IV is used for all addresses!
- */
- unsigned int ivgen_dma_addr_len; /* Amount of 'ivgen_dma_addr' elements to be filled. */
- unsigned int ivgen_size; /* The generated IV size required, 8/16 B allowed. */
- struct completion seq_compl; /* request completion */
-};
-
-/**
- * struct ssi_drvdata - driver private data context
- * @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver: SeP loaded firmware version
- */
-struct ssi_drvdata {
- void __iomem *cc_base;
- int irq;
- u32 irq_mask;
- u32 fw_ver;
- /* Calibration time of start/stop
- * monitor descriptors
- */
- u32 monitor_null_cycles;
- struct platform_device *plat_dev;
- ssi_sram_addr_t mlli_sram_addr;
- void *buff_mgr_handle;
- void *hash_handle;
- void *aead_handle;
- void *blkcipher_handle;
- void *request_mgr_handle;
- void *fips_handle;
- void *ivgen_handle;
- void *sram_mgr_handle;
- struct clk *clk;
- bool coherent;
-};
-
-struct ssi_crypto_alg {
- struct list_head entry;
- int cipher_mode;
- int flow_mode; /* Note: currently, refers to the cipher mode only. */
- int auth_mode;
- struct ssi_drvdata *drvdata;
- struct crypto_alg crypto_alg;
- struct aead_alg aead_alg;
-};
-
-struct ssi_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
- struct blkcipher_alg blkcipher;
- struct cipher_alg cipher;
- struct compress_alg compress;
- } template_u;
- int cipher_mode;
- int flow_mode; /* Note: currently, refers to the cipher mode only. */
- int auth_mode;
- struct ssi_drvdata *drvdata;
-};
-
-struct async_gen_req_ctx {
- dma_addr_t iv_dma_addr;
- enum drv_crypto_direction op_type;
-};
-
-static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
-{
- return &drvdata->plat_dev->dev;
-}
-
-#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
-#else
-static inline void dump_byte_array(const char *name, const u8 *the_array,
- unsigned long size) {};
-#endif
-
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
-void fini_cc_regs(struct ssi_drvdata *drvdata);
-int cc_clk_on(struct ssi_drvdata *drvdata);
-void cc_clk_off(struct ssi_drvdata *drvdata);
-
-static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
-{
- iowrite32(val, (drvdata->cc_base + reg));
-}
-
-static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
-{
- return ioread32(drvdata->cc_base + reg);
-}
-
-#endif /*__SSI_DRIVER_H__*/
-
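[Editor's note] The CC_REG() name-mangling macro and the cc_ioread()/cc_iowrite() accessors combine as in this hedged sketch; it mirrors (rather than copies) the unmask pattern comp_handler() in ssi_request_mgr.c uses, and the function name is invented.

static void cc_unmask_irqs_sketch(struct ssi_drvdata *drvdata)
{
	/* In IMR a set bit masks the interrupt, so clear bits to unmask */
	u32 imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   imr & ~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK));
}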
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
deleted file mode 100644
index 63bcca7f3af9..000000000000
--- a/drivers/staging/ccree/ssi_fips.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_FIPS_H__
-#define __SSI_FIPS_H__
-
-#ifdef CONFIG_CRYPTO_FIPS
-
-enum cc_fips_status {
- CC_FIPS_SYNC_MODULE_OK = 0x0,
- CC_FIPS_SYNC_MODULE_ERROR = 0x1,
- CC_FIPS_SYNC_REE_STATUS = 0x4,
- CC_FIPS_SYNC_TEE_STATUS = 0x8,
- CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
-};
-
-int ssi_fips_init(struct ssi_drvdata *p_drvdata);
-void ssi_fips_fini(struct ssi_drvdata *drvdata);
-void fips_handler(struct ssi_drvdata *drvdata);
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok);
-
-#else /* CONFIG_CRYPTO_FIPS */
-
-static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- return 0;
-}
-
-static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
-static inline void fips_handler(struct ssi_drvdata *drvdata) {}
-
-#endif /* CONFIG_CRYPTO_FIPS */
-
-#endif /*__SSI_FIPS_H__*/
-
diff --git a/drivers/staging/ccree/ssi_hash.h b/drivers/staging/ccree/ssi_hash.h
deleted file mode 100644
index 2400e389d65a..000000000000
--- a/drivers/staging/ccree/ssi_hash.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_hash.h
- * ARM CryptoCell Hash Crypto API
- */
-
-#ifndef __SSI_HASH_H__
-#define __SSI_HASH_H__
-
-#include "ssi_buffer_mgr.h"
-
-#define HMAC_IPAD_CONST 0x36363636
-#define HMAC_OPAD_CONST 0x5C5C5C5C
-#if (DX_DEV_SHA_MAX > 256)
-#define HASH_LEN_SIZE 16
-#define SSI_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-#define SSI_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
-#else
-#define HASH_LEN_SIZE 8
-#define SSI_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
-#define SSI_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
-#endif
-
-#define XCBC_MAC_K1_OFFSET 0
-#define XCBC_MAC_K2_OFFSET 16
-#define XCBC_MAC_K3_OFFSET 32
-
-#define CC_EXPORT_MAGIC 0xC2EE1070U
-
-// This struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and is used for the xcbc/cmac statesize.
-struct aeshash_state {
- u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
-};
-
-/* ahash state */
-struct ahash_req_ctx {
- u8 *buff0;
- u8 *buff1;
- u8 *digest_result_buff;
- struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type data_dma_buf_type;
- u8 *digest_buff;
- u8 *opad_digest_buff;
- u8 *digest_bytes_len;
- dma_addr_t opad_digest_dma_addr;
- dma_addr_t digest_buff_dma_addr;
- dma_addr_t digest_bytes_len_dma_addr;
- dma_addr_t digest_result_dma_addr;
- u32 buff0_cnt;
- u32 buff1_cnt;
- u32 buff_index;
- u32 xcbc_count; /* count xcbc update operations */
- struct scatterlist buff_sg[2];
- struct scatterlist *curr_sg;
- u32 in_nents;
- u32 mlli_nents;
- struct mlli_params mlli_params;
-};
-
-int ssi_hash_alloc(struct ssi_drvdata *drvdata);
-int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata);
-int ssi_hash_free(struct ssi_drvdata *drvdata);
-
-/*!
- * Gets the SRAM address of the initial digest length
- *
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- *
- * \return u32 returns the address of the initial digest length in SRAM
- */
-ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode);
-
-/*!
- * Gets the address of the initial digest in SRAM
- * according to the given hash mode
- *
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- *
- * \return u32 The address of the initial digest in SRAM
- */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode);
-
-#endif /*__SSI_HASH_H__*/
-
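[Editor's note] HMAC_IPAD_CONST/HMAC_OPAD_CONST above are the classic HMAC pad bytes (0x36/0x5C) replicated across a 32-bit word. A short sketch of deriving the inner/outer pads from a block-sized key, one word at a time; the helper name is invented.

static void cc_hmac_pads(u32 *ipad, u32 *opad, const u32 *key,
			 unsigned int block_words)
{
	unsigned int i;

	for (i = 0; i < block_words; i++) {
		ipad[i] = key[i] ^ HMAC_IPAD_CONST; /* key XOR 0x36 per byte */
		opad[i] = key[i] ^ HMAC_OPAD_CONST; /* key XOR 0x5C per byte */
	}
}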
diff --git a/drivers/staging/ccree/ssi_ivgen.h b/drivers/staging/ccree/ssi_ivgen.h
deleted file mode 100644
index 961aea411cb3..000000000000
--- a/drivers/staging/ccree/ssi_ivgen.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_IVGEN_H__
-#define __SSI_IVGEN_H__
-
-#include "cc_hw_queue_defs.h"
-
-#define SSI_IVPOOL_SEQ_LEN 8
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void ssi_ivgen_fini(struct ssi_drvdata *drvdata);
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming DX driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata);
-
-/*!
- * Acquires a 16-byte IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignored)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_getiv(
- struct ssi_drvdata *drvdata,
- dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len,
- unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len);
-
-#endif /*__SSI_IVGEN_H__*/
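[Editor's note] A hypothetical caller sketch for ssi_ivgen_getiv(); the real usage sits inside send_request() in ssi_request_mgr.c below, where the returned iv_seq descriptors are pushed ahead of the request's own sequence.

/* One destination address, 16-byte IV; 'req' is a struct ssi_crypto_req */
struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
unsigned int iv_seq_len = 0;
int rc;

rc = ssi_ivgen_getiv(drvdata, &req->ivgen_dma_addr[0], 1, 16,
		     iv_seq, &iv_seq_len);
if (rc)
	return rc;
/* enqueue iv_seq (iv_seq_len descriptors) before the request sequence */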
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
deleted file mode 100644
index 36a498098a70..000000000000
--- a/drivers/staging/ccree/ssi_pm.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "ssi_config.h"
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <crypto/ctr.h>
-#include <linux/pm_runtime.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_ivgen.h"
-#include "ssi_hash.h"
-#include "ssi_pm.h"
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-
-#define POWER_DOWN_ENABLE 0x01
-#define POWER_DOWN_DISABLE 0x00
-
-int ssi_power_mgr_runtime_suspend(struct device *dev)
-{
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(dev);
- int rc;
-
- dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
- rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
- if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
- rc);
- return rc;
- }
- fini_cc_regs(drvdata);
- cc_clk_off(drvdata);
- return 0;
-}
-
-int ssi_power_mgr_runtime_resume(struct device *dev)
-{
- int rc;
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(dev);
-
- dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
- rc = cc_clk_on(drvdata);
- if (rc) {
- dev_err(dev, "failed getting clock back on. We're toast.\n");
- return rc;
- }
-
- rc = init_cc_regs(drvdata, false);
- if (rc != 0) {
- dev_err(dev, "init_cc_regs (%x)\n", rc);
- return rc;
- }
-
- rc = ssi_request_mgr_runtime_resume_queue(drvdata);
- if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resume as it uses the HW queue */
- ssi_hash_init_sram_digest_consts(drvdata);
-
- ssi_ivgen_init_sram_pool(drvdata);
- return 0;
-}
-
-int ssi_power_mgr_runtime_get(struct device *dev)
-{
- int rc = 0;
-
- if (ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
- rc = pm_runtime_get_sync(dev);
- } else {
- pm_runtime_get_noresume(dev);
- }
- return rc;
-}
-
-int ssi_power_mgr_runtime_put_suspend(struct device *dev)
-{
- int rc = 0;
-
- if (!ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
- pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something went wrong */
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
- }
- return rc;
-}
-
-#endif
-
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
-{
- int rc = 0;
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct device *dev = drvdata_to_dev(drvdata);
-
- /* must be done before enabling to avoid a redundant suspend */
- pm_runtime_set_autosuspend_delay(dev, SSI_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
- rc = pm_runtime_set_active(dev);
- if (rc != 0)
- return rc;
- /* enable the PM module */
- pm_runtime_enable(dev);
-#endif
- return rc;
-}
-
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
-{
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- pm_runtime_disable(drvdata_to_dev(drvdata));
-#endif
-}
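[Editor's note] As a hedged sketch (the actual ops table is not part of this hunk, so treat the wiring as an assumption), these callbacks would typically be hooked into a dev_pm_ops via the standard SET_RUNTIME_PM_OPS() macro from <linux/pm.h>:

static const struct dev_pm_ops ssi_pm_ops_sketch = {
	SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend,
			   ssi_power_mgr_runtime_resume,
			   NULL) /* no runtime-idle callback */
};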
diff --git a/drivers/staging/ccree/ssi_pm.h b/drivers/staging/ccree/ssi_pm.h
deleted file mode 100644
index 63673f60d2d8..000000000000
--- a/drivers/staging/ccree/ssi_pm.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_pm.h
- */
-
-#ifndef __SSI_POWER_MGR_H__
-#define __SSI_POWER_MGR_H__
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-
-#define SSI_SUSPEND_TIMEOUT 3000
-
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
-
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_power_mgr_runtime_suspend(struct device *dev);
-
-int ssi_power_mgr_runtime_resume(struct device *dev);
-
-int ssi_power_mgr_runtime_get(struct device *dev);
-
-int ssi_power_mgr_runtime_put_suspend(struct device *dev);
-#endif
-
-#endif /*__POWER_MGR_H__*/
-
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
deleted file mode 100644
index a8a7dc672d4c..000000000000
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "ssi_config.h"
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <crypto/ctr.h>
-#ifdef FLUSH_CACHE_ALL
-#include <asm/cacheflush.h>
-#endif
-#include <linux/pm_runtime.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_ivgen.h"
-#include "ssi_pm.h"
-
-#define SSI_MAX_POLL_ITER 10
-
-struct ssi_request_mgr_handle {
- /* Request manager resources */
- unsigned int hw_queue_size; /* HW capability */
- unsigned int min_free_hw_slots;
- unsigned int max_used_sw_slots;
- struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
- u32 req_queue_head;
- u32 req_queue_tail;
- u32 axi_completed;
- u32 q_free_slots;
- spinlock_t hw_lock;
- struct cc_hw_desc compl_desc;
- u8 *dummy_comp_buff;
- dma_addr_t dummy_comp_buff_dma;
- struct cc_hw_desc monitor_desc;
-
-#ifdef COMP_IN_WQ
- struct workqueue_struct *workq;
- struct delayed_work compwork;
-#else
- struct tasklet_struct comptask;
-#endif
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- bool is_runtime_suspended;
-#endif
-};
-
-static void comp_handler(unsigned long devarg);
-#ifdef COMP_IN_WQ
-static void comp_work_handler(struct work_struct *work);
-#endif
-
-void request_mgr_fini(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- struct device *dev = drvdata_to_dev(drvdata);
-
- if (!req_mgr_h)
- return; /* Not allocated */
-
- if (req_mgr_h->dummy_comp_buff_dma != 0) {
- dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
- req_mgr_h->dummy_comp_buff_dma);
- }
-
- dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
- req_mgr_h->min_free_hw_slots));
- dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
-
-#ifdef COMP_IN_WQ
- flush_workqueue(req_mgr_h->workq);
- destroy_workqueue(req_mgr_h->workq);
-#else
- /* Kill tasklet */
- tasklet_kill(&req_mgr_h->comptask);
-#endif
- memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
- kfree(req_mgr_h);
- drvdata->request_mgr_handle = NULL;
-}
-
-int request_mgr_init(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *req_mgr_h;
- struct device *dev = drvdata_to_dev(drvdata);
- int rc = 0;
-
- req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
- if (!req_mgr_h) {
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
-
- drvdata->request_mgr_handle = req_mgr_h;
-
- spin_lock_init(&req_mgr_h->hw_lock);
-#ifdef COMP_IN_WQ
- dev_dbg(dev, "Initializing completion workqueue\n");
- req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
- if (unlikely(!req_mgr_h->workq)) {
- dev_err(dev, "Failed creating work queue\n");
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
- INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
-#else
- dev_dbg(dev, "Initializing completion tasklet\n");
- tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
-#endif
- req_mgr_h->hw_queue_size = cc_ioread(drvdata,
- CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
- dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
- if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
- dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
- req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
- req_mgr_h->max_used_sw_slots = 0;
-
- /* Allocate DMA word for "dummy" completion descriptor use */
- req_mgr_h->dummy_comp_buff = dma_alloc_coherent(dev, sizeof(u32),
- &req_mgr_h->dummy_comp_buff_dma,
- GFP_KERNEL);
- if (!req_mgr_h->dummy_comp_buff) {
- dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
- sizeof(u32));
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
-
- /* Init. "dummy" completion descriptor */
- hw_desc_init(&req_mgr_h->compl_desc);
- set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
- set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
- sizeof(u32), NS_BIT, 1);
- set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
- set_queue_last_ind(&req_mgr_h->compl_desc);
-
- return 0;
-
-req_mgr_init_err:
- request_mgr_fini(drvdata);
- return rc;
-}
-
-static inline void enqueue_seq(
- void __iomem *cc_base,
- struct cc_hw_desc seq[], unsigned int seq_len)
-{
- int i;
-
- for (i = 0; i < seq_len; i++) {
- writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- wmb();
- writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
-#ifdef DX_DUMP_DESCS
- dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
- i, seq[i].word[0], seq[i].word[1], seq[i].word[2],
- seq[i].word[3], seq[i].word[4], seq[i].word[5]);
-#endif
- }
-}
-
-/*!
- * Completion will take place if and only if user requested completion
- * by setting "is_dout = 0" in send_request().
- *
- * \param dev
- * \param dx_compl_h The completion event to signal
- */
-static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
-{
- struct completion *this_compl = dx_compl_h;
-
- complete(this_compl);
-}
-
-static inline int request_mgr_queues_status_check(
- struct ssi_drvdata *drvdata,
- struct ssi_request_mgr_handle *req_mgr_h,
- unsigned int total_seq_len)
-{
- unsigned long poll_queue;
- struct device *dev = drvdata_to_dev(drvdata);
-
- /* SW queue is checked only once as it will not
- * be changed during the poll because the spinlock_bh
- * is held by the thread
- */
- if (unlikely(((req_mgr_h->req_queue_head + 1) &
- (MAX_REQUEST_QUEUE_SIZE - 1)) ==
- req_mgr_h->req_queue_tail)) {
- dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
- return -EBUSY;
- }
-
- if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
- return 0;
-
- /* Wait for space in HW queue. Poll constant num of iterations. */
- for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
- req_mgr_h->q_free_slots =
- cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- if (unlikely(req_mgr_h->q_free_slots <
- req_mgr_h->min_free_hw_slots)) {
- req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
- }
-
- if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
- /* If there is enough room, return */
- return 0;
- }
-
- dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
- }
- /* No room in the HW queue try again later */
- dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
- req_mgr_h->q_free_slots, total_seq_len);
- return -EAGAIN;
-}
-
-/*!
- * Enqueue caller request to crypto hardware.
- *
- * \param drvdata
- * \param ssi_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
- *
- * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
- */
-int send_request(
- struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
- struct cc_hw_desc *desc, unsigned int len, bool is_dout)
-{
- void __iomem *cc_base = drvdata->cc_base;
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- unsigned int used_sw_slots;
- unsigned int iv_seq_len = 0;
- unsigned int total_seq_len = len; /*initial sequence length*/
- struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
- struct device *dev = drvdata_to_dev(drvdata);
- int rc;
- unsigned int max_required_seq_len = (total_seq_len +
- ((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
- SSI_IVPOOL_SEQ_LEN) +
- (!is_dout ? 1 : 0));
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_get(dev);
- if (rc != 0) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
- return rc;
- }
-#endif
-
- do {
- spin_lock_bh(&req_mgr_h->hw_lock);
-
- /* Check if there is enough room in the SW/HW queues:
- * for IV generation add the max IV sequence size, and if there
- * is no DOUT add 1 for the internal completion descriptor
- */
- rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
- max_required_seq_len);
- if (likely(rc == 0))
- /* There is enough place in the queue */
- break;
- /* Something went wrong; release the spinlock */
- spin_unlock_bh(&req_mgr_h->hw_lock);
-
- if (rc != -EAGAIN) {
- /* Any error other than HW queue full
- * (SW queue is full)
- */
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
-#endif
- return rc;
- }
-
- /* HW queue is full - short sleep */
- msleep(1);
- } while (1);
-
- /* An additional completion descriptor is needed in case the caller
- * did not enable any DLLI/MLLI DOUT bit in the given sequence
- */
- if (!is_dout) {
- init_completion(&ssi_req->seq_compl);
- ssi_req->user_cb = request_mgr_complete;
- ssi_req->user_arg = &ssi_req->seq_compl;
- total_seq_len++;
- }
-
- if (ssi_req->ivgen_dma_addr_len > 0) {
- dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- &ssi_req->ivgen_dma_addr[0],
- &ssi_req->ivgen_dma_addr[1],
- &ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
-
- /* Acquire IV from pool */
- rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
- ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_size, iv_seq, &iv_seq_len);
-
- if (unlikely(rc != 0)) {
- dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
- spin_unlock_bh(&req_mgr_h->hw_lock);
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
-#endif
- return rc;
- }
-
- total_seq_len += iv_seq_len;
- }
-
- used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
- if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
- req_mgr_h->max_used_sw_slots = used_sw_slots;
-
- /* Enqueue request - must be locked with HW lock*/
- req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
- req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- /* TODO: Use circ_buf.h ? */
-
- dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
-
-#ifdef FLUSH_CACHE_ALL
- flush_cache_all();
-#endif
-
- /* STAT_PHASE_4: Push sequence */
- enqueue_seq(cc_base, iv_seq, iv_seq_len);
- enqueue_seq(cc_base, desc, len);
- enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
-
- if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
- /* This situation should never occur. It may indicate a problem
- * with power resume. Set the free slot count to 0 and hope
- * for the best.
- */
- dev_err(dev, "HW free slot count mismatch.");
- req_mgr_h->q_free_slots = 0;
- } else {
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots -= total_seq_len;
- }
-
- spin_unlock_bh(&req_mgr_h->hw_lock);
-
- if (!is_dout) {
- /* Wait upon sequence completion.
- * Return "0" - operation done successfully.
- */
- wait_for_completion(&ssi_req->seq_compl);
- return 0;
- }
- /* Operation still in process */
- return -EINPROGRESS;
-}
-
-/*!
- * Enqueue caller request to crypto hardware during the init process.
- * Assumes this function is not called in the middle of a flow,
- * since we set the QUEUE_LAST_IND flag in the last descriptor.
- *
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- *
- * \return int Returns "0" upon success
- */
-int send_request_init(
- struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
-{
- void __iomem *cc_base = drvdata->cc_base;
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- unsigned int total_seq_len = len; /*initial sequence length*/
- int rc = 0;
-
- /* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
- rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
- total_seq_len);
- if (unlikely(rc != 0))
- return rc;
-
- set_queue_last_ind(&desc[(len - 1)]);
-
- enqueue_seq(cc_base, desc, len);
-
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots =
- cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
-
- return 0;
-}
-
-void complete_request(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-#ifdef COMP_IN_WQ
- queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
-#else
- tasklet_schedule(&request_mgr_handle->comptask);
-#endif
-}
-
-#ifdef COMP_IN_WQ
-static void comp_work_handler(struct work_struct *work)
-{
- struct ssi_drvdata *drvdata =
- container_of(work, struct ssi_drvdata, compwork.work);
-
- comp_handler((unsigned long)drvdata);
-}
-#endif
-
-static void proc_completions(struct ssi_drvdata *drvdata)
-{
- struct ssi_crypto_req *ssi_req;
- struct device *dev = drvdata_to_dev(drvdata);
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- int rc = 0;
-#endif
-
- while (request_mgr_handle->axi_completed) {
- request_mgr_handle->axi_completed--;
-
- /* Dequeue request */
- if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
- /* We are supposed to handle a completion but our
- * queue is empty. This is not normal. Return and
- * hope for the best.
- */
- dev_err(dev, "Request queue is empty head == tail %u\n",
- request_mgr_handle->req_queue_head);
- break;
- }
-
- ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
-
-#ifdef FLUSH_CACHE_ALL
- flush_cache_all();
-#endif
-
-#ifdef COMPLETION_DELAY
- /* Delay */
- {
- u32 axi_err;
- int i;
-
- dev_info(dev, "Delay\n");
- for (i = 0; i < 1000000; i++)
- axi_err = cc_ioread(drvdata,
- CC_REG(AXIM_MON_ERR));
- }
-#endif /* COMPLETION_DELAY */
-
- if (likely(ssi_req->user_cb))
- ssi_req->user_cb(dev, ssi_req->user_arg,
- drvdata->cc_base);
- request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- dev_dbg(dev, "Dequeue request tail=%u\n",
- request_mgr_handle->req_queue_tail);
- dev_dbg(dev, "Request completed. axi_completed=%d\n",
- request_mgr_handle->axi_completed);
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_put_suspend(dev);
- if (rc != 0)
- dev_err(dev, "Failed to set runtime suspension %d\n",
- rc);
-#endif
- }
-}
-
-static inline u32 cc_axi_comp_count(struct ssi_drvdata *drvdata)
-{
- return FIELD_GET(AXIM_MON_COMP_VALUE,
- cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
-}
-
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void comp_handler(unsigned long devarg)
-{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- u32 irq;
-
- irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
-
- if (irq & SSI_COMP_IRQ_MASK) {
- /* To avoid the interrupt firing again as we unmask it,
- * clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), SSI_COMP_IRQ_MASK);
-
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
-
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- SSI_COMP_IRQ_MASK);
-
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
- }
- /* After verifying that there is nothing to do,
- * unmask the AXI completion interrupt
- */
- cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
-}
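The handler above follows a general clear-then-recount pattern: acknowledge the interrupt before counting, then re-read the counter after each drain so a completion landing between the two reads is never lost. A device-agnostic sketch of the same pattern, assuming hypothetical ack_irq()/read_count()/process_all() helpers and an example_state holder:

static void drain_completions(struct example_state *st)
{
        ack_irq(st);                    /* clear first, so nothing re-fires */
        st->pending += read_count(st);

        while (st->pending) {
                do {
                        process_all(st);        /* consumes st->pending */
                        st->pending = read_count(st);
                } while (st->pending);

                ack_irq(st);                    /* clear again ... */
                st->pending += read_count(st);  /* ... and catch stragglers */
        }
}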
-
-/*
- * Resume the queue configuration. The runtime-suspend flag is
- * cleared under the HW lock.
- */
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * Suspend the queue configuration. Since it is used only for runtime
- * suspend, just verify that the queue can be suspended.
- */
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
-
diff --git a/drivers/staging/ccree/ssi_request_mgr.h b/drivers/staging/ccree/ssi_request_mgr.h
deleted file mode 100644
index bdbbf89e5367..000000000000
--- a/drivers/staging/ccree/ssi_request_mgr.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_request_mgr.h
- * Request Manager
- */
-
-#ifndef __REQUEST_MGR_H__
-#define __REQUEST_MGR_H__
-
-#include "cc_hw_queue_defs.h"
-
-int request_mgr_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Enqueue caller request to crypto hardware.
- *
- * \param drvdata
- * \param ssi_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
- *
- * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
- */
-int send_request(
- struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
- struct cc_hw_desc *desc, unsigned int len, bool is_dout);
-
-int send_request_init(
- struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len);
-
-void complete_request(struct ssi_drvdata *drvdata);
-
-void request_mgr_fini(struct ssi_drvdata *drvdata);
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
-
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
-
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata);
-#endif
-
-#endif /*__REQUEST_MGR_H__*/
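A hedged sketch of how a (hypothetical) runtime-PM callback would pair with these hooks: suspend only once the request queue has drained, and back off on -EBUSY:

#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
static int example_runtime_suspend(struct ssi_drvdata *drvdata)
{
        int rc = ssi_request_mgr_runtime_suspend_queue(drvdata);

        if (rc)
                return rc;      /* -EBUSY: requests still queued, retry later */

        /* ... safe to power down the engine here ... */
        return 0;
}
#endif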
diff --git a/drivers/staging/ccree/ssi_sram_mgr.h b/drivers/staging/ccree/ssi_sram_mgr.h
deleted file mode 100644
index 9ba1d59a0bae..000000000000
--- a/drivers/staging/ccree/ssi_sram_mgr.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_SRAM_MGR_H__
-#define __SSI_SRAM_MGR_H__
-
-#ifndef SSI_CC_SRAM_SIZE
-#define SSI_CC_SRAM_SIZE 4096
-#endif
-
-struct ssi_drvdata;
-
-/**
- * Address (offset) within CC internal SRAM
- */
-
-typedef u64 ssi_sram_addr_t;
-
-#define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
-
-/*!
- * Initializes the SRAM pool.
- * The first X bytes of SRAM are reserved for ROM usage, hence the
- * pool starts right after those X bytes.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Uninitializes the SRAM pool.
- *
- * \param drvdata
- */
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
-
-/*!
- * Allocates a buffer from the SRAM pool.
- * Note: the caller is responsible for freeing the LAST allocated
- * buffer. This function does not handle any fragmentation that may
- * occur due to the order of alloc/free calls.
- *
- * \param drvdata
- * \param size The requested number of bytes to allocate
- */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
-
-/**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
- * set values in given array into SRAM.
- * Note: each const value can't exceed word size.
- *
- * @src: A pointer to array of words to set as consts.
- * @dst: The target SRAM buffer to set into
- * @nelement: The number of words in the "src" array
- * @seq: A pointer to the given IN/OUT descriptor sequence
- * @seq_len: A pointer to the given IN/OUT sequence length
- */
-void ssi_sram_mgr_const2sram_desc(
- const u32 *src, ssi_sram_addr_t dst,
- unsigned int nelement,
- struct cc_hw_desc *seq, unsigned int *seq_len);
-
-#endif /*__SSI_SRAM_MGR_H__*/
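Taken together, the pool behaves like a simple bump allocator. A hedged usage sketch with hypothetical sizes and names: allocate a region, then build a descriptor sequence that seeds it with constants (assuming one descriptor per constant word):

static int example_seed_sram(struct ssi_drvdata *drvdata)
{
        static const u32 consts[] = { 0x01020304, 0x05060708 };
        struct cc_hw_desc seq[ARRAY_SIZE(consts)];
        unsigned int seq_len = 0;
        ssi_sram_addr_t buf;

        buf = ssi_sram_mgr_alloc(drvdata, sizeof(consts));
        if (buf == NULL_SRAM_ADDR)
                return -ENOMEM;

        ssi_sram_mgr_const2sram_desc(consts, buf, ARRAY_SIZE(consts),
                                     seq, &seq_len);
        /* seq[0..seq_len-1] is now ready to hand to send_request_init() */
        return 0;
}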
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
deleted file mode 100644
index 5d39f15cdb59..000000000000
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "cc_crypto_ctx.h"
-#include "ssi_sysfs.h"
-
-#ifdef ENABLE_CC_SYSFS
-
-static struct ssi_drvdata *sys_get_drvdata(void);
-
-static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct ssi_drvdata *drvdata = sys_get_drvdata();
- u32 register_value;
- int offset = 0;
-
- register_value = cc_ioread(drvdata, CC_REG(HOST_SIGNATURE));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(HOST_IRR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(HOST_POWER_DOWN_EN));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
- return offset;
-}
-
-static ssize_t ssi_sys_help_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- char *help_str[] = {
- "cat reg_dump ", "Print several of CC register values",
- };
- int i = 0, offset = 0;
-
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n");
- for (i = 0; i < ARRAY_SIZE(help_str); i += 2)
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i + 1]);
-
- return offset;
-}
-
-/********************************************************
- * SYSFS objects *
- ********************************************************/
-/*
- * Structure used to create a directory
- * and its attributes in sysfs.
- */
-struct sys_dir {
- struct kobject *sys_dir_kobj;
- struct attribute_group sys_dir_attr_group;
- struct attribute **sys_dir_attr_list;
- u32 num_of_attrs;
- struct ssi_drvdata *drvdata; /* Associated driver context */
-};
-
-/* top level directory structures */
-static struct sys_dir sys_top_dir;
-
-/* TOP LEVEL ATTRIBUTES */
-static struct kobj_attribute ssi_sys_top_level_attrs[] = {
- __ATTR(dump_regs, 0444, ssi_sys_regdump_show, NULL),
- __ATTR(help, 0444, ssi_sys_help_show, NULL),
-#if defined CC_CYCLE_COUNT
- __ATTR(stats_host, 0664, ssi_sys_stat_host_db_show, ssi_sys_stats_host_db_clear),
- __ATTR(stats_cc, 0664, ssi_sys_stat_cc_db_show, ssi_sys_stats_cc_db_clear),
-#endif
-
-};
-
-static struct ssi_drvdata *sys_get_drvdata(void)
-{
- /* TODO: supporting multiple SeP devices would require avoiding
- * the global "top_dir" and finding the associated "top_dir" by
- * traversing up the tree to the kobject which matches one of the
- * top_dirs.
- */
- return sys_top_dir.drvdata;
-}
-
-static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
- struct kobject *parent_dir_kobj, const char *dir_name,
- struct kobj_attribute *attrs, u32 num_of_attrs)
-{
- int i;
-
- memset(sys_dir, 0, sizeof(struct sys_dir));
-
- sys_dir->drvdata = drvdata;
-
- /* initialize directory kobject */
- sys_dir->sys_dir_kobj =
- kobject_create_and_add(dir_name, parent_dir_kobj);
-
- if (!(sys_dir->sys_dir_kobj))
- return -ENOMEM;
- /* allocate memory for directory's attributes list */
- sys_dir->sys_dir_attr_list =
- kcalloc(num_of_attrs + 1, sizeof(struct attribute *),
- GFP_KERNEL);
-
- if (!(sys_dir->sys_dir_attr_list)) {
- kobject_put(sys_dir->sys_dir_kobj);
- return -ENOMEM;
- }
-
- sys_dir->num_of_attrs = num_of_attrs;
-
- /* initialize attributes list */
- for (i = 0; i < num_of_attrs; ++i)
- sys_dir->sys_dir_attr_list[i] = &attrs[i].attr;
-
- /* last list entry should be NULL */
- sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
-
- sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
-
- return sysfs_create_group(sys_dir->sys_dir_kobj,
- &sys_dir->sys_dir_attr_group);
-}
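The registration in sys_init_dir() reduces to the standard kobject/attribute-group dance. A minimal standalone sketch with hypothetical "demo" names, reusing the show callback defined above:

static struct kobj_attribute demo_attr =
        __ATTR(help, 0444, ssi_sys_help_show, NULL);

static struct attribute *demo_attrs[] = {
        &demo_attr.attr,
        NULL,                           /* list must be NULL-terminated */
};

static const struct attribute_group demo_group = {
        .attrs = demo_attrs,
};

static int demo_register(struct kobject *parent)
{
        struct kobject *kobj = kobject_create_and_add("cc_demo", parent);

        if (!kobj)
                return -ENOMEM;
        return sysfs_create_group(kobj, &demo_group);
}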
-
-static void sys_free_dir(struct sys_dir *sys_dir)
-{
- if (!sys_dir)
- return;
-
- kfree(sys_dir->sys_dir_attr_list);
-
- if (sys_dir->sys_dir_kobj)
- kobject_put(sys_dir->sys_dir_kobj);
-}
-
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
-{
- int retval;
- struct device *dev = drvdata_to_dev(drvdata);
-
- dev_info(dev, "setup sysfs under %s\n", sys_dev_obj->name);
-
- /* Initialize top directory */
- retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
- ssi_sys_top_level_attrs,
- ARRAY_SIZE(ssi_sys_top_level_attrs));
- return retval;
-}
-
-void ssi_sysfs_fini(void)
-{
- sys_free_dir(&sys_top_dir);
-}
-
-#endif /*ENABLE_CC_SYSFS*/
-
diff --git a/drivers/staging/ccree/ssi_sysfs.h b/drivers/staging/ccree/ssi_sysfs.h
deleted file mode 100644
index 44ae3d4c40b3..000000000000
--- a/drivers/staging/ccree/ssi_sysfs.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_sysfs.h
- * ARM CryptoCell sysfs APIs
- */
-
-#ifndef __SSI_SYSFS_H__
-#define __SSI_SYSFS_H__
-
-#include <asm/timex.h>
-
-/* forward declaration */
-struct ssi_drvdata;
-
-enum stat_phase {
- STAT_PHASE_0 = 0,
- STAT_PHASE_1,
- STAT_PHASE_2,
- STAT_PHASE_3,
- STAT_PHASE_4,
- STAT_PHASE_5,
- STAT_PHASE_6,
- MAX_STAT_PHASES,
-};
-
-enum stat_op {
- STAT_OP_TYPE_NULL = 0,
- STAT_OP_TYPE_ENCODE,
- STAT_OP_TYPE_DECODE,
- STAT_OP_TYPE_SETKEY,
- STAT_OP_TYPE_GENERIC,
- MAX_STAT_OP_TYPES,
-};
-
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata);
-void ssi_sysfs_fini(void);
-void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result);
-void update_cc_stat(unsigned int op_type, unsigned int phase, unsigned int elapsed_cycles);
-void display_all_stat_db(void);
-
-#endif /*__SSI_SYSFS_H__*/
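A hedged sketch of how the stat hooks declared above are meant to be fed (hypothetical caller; only relevant when the cycle-count instrumentation is built in):

#ifdef CC_CYCLE_COUNT
static void example_timed_encode(void)
{
        cycles_t t0 = get_cycles();     /* from <asm/timex.h> */

        /* ... perform the encode operation ... */

        update_host_stat(STAT_OP_TYPE_ENCODE, STAT_PHASE_0,
                         get_cycles() - t0);
}
#endif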