Diffstat (limited to 'drivers/staging/ccree')
-rw-r--r--  drivers/staging/ccree/Kconfig             |    9
-rw-r--r--  drivers/staging/ccree/Makefile            |    2
-rw-r--r--  drivers/staging/ccree/cc_hw_queue_defs.h  |    3
-rw-r--r--  drivers/staging/ccree/ssi_aead.c          |  246
-rw-r--r--  drivers/staging/ccree/ssi_aead.h          |   12
-rw-r--r--  drivers/staging/ccree/ssi_buffer_mgr.c    |  473
-rw-r--r--  drivers/staging/ccree/ssi_cipher.c        |  185
-rw-r--r--  drivers/staging/ccree/ssi_driver.c        |   64
-rw-r--r--  drivers/staging/ccree/ssi_driver.h        |    1
-rw-r--r--  drivers/staging/ccree/ssi_fips.c          |  119
-rw-r--r--  drivers/staging/ccree/ssi_fips.h          |   58
-rw-r--r--  drivers/staging/ccree/ssi_fips_data.h     |  306
-rw-r--r--  drivers/staging/ccree/ssi_fips_ext.c      |   92
-rw-r--r--  drivers/staging/ccree/ssi_fips_ll.c       | 1649
-rw-r--r--  drivers/staging/ccree/ssi_fips_local.c    |  357
-rw-r--r--  drivers/staging/ccree/ssi_fips_local.h    |   67
-rw-r--r--  drivers/staging/ccree/ssi_hash.c          |  276
-rw-r--r--  drivers/staging/ccree/ssi_ivgen.c         |   13
-rw-r--r--  drivers/staging/ccree/ssi_pm.c            |    4
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.c   |   54
-rw-r--r--  drivers/staging/ccree/ssi_sram_mgr.c      |    6
-rw-r--r--  drivers/staging/ccree/ssi_sysfs.c         |   80
22 files changed, 849 insertions(+), 3227 deletions(-)
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index 36a87c686a2a..0b3092ba2fcb 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -23,12 +23,3 @@ config CRYPTO_DEV_CCREE
Choose this if you wish to use hardware acceleration of
cryptographic operations on the system REE.
If unsure say Y.
-
-config CCREE_FIPS_SUPPORT
- bool "Turn on CryptoCell 7XX REE FIPS mode support"
- depends on CRYPTO_DEV_CCREE
- default n
- help
- Say 'Y' to enable support for FIPS compliant mode by the
- CCREE driver.
- If unsure say N.
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index 318c2b39acf6..ae702f3b5369 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
-ccree-$(CCREE_FIPS_SUPPORT) += ssi_fips.o ssi_fips_ll.o ssi_fips_ext.o ssi_fips_local.o
+ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o
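The Makefile now keys the FIPS objects off the kernel-wide CONFIG_CRYPTO_FIPS symbol instead of the dropped driver-local option (whose old reference was also missing its CONFIG_ prefix). At runtime the same kernel-wide policy is normally queried through the global fips_enabled flag from <linux/fips.h>; the sketch below only illustrates that interface, and the helper name is made up:

#include <linux/fips.h>
#include <linux/printk.h>

/* hypothetical helper: gate driver behaviour on the kernel-wide
 * FIPS switch rather than on a private Kconfig option
 */
static bool ccree_example_fips_active(void)
{
	/* fips_enabled is set by the fips=1 boot parameter when
	 * CONFIG_CRYPTO_FIPS is built in; it reads as 0 otherwise
	 */
	if (fips_enabled)
		pr_debug("ccree: FIPS mode is active\n");
	return fips_enabled;
}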
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index e6b8cea3f88d..2ae0f655e7a0 100644
--- a/drivers/staging/ccree/cc_hw_queue_defs.h
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -27,7 +27,8 @@
******************************************************************************/
#define HW_DESC_SIZE_WORDS 6
-#define HW_QUEUE_SLOTS_MAX 15 /* Max. available slots in HW queue */
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX 15
#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name
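CC_REG_NAME builds register identifiers with the preprocessor's ## token-pasting operator. A minimal standalone sketch of the same technique, with all macro and constant names invented for illustration:

#include <stdio.h>

/* paste "word" and "name" into a single identifier, mirroring how
 * CC_REG_NAME(1, VALUE) expands to DX_DSCRPTR_QUEUE_WORD1_VALUE
 */
#define REG_NAME(word, name)	QUEUE_WORD ## word ## _ ## name

#define QUEUE_WORD0_VALID	0x01
#define QUEUE_WORD1_VALUE	0xFF

int main(void)
{
	/* REG_NAME(0, VALID) becomes the identifier QUEUE_WORD0_VALID
	 * during preprocessing, before compilation proper
	 */
	printf("0x%02X 0x%02X\n", REG_NAME(0, VALID), REG_NAME(1, VALUE));
	return 0;
}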
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 1fc0b05ea0d5..5abe6b24ff8c 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -36,7 +36,6 @@
#include "ssi_hash.h"
#include "ssi_sysfs.h"
#include "ssi_sram_mgr.h"
-#include "ssi_fips_local.h"
#define template_aead template_u.aead
@@ -57,22 +56,26 @@ struct ssi_aead_handle {
struct list_head aead_list;
};
+struct cc_hmac_s {
+ u8 *padded_authkey;
+ u8 *ipad_opad; /* IPAD, OPAD*/
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
struct ssi_aead_ctx {
struct ssi_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
union {
- struct {
- u8 *padded_authkey;
- u8 *ipad_opad; /* IPAD, OPAD*/
- dma_addr_t padded_authkey_dma_addr;
- dma_addr_t ipad_opad_dma_addr;
- } hmac;
- struct {
- u8 *xcbc_keys; /* K1,K2,K3 */
- dma_addr_t xcbc_keys_dma_addr;
- } xcbc;
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
} auth_state;
unsigned int enc_keylen;
unsigned int auth_keylen;
@@ -93,46 +96,50 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
+ crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
dev = &ctx->drvdata->plat_dev->dev;
/* Unmap enckey buffer */
if (ctx->enckey) {
dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
- SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->enckey_dma_addr);
+ SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
- if (ctx->auth_state.xcbc.xcbc_keys) {
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
- ctx->auth_state.xcbc.xcbc_keys,
- ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
}
- SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
- ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
- ctx->auth_state.xcbc.xcbc_keys = NULL;
+ SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
- if (ctx->auth_state.hmac.ipad_opad) {
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
- ctx->auth_state.hmac.ipad_opad,
- ctx->auth_state.hmac.ipad_opad_dma_addr);
- SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
- ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
- ctx->auth_state.hmac.ipad_opad = NULL;
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
}
- if (ctx->auth_state.hmac.padded_authkey) {
+ if (hmac->padded_authkey) {
dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
- ctx->auth_state.hmac.padded_authkey,
- ctx->auth_state.hmac.padded_authkey_dma_addr);
- SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
- (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
- ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
- ctx->auth_state.hmac.padded_authkey = NULL;
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
}
}
}
@@ -144,9 +151,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));
-
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
ctx->cipher_mode = ssi_alg->cipher_mode;
@@ -158,7 +163,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
/* Allocate key buffer, cache line aligned */
ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
- &ctx->enckey_dma_addr, GFP_KERNEL);
+ &ctx->enckey_dma_addr, GFP_KERNEL);
if (!ctx->enckey) {
SSI_LOG_ERR("Failed allocating key buffer\n");
goto init_failed;
@@ -168,31 +173,42 @@ static int ssi_aead_init(struct crypto_aead *tfm)
/* Set default authlen value */
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
/* (and temporary for user key - up to 256b) */
- ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
- CC_AES_128_BIT_KEY_SIZE * 3,
- &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.xcbc.xcbc_keys) {
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
goto init_failed;
}
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
/* Allocate dma-coherent buffer for IPAD + OPAD */
- ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
- 2 * MAX_HMAC_DIGEST_SIZE,
- &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.ipad_opad) {
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
goto init_failed;
}
+
SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
- ctx->auth_state.hmac.ipad_opad);
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
- ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
- MAX_HMAC_BLOCK_SIZE,
- &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
- if (!ctx->auth_state.hmac.padded_authkey) {
+ if (!hmac->padded_authkey) {
SSI_LOG_ERR("failed to allocate padded_authkey\n");
goto init_failed;
}
@@ -223,7 +239,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
- ctx->authsize) != 0) {
+ ctx->authsize) != 0) {
SSI_LOG_DEBUG("Payload authentication failure, "
"(auth-size=%d, cipher=%d).\n",
ctx->authsize, ctx->cipher_mode);
@@ -236,8 +252,8 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
- areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+ areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
+ areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv) {
@@ -292,12 +308,13 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
{
- unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
int idx = 0;
int i;
@@ -325,7 +342,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], hash_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
@@ -334,7 +351,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
/* Perform HASH update */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- ctx->auth_state.hmac.padded_authkey_dma_addr,
+ hmac->padded_authkey_dma_addr,
SHA256_BLOCK_SIZE, NS_BIT);
set_cipher_mode(&desc[idx], hash_mode);
set_xor_active(&desc[idx]);
@@ -345,8 +362,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_dlli(&desc[idx],
- (ctx->auth_state.hmac.ipad_opad_dma_addr +
- digest_ofs), digest_size, NS_BIT, 0);
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
@@ -361,7 +378,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
{
SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
- ctx->enc_keylen, ctx->auth_keylen);
+ ctx->enc_keylen, ctx->auth_keylen);
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
@@ -385,7 +402,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
- ctx->enc_keylen);
+ ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
@@ -393,7 +410,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
(ctx->enc_keylen != AES_KEYSIZE_192) &&
(ctx->enc_keylen != AES_KEYSIZE_256)) {
SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
- ctx->enc_keylen);
+ ctx->enc_keylen);
return -EINVAL;
}
}
@@ -536,9 +553,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
int seq_len = 0, rc = -EINVAL;
SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
- ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
+ key, keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
@@ -654,7 +671,6 @@ static int ssi_aead_setauthsize(
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Unsupported auth. sizes */
if ((authsize == 0) ||
(authsize > crypto_aead_maxauthsize(authenc))) {
@@ -669,7 +685,7 @@ static int ssi_aead_setauthsize(
#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 8:
@@ -684,7 +700,7 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
}
static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -762,11 +778,11 @@ ssi_aead_process_authenc_data_desc(
{
struct scatterlist *cipher =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstSgl : areq_ctx->srcSgl;
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- areq_ctx->dstOffset : areq_ctx->srcOffset;
+ areq_ctx->dst_offset : areq_ctx->src_offset;
SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -828,11 +844,11 @@ ssi_aead_process_cipher_data_desc(
SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
- (sg_dma_address(areq_ctx->srcSgl) +
- areq_ctx->srcOffset), areq_ctx->cryptlen, NS_BIT);
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
set_dout_dlli(&desc[idx],
- (sg_dma_address(areq_ctx->dstSgl) +
- areq_ctx->dstOffset),
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
areq_ctx->cryptlen, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
@@ -1168,8 +1184,8 @@ static inline void ssi_aead_load_mlli_to_sram(
(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
!req_ctx->is_single_pass)) {
SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
@@ -1313,7 +1329,8 @@ ssi_aead_xcbc_authenc(
}
static int validate_data_size(struct ssi_aead_ctx *ctx,
- enum drv_crypto_direction direct, struct aead_request *req)
+ enum drv_crypto_direction direct,
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int assoclen = req->assoclen;
@@ -1321,7 +1338,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
(req->cryptlen - ctx->authsize) : req->cryptlen;
if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- (req->cryptlen < ctx->authsize)))
+ (req->cryptlen < ctx->authsize)))
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
@@ -1329,7 +1346,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
switch (ctx->flow_mode) {
case S_DIN_to_AES:
if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
- !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1365,27 +1382,27 @@ data_size_err:
}
#if SSI_CC_HAS_AES_CCM
-static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
unsigned int len = 0;
- if (headerSize == 0)
+ if (header_size == 0)
return 0;
- if (headerSize < ((1UL << 16) - (1UL << 8))) {
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
len = 2;
- pA0Buff[0] = (headerSize >> 8) & 0xFF;
- pA0Buff[1] = headerSize & 0xFF;
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
} else {
len = 6;
- pA0Buff[0] = 0xFF;
- pA0Buff[1] = 0xFE;
- pA0Buff[2] = (headerSize >> 24) & 0xFF;
- pA0Buff[3] = (headerSize >> 16) & 0xFF;
- pA0Buff[4] = (headerSize >> 8) & 0xFF;
- pA0Buff[5] = headerSize & 0xFF;
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
}
return len;
@@ -1557,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
- if (2 > l || l > 8) {
+ if (l < 2 || l > 8) {
SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
@@ -1848,8 +1865,9 @@ static inline void ssi_aead_dump_gcm(
SSI_LOG_DEBUG("%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
+ SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+ ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+ req->assoclen, req_ctx->cryptlen);
if (ctx->enckey)
dump_byte_array("mac key", ctx->enckey, 16);
@@ -1864,7 +1882,7 @@ static inline void ssi_aead_dump_gcm(
dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
- dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
+ dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
if (req->src && req->cryptlen)
dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
@@ -1886,7 +1904,7 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize);
+ SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1903,16 +1921,16 @@ static int config_gcm_context(struct aead_request *req)
__be64 temp64;
temp64 = cpu_to_be64(req->assoclen * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
} else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
__be64 temp64;
temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
- memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
- memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
}
return 0;
@@ -1944,16 +1962,16 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
struct ssi_crypto_req ssi_req = {};
SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
- ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
- sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -1976,7 +1994,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
if (!areq_ctx->backup_giv) /*User none-generated IV*/
memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
- req->iv, CTR_RFC3686_IV_SIZE);
+ req->iv, CTR_RFC3686_IV_SIZE);
/* Initialize counter portion of counter block */
*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2198,7 +2216,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2216,7 +2234,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key);
+ SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2230,7 +2248,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
}
static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -2249,9 +2267,9 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
}
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+ unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d\n", authsize);
+ SSI_LOG_DEBUG("authsize %d\n", authsize);
switch (authsize) {
case 8:
@@ -2268,7 +2286,7 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d\n", authsize);
+ SSI_LOG_DEBUG("authsize %d\n", authsize);
if (authsize != 16)
return -EINVAL;
@@ -2641,7 +2659,7 @@ static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *templ
struct ssi_crypto_alg *t_alg;
struct aead_alg *alg;
- t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
SSI_LOG_ERR("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
@@ -2696,7 +2714,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
int rc = -ENOMEM;
int alg;
- aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
+ aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
rc = -ENOMEM;
goto fail0;
@@ -2720,14 +2738,14 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
SSI_LOG_ERR("%s alg allocation failed\n",
- aead_algs[alg].driver_name);
+ aead_algs[alg].driver_name);
goto fail1;
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->aead_alg.base.cra_driver_name);
+ t_alg->aead_alg.base.cra_driver_name);
goto fail2;
} else {
list_add_tail(&t_alg->entry, &aead_handle->aead_list);
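The most common change in ssi_aead.c (and repeated in the files below) replaces (unsigned long long) casts under %llX with the kernel's %pad printk extension, which prints a dma_addr_t at its configured width. Unlike ordinary specifiers, %pad takes the address of the variable. A minimal sketch; the function here is hypothetical:

#include <linux/printk.h>
#include <linux/types.h>

static void example_log_dma(dma_addr_t addr)
{
	/* old style: an explicit cast is needed because dma_addr_t
	 * may be 32 or 64 bits depending on the configuration
	 */
	pr_debug("dma=0x%llX\n", (unsigned long long)addr);

	/* new style: %pad handles the width itself; note the argument
	 * is passed by address, not by value
	 */
	pr_debug("dma=%pad\n", &addr);
}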
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index 39cc633a3ffa..e85bcd917e7b 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -69,8 +69,8 @@ struct aead_req_ctx {
u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
struct {
- u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
- u8 lenC[GCM_BLOCK_LEN_SIZE];
+ u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ u8 len_c[GCM_BLOCK_LEN_SIZE];
} gcm_len_block;
u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
@@ -94,10 +94,10 @@ struct aead_req_ctx {
struct ssi_mlli assoc;
struct ssi_mlli src;
struct ssi_mlli dst;
- struct scatterlist *srcSgl;
- struct scatterlist *dstSgl;
- unsigned int srcOffset;
- unsigned int dstOffset;
+ struct scatterlist *src_sgl;
+ struct scatterlist *dst_sgl;
+ unsigned int src_offset;
+ unsigned int dst_offset;
enum ssi_req_dma_buf_type assoc_buff_type;
enum ssi_req_dma_buf_type data_buff_type;
struct mlli_params mlli_params;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index b35871eeabd1..63936091d524 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -150,7 +150,7 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
- u32 new_nents;;
+ u32 new_nents;
/* Verify there is no memory overflow*/
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
@@ -162,8 +162,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
mlli_entry_p = mlli_entry_p + 2;
@@ -173,8 +173,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
*mlli_entry_pp = mlli_entry_p;
(*curr_nents)++;
@@ -182,8 +182,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
}
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
- u32 **mlli_entry_pp)
+ struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
@@ -192,16 +192,17 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
for ( ; (curr_sgl) && (sgl_data_len != 0);
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
- (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
- sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
+ (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+ sg_dma_len(curr_sgl) - sgl_offset :
+ sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
- &mlli_entry_p);
+ sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
if (rc != 0)
return rc;
- sglOffset = 0;
+ sgl_offset = 0;
}
*mlli_entry_pp = mlli_entry_p;
return 0;
@@ -221,7 +222,7 @@ static int ssi_buffer_mgr_generate_mlli(
/* Allocate memory from the pointed pool */
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
- &(mlli_params->mlli_dma_addr));
+ &mlli_params->mlli_dma_addr);
if (unlikely(!mlli_params->mlli_virt_addr)) {
SSI_LOG_ERR("dma_pool_alloc() failed\n");
rc = -ENOMEM;
@@ -249,7 +250,7 @@ static int ssi_buffer_mgr_generate_mlli(
/*Calculate the current MLLI table length for the
*length field in the descriptor
*/
- *(sg_data->mlli_nents[i]) +=
+ *sg_data->mlli_nents[i] +=
(total_nents - prev_total_nents);
prev_total_nents = total_nents;
}
@@ -259,9 +260,9 @@ static int ssi_buffer_mgr_generate_mlli(
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
SSI_LOG_DEBUG("MLLI params: "
- "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
+ "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr,
- (unsigned long long)mlli_params->mlli_dma_addr,
+ mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
build_mlli_exit:
@@ -275,9 +276,9 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
+ SSI_LOG_DEBUG("index=%u single_buff=%pad "
"buffer_len=0x%08X is_last=%d\n",
- index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
+ index, buffer_dma, buffer_len, is_last_entry);
sgl_data->nents[index] = 1;
sgl_data->entry[index].buffer_dma = buffer_dma;
sgl_data->offset[index] = 0;
@@ -302,7 +303,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
unsigned int index = sgl_data->num_of_buffers;
SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
- index, nents, sgl, data_len, is_last_table);
+ index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
@@ -317,7 +318,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
@@ -358,10 +359,10 @@ static int ssi_buffer_mgr_map_scatterlist(
SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped sg: dma_address=%pad "
"page=%p addr=%pK offset=%u "
"length=%u\n",
- (unsigned long long)sg_dma_address(sg),
+ sg_dma_address(sg),
sg_page(sg),
sg_virt(sg),
sg->offset, sg->length);
@@ -370,11 +371,11 @@ static int ssi_buffer_mgr_map_scatterlist(
*mapped_nents = 1;
} else { /*sg_is_last*/
*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
- &is_chained);
+ &is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
+ *nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
@@ -392,9 +393,9 @@ static int ssi_buffer_mgr_map_scatterlist(
* must have the same nents before and after map
*/
*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
- sg,
- *nents,
- direction);
+ sg,
+ *nents,
+ direction);
if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
@@ -408,10 +409,10 @@ static int ssi_buffer_mgr_map_scatterlist(
static inline int
ssi_aead_handle_config_buf(struct device *dev,
- struct aead_req_ctx *areq_ctx,
- u8 *config_data,
- struct buffer_array *sg_data,
- unsigned int assoclen)
+ struct aead_req_ctx *areq_ctx,
+ u8 *config_data,
+ struct buffer_array *sg_data,
+ unsigned int assoclen)
{
SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
/* create sg for the current buffer */
@@ -422,10 +423,10 @@ ssi_aead_handle_config_buf(struct device *dev,
"config buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
"page=%p addr=%pK "
"offset=%u length=%u\n",
- (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
areq_ctx->ccm_adata_sg.offset,
@@ -433,19 +434,18 @@ ssi_aead_handle_config_buf(struct device *dev,
/* prepare for case of MLLI */
if (assoclen > 0) {
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
- &areq_ctx->ccm_adata_sg,
- (AES_BLOCK_SIZE +
- areq_ctx->ccm_hdr_size), 0,
- false, NULL);
+ &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
}
return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
- struct ahash_req_ctx *areq_ctx,
- u8 *curr_buff,
- u32 curr_buff_cnt,
- struct buffer_array *sg_data)
+ struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff,
+ u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
{
SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
@@ -456,10 +456,10 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
"src buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
"page=%p addr=%pK "
"offset=%u length=%u\n",
- (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_address(areq_ctx->buff_sg),
sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg),
areq_ctx->buff_sg->offset,
@@ -469,7 +469,7 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
- curr_buff_cnt, 0, false, NULL);
+ curr_buff_cnt, 0, false, NULL);
return 0;
}
@@ -483,9 +483,9 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
- SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
- (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
- ivsize);
+ SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ req_ctx->gen_ctx.iv_dma_addr,
+ ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
@@ -498,16 +498,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
req_ctx->mlli_params.mlli_dma_addr);
}
- dma_unmap_sg(dev, src, req_ctx->in_nents,
- DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
- sg_virt(src));
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
if (src != dst) {
- dma_unmap_sg(dev, dst, req_ctx->out_nents,
- DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
- sg_virt(dst));
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
}
}
@@ -542,22 +538,24 @@ int ssi_buffer_mgr_map_blkcipher_request(
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
- req_ctx->gen_ctx.iv_dma_addr))) {
+ req_ctx->gen_ctx.iv_dma_addr))) {
SSI_LOG_ERR("Mapping iv %u B at va=%pK "
"for DMA failed\n", ivsize, info);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
- ivsize, info,
- (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info,
+ req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
}
/* Map the src SGL */
rc = ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto ablkcipher_exit;
@@ -570,8 +568,10 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
req_ctx->out_nents = 0;
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->in_nents, src,
- nbytes, 0, true, &req_ctx->in_mlli_nents);
+ req_ctx->in_nents,
+ src, nbytes, 0,
+ true,
+ &req_ctx->in_mlli_nents);
}
} else {
/* Map the dst sg */
@@ -588,13 +588,15 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->in_nents, src,
- nbytes, 0, true,
- &req_ctx->in_mlli_nents);
+ req_ctx->in_nents,
+ src, nbytes, 0,
+ true,
+ &req_ctx->in_mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- req_ctx->out_nents, dst,
- nbytes, 0, true,
- &req_ctx->out_mlli_nents);
+ req_ctx->out_nents,
+ dst, nbytes, 0,
+ true,
+ &req_ctx->out_mlli_nents);
}
}
@@ -606,7 +608,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
}
SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
return 0;
@@ -628,7 +630,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->mac_buf_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
- MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
}
#if SSI_CC_HAS_AES_GCM
@@ -645,12 +647,12 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
}
#endif
@@ -658,7 +660,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (areq_ctx->ccm_iv0_dma_addr != 0) {
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
@@ -672,9 +674,9 @@ void ssi_buffer_mgr_unmap_aead_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
- (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
@@ -690,14 +692,17 @@ void ssi_buffer_mgr_unmap_aead_request(
dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
- sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ sg_virt(req->dst));
+ dma_unmap_sg(dev, req->dst,
+ ssi_buffer_mgr_get_sgl_nents(req->dst,
+ size_to_unmap,
+ &dummy,
+ &chained),
+ DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst))
- {
+ likely(req->src == req->dst)) {
u32 size_to_skip = req->assoclen;
if (areq_ctx->is_gcm4543)
@@ -753,11 +758,11 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
*is_icv_fragmented = true;
} else {
SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
+ MAX_ICV_NENTS_SUPPORTED);
nents = -1; /*unsupported*/
}
SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
+ (*is_icv_fragmented ? "true" : "false"), nents);
return nents;
}
@@ -778,18 +783,18 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size, DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
- hw_iv_size, req->iv);
+ hw_iv_size, req->iv);
rc = -ENOMEM;
goto chain_iv_exit;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
- hw_iv_size, req->iv,
- (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv,
+ areq_ctx->gen_ctx.iv_dma_addr);
if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
@@ -833,8 +838,8 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -868,10 +873,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (unlikely((mapped_nents + 1) >
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("CCM case.Too many fragments. "
- "Current %d max %d\n",
- (areq_ctx->assoc.nents + 1),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
rc = -ENOMEM;
goto chain_assoc_exit;
}
@@ -884,10 +888,10 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
if (unlikely((do_chain) ||
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+ (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
ssi_buffer_mgr_add_scatterlist_entry(
sg_data, areq_ctx->assoc.nents,
req->src, req->assoclen, 0, is_last,
@@ -911,26 +915,26 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->srcSgl) +
+ areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else {
/*NON-INPLACE and ENCRYPT*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->dstSgl) +
+ areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dstSgl) +
+ areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
}
}
@@ -951,13 +955,18 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
- areq_ctx->src.nents, authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize,
+ *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
@@ -995,27 +1004,35 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
}
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
- &areq_ctx->dst.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
- areq_ctx->src.nents, authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ areq_ctx->dst.nents,
+ areq_ctx->dst_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->dst_offset,
+ is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize,
+ *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
@@ -1039,26 +1056,34 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
}
} else {
/*NON-INPLACE and ENCRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
- &areq_ctx->dst.mlli_nents);
+ areq_ctx->dst.nents,
+ areq_ctx->dst_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->dst_offset,
+ is_last_table,
+ &areq_ctx->dst.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
- areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
- areq_ctx->dst.nents, authsize, *dst_last_bytes,
+ areq_ctx->src.nents,
+ areq_ctx->src_sgl,
+ areq_ctx->cryptlen,
+ areq_ctx->src_offset,
+ is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize,
+ *dst_last_bytes,
&areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
@@ -1068,10 +1093,10 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
if (likely(!areq_ctx->is_icv_fragmented)) {
/* Contig. ICV */
areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
(*dst_last_bytes - authsize);
} else {
areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
@@ -1113,37 +1138,36 @@ static inline int ssi_buffer_mgr_aead_chain_data(
rc = -EINVAL;
goto chain_data_exit;
}
- areq_ctx->srcSgl = req->src;
- areq_ctx->dstSgl = req->dst;
+ areq_ctx->src_sgl = req->src;
+ areq_ctx->dst_sgl = req->dst;
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
- sg_index = areq_ctx->srcSgl->length;
+ sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->srcSgl->length;
- areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
+ offset -= areq_ctx->src_sgl->length;
+ areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
//if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->srcSgl) {
+ if (!areq_ctx->src_sgl) {
SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
- sg_index += areq_ctx->srcSgl->length;
+ sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
- if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
- {
+ if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->src.nents = src_mapped_nents;
- areq_ctx->srcOffset = offset;
+ areq_ctx->src_offset = offset;
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
@@ -1152,9 +1176,11 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
- DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
- &dst_mapped_nents);
+ DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dst_last_bytes,
+ &dst_mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto chain_data_exit;
@@ -1162,35 +1188,37 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
- sg_index = areq_ctx->dstSgl->length;
+ sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->dstSgl->length;
- areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
+ offset -= areq_ctx->dst_sgl->length;
+ areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
//if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dstSgl) {
+ if (!areq_ctx->dst_sgl) {
SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
- sg_index += areq_ctx->dstSgl->length;
+ sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
- if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
- {
+ if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
SSI_LOG_ERR("Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
- areq_ctx->dstOffset = offset;
+ areq_ctx->dst_offset = offset;
if ((src_mapped_nents > 1) ||
(dst_mapped_nents > 1) ||
do_chain) {
areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
- rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes, &dst_last_bytes, is_last_table);
+ rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
+ sg_data,
+ &src_last_bytes,
+ &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
ssi_buffer_mgr_prepare_aead_data_dlli(
@@ -1202,7 +1230,7 @@ chain_data_exit:
}
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
- struct aead_request *req)
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
u32 curr_mlli_size = 0;
@@ -1274,8 +1302,7 @@ int ssi_buffer_mgr_map_aead_request(
if (drvdata->coherent &&
(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst))
- {
+ likely(req->src == req->dst)) {
u32 size_to_skip = req->assoclen;
if (is_gcm4543)
@@ -1296,19 +1323,21 @@ int ssi_buffer_mgr_map_aead_request(
req->cryptlen :
(req->cryptlen - authsize);
- areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
- areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
+ MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
- MAX_MAC_SIZE, areq_ctx->mac_buf);
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
- (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
+ AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
@@ -1319,7 +1348,8 @@ int ssi_buffer_mgr_map_aead_request(
goto aead_map_failure;
}
if (ssi_aead_handle_config_buf(dev, areq_ctx,
- areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
+ areq_ctx->ccm_config, &sg_data,
+ req->assoclen) != 0) {
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1328,26 +1358,31 @@ int ssi_buffer_mgr_map_aead_request(
#if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
areq_ctx->hkey_dma_addr = dma_map_single(dev,
- areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ areq_ctx->hkey,
+ AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, areq_ctx->hkey);
+ AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
- &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc1,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
@@ -1359,8 +1394,9 @@ int ssi_buffer_mgr_map_aead_request(
}
areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc2,
- AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
@@ -1380,7 +1416,7 @@ int ssi_buffer_mgr_map_aead_request(
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
- size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
+ size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
@@ -1491,18 +1527,18 @@ int ssi_buffer_mgr_map_hash_request_final(
/* map the previous buffer */
if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ *curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
}
}
if (src && (nbytes > 0) && do_update) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes,
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents))){
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy,
+ &mapped_nents))){
goto unmap_curr_buff;
}
if (src && (mapped_nents == 1)
@@ -1522,19 +1558,18 @@ int ssi_buffer_mgr_map_hash_request_final(
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- areq_ctx->in_nents,
- src,
- nbytes, 0,
- true, &areq_ctx->mlli_nents);
+ areq_ctx->in_nents,
+ src, nbytes, 0, true,
+ &areq_ctx->mlli_nents);
if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ mlli_params) != 0)) {
goto fail_unmap_din;
}
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
@@ -1588,8 +1623,8 @@ int ssi_buffer_mgr_map_hash_request_update(
&curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
ssi_buffer_mgr_get_sgl_nents(src,
- nbytes,
- &dummy, NULL);
+ nbytes,
+ &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1612,15 +1647,15 @@ int ssi_buffer_mgr_map_hash_request_update(
(update_data_len - *curr_buff_cnt),
*next_buff_cnt);
ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
- (update_data_len - *curr_buff_cnt),
- nbytes, SSI_SG_TO_BUF);
+ (update_data_len - *curr_buff_cnt),
+ nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ *curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
}
/* change the buffer index for next operation */
@@ -1629,11 +1664,12 @@ int ssi_buffer_mgr_map_hash_request_update(
if (update_data_len > *curr_buff_cnt) {
if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents))){
+ (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy,
+ &mapped_nents))) {
goto unmap_curr_buff;
}
if ((mapped_nents == 1)
@@ -1653,12 +1689,14 @@ int ssi_buffer_mgr_map_hash_request_update(
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
- areq_ctx->in_nents,
- src,
- (update_data_len - *curr_buff_cnt), 0,
- true, &areq_ctx->mlli_nents);
+ areq_ctx->in_nents,
+ src,
+ (update_data_len - *curr_buff_cnt),
+ 0,
+ true,
+ &areq_ctx->mlli_nents);
if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ mlli_params) != 0)) {
goto fail_unmap_din;
}
}
@@ -1687,28 +1725,28 @@ void ssi_buffer_mgr_unmap_hash_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
- (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if ((src) && likely(areq_ctx->in_nents != 0)) {
- SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
- sg_virt(src),
- (unsigned long long)sg_dma_address(src),
- sg_dma_len(src));
+ SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src),
+ sg_dma_address(src),
+ sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len != 0) {
SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- " dma=0x%llX len 0x%X\n",
+ " dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
- (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
@@ -1725,8 +1763,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = &drvdata->plat_dev->dev;
- buff_mgr_handle = (struct buff_mgr_handle *)
- kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
+ buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
if (!buff_mgr_handle)
return -ENOMEM;
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index cd2eafc04232..8d31a93fd8b7 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -23,6 +23,8 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
#include "ssi_config.h"
#include "ssi_driver.h"
@@ -31,7 +33,6 @@
#include "ssi_cipher.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
-#include "ssi_fips_local.h"
#define MAX_ABLKCIPHER_SEQ_LEN 6
@@ -68,7 +69,8 @@ struct ssi_ablkcipher_ctx {
static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
+{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (size) {
@@ -92,8 +94,7 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
break;
}
case S_DIN_to_DES:
- if (likely(size == DES3_EDE_KEY_SIZE ||
- size == DES_KEY_SIZE))
+ if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
return 0;
break;
#if SSI_CC_HAS_MULTI2
@@ -108,7 +109,8 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
return -EINVAL;
}
-static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
+static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size)
+{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
@@ -183,10 +185,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
- crypto_tfm_alg_name(tfm));
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n",
+ ctx_p, crypto_tfm_alg_name(tfm));
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx_p->cipher_mode = ssi_alg->cipher_mode;
ctx_p->flow_mode = ssi_alg->flow_mode;
ctx_p->drvdata = ssi_alg->drvdata;
@@ -203,15 +204,16 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
/* Map key buffer */
ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
- max_key_buf_size, DMA_TO_DEVICE);
+ max_key_buf_size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
- max_key_buf_size, ctx_p->user.key);
+ max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
- max_key_buf_size, ctx_p->user.key,
- (unsigned long long)ctx_p->user.key_dma_addr);
+ SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key,
+ ctx_p->user.key_dma_addr);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
@@ -232,7 +234,7 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
unsigned int max_key_buf_size = get_max_keysize(tfm);
SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Free hash tfm for essiv */
@@ -242,9 +244,9 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
/* Unmap key buffer */
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
- DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
- (unsigned long long)ctx_p->user.key_dma_addr);
+ DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=%pad\n",
+ ctx_p->user.key_dma_addr);
/* Free key buffer in context */
kfree(ctx_p->user.key);
@@ -263,31 +265,15 @@ static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that tdes keys are not weak.*/
-static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
+static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
{
-#ifdef CCREE_FIPS_SUPPORT
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
- (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+ (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
return -ENOEXEC;
}
-#endif /* CCREE_FIPS_SUPPORT */
-
- return 0;
-}
-
-/* The function verifies that xts keys are not weak.*/
-static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen)
-{
-#ifdef CCREE_FIPS_SUPPORT
- /* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */
- int singleKeySize = keylen >> 1;
-
- if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0))
- return -ENOEXEC;
-#endif /* CCREE_FIPS_SUPPORT */
return 0;
}
@@ -317,12 +303,10 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
unsigned int max_key_buf_size = get_max_keysize(tfm);
SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
- ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
dump_byte_array("key", (u8 *)key, keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
+ SSI_LOG_DEBUG("after FIPS check");
/* STAT_PHASE_0: Init and sanity checks */
@@ -368,7 +352,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");
+ SSI_LOG_DEBUG("ssi_is_hw_key ret 0");
return 0;
}
@@ -378,25 +362,25 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak DES key");
+ SSI_LOG_DEBUG("weak DES key");
return -EINVAL;
}
}
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
- ssi_fips_verify_xts_keys(key, keylen) != 0) {
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
+ xts_check_key(tfm, key, keylen) != 0) {
+ SSI_LOG_DEBUG("weak XTS key");
return -EINVAL;
}
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
- ssi_fips_verify_3des_keys(key, keylen) != 0) {
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
+ ssi_verify_3des_keys(key, keylen) != 0) {
+ SSI_LOG_DEBUG("weak 3DES key");
return -EINVAL;
}
/* STAT_PHASE_1: Copy key to ctx */
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
- max_key_buf_size, DMA_TO_DEVICE);
+ max_key_buf_size, DMA_TO_DEVICE);
if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
#if SSI_CC_HAS_MULTI2
@@ -405,7 +389,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
+ SSI_LOG_DEBUG("SSI_CC_HAS_MULTI2 einval");
return -EINVAL;
#endif /*SSI_CC_HAS_MULTI2*/
} else {
@@ -429,10 +413,10 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
- max_key_buf_size, DMA_TO_DEVICE);
+ max_key_buf_size, DMA_TO_DEVICE);
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
+ SSI_LOG_DEBUG("return safely");
return 0;
}
@@ -632,17 +616,15 @@ ssi_blkcipher_create_data_desc(
break;
#endif /*SSI_CC_HAS_MULTI2*/
default:
- SSI_LOG_ERR("invalid flow mode, flow_mode = %d \n", flow_mode);
+ SSI_LOG_ERR("invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
- SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
- (unsigned long long)sg_dma_address(src),
- nbytes);
- SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
- (unsigned long long)sg_dma_address(dst),
- nbytes);
+ SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
+ sg_dma_address(src), nbytes);
+ SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
+ sg_dma_address(dst), nbytes);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
nbytes, NS_BIT);
@@ -655,9 +637,9 @@ ssi_blkcipher_create_data_desc(
(*seq_size)++;
} else {
/* bypass */
- SSI_LOG_DEBUG(" bypass params addr 0x%llX "
+ SSI_LOG_DEBUG(" bypass params addr %pad "
"length 0x%X addr 0x%08X\n",
- (unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
@@ -706,16 +688,17 @@ ssi_blkcipher_create_data_desc(
}
static int ssi_blkcipher_complete(struct device *dev,
- struct ssi_ablkcipher_ctx *ctx_p,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int ivsize,
- void *areq,
- void __iomem *cc_base)
+ struct ssi_ablkcipher_ctx *ctx_p,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ void *areq,
+ void __iomem *cc_base)
{
int completion_error = 0;
u32 inflight_counter;
+ struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
@@ -726,6 +709,22 @@ static int ssi_blkcipher_complete(struct device *dev,
ctx_p->drvdata->inflight_counter--;
if (areq) {
+ /*
+ * The crypto API expects us to set the req->info to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->info, req_ctx->backup_info, ivsize);
+ kfree(req_ctx->backup_info);
+ } else {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ (req->nbytes - ivsize),
+ ivsize, 0);
+ }
+
ablkcipher_request_complete(areq, completion_error);
return 0;
}
@@ -749,21 +748,22 @@ static int ssi_blkcipher_process(
int rc, seq_len = 0, cts_restore_flag = 0;
SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
- ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- areq, info, nbytes);
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
+ areq, info, nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
- return -EINVAL;
+ rc = -EINVAL;
+ goto exit_process;
}
if (nbytes == 0) {
/* No data to process is valid */
- return 0;
+ rc = 0;
+ goto exit_process;
}
/*For CTS in case of data size aligned to 16 use CBC mode*/
if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
@@ -804,12 +804,8 @@ static int ssi_blkcipher_process(
ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
desc, &seq_len);
/* Data processing */
- ssi_blkcipher_create_data_desc(tfm,
- req_ctx,
- dst, src,
- nbytes,
- areq,
- desc, &seq_len);
+ ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq,
+ desc, &seq_len);
/* do we need to generate IV? */
if (req_ctx->is_giv) {
@@ -842,6 +838,9 @@ exit_process:
if (cts_restore_flag != 0)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
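+ /* No async completion will run, so release the decrypt IV backup here */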
+ if (rc != -EINPROGRESS)
+ kfree(req_ctx->backup_info);
+
return rc;
}
@@ -853,8 +852,6 @@ static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __io
struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();
-
ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
ivsize, areq, cc_base);
}
@@ -871,8 +868,8 @@ static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
}
static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key,
- unsigned int keylen)
+ const u8 *key,
+ unsigned int keylen)
{
return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
@@ -884,7 +881,6 @@ static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
- req_ctx->backup_info = req->info;
req_ctx->is_giv = false;
return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
@@ -897,8 +893,18 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
- req_ctx->backup_info = req->info;
+ /*
+ * Allocate and save the last IV sized bytes of the source, which will
+ * be lost in case of in-place decryption and might be needed for CTS.
+ */
+ req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+ (req->nbytes - ivsize), ivsize, 0);
req_ctx->is_giv = false;
+
return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -1244,7 +1250,7 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *templa
struct ssi_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
SSI_LOG_ERR("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
@@ -1286,7 +1292,7 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
- &blkcipher_handle->blkcipher_alg_list,
+ &blkcipher_handle->blkcipher_alg_list,
entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
@@ -1305,8 +1311,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
int rc = -ENOMEM;
int alg;
- ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle),
- GFP_KERNEL);
+ ablkcipher_handle = kmalloc(sizeof(*ablkcipher_handle), GFP_KERNEL);
if (!ablkcipher_handle)
return -ENOMEM;
@@ -1322,7 +1327,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
SSI_LOG_ERR("%s alg allocation failed\n",
- blkcipher_algs[alg].driver_name);
+ blkcipher_algs[alg].driver_name);
goto fail0;
}
t_alg->drvdata = drvdata;
@@ -1330,17 +1335,17 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
rc = crypto_register_alg(&t_alg->crypto_alg);
SSI_LOG_DEBUG("%s alg registration rc = %x\n",
- t_alg->crypto_alg.cra_driver_name, rc);
+ t_alg->crypto_alg.cra_driver_name, rc);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
goto fail0;
} else {
list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
SSI_LOG_DEBUG("Registered %s\n",
- t_alg->crypto_alg.cra_driver_name);
+ t_alg->crypto_alg.cra_driver_name);
}
}
return 0;
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 78709b92736d..9c6f1200c130 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -71,7 +71,7 @@
#include "ssi_ivgen.h"
#include "ssi_sram_mgr.h"
#include "ssi_pm.h"
-#include "ssi_fips_local.h"
+#include "ssi_fips.h"
#ifdef DX_DUMP_BYTES
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
@@ -81,12 +81,11 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
char line_buf[80];
if (!the_array) {
- SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
+ SSI_LOG_ERR("cannot dump array - NULL pointer\n");
return;
}
- ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
- name, size);
+ ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size);
if (ret < 0) {
SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
return;
@@ -95,8 +94,8 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
for (i = 0, cur_byte = the_array;
(i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
ret = snprintf(line_buf + line_offset,
- sizeof(line_buf) - line_offset,
- "0x%02X ", *cur_byte);
+ sizeof(line_buf) - line_offset,
+ "0x%02X ", *cur_byte);
if (ret < 0) {
SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
return;
@@ -193,11 +192,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
#ifdef DX_IRQ_DELAY
/* Set CC IRQ delay */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
- DX_IRQ_DELAY);
+ DX_IRQ_DELAY);
#endif
if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
}
#endif
@@ -224,7 +223,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct resource *req_mem_cc_regs = NULL;
void __iomem *cc_base = NULL;
bool irq_registered = false;
- struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
+ struct ssi_drvdata *new_drvdata = kzalloc(sizeof(*new_drvdata),
+ GFP_KERNEL);
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
@@ -251,10 +251,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = -ENODEV;
goto init_cc_res_err;
}
- SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n",
- new_drvdata->res_mem->name,
- (unsigned long long)new_drvdata->res_mem->start,
- (unsigned long long)new_drvdata->res_mem->end);
+ SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n",
+ new_drvdata->res_mem->name,
+ new_drvdata->res_mem->start,
+ new_drvdata->res_mem->end);
/* Map registers space */
req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
if (unlikely(!req_mem_cc_regs)) {
@@ -266,7 +266,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
if (unlikely(!cc_base)) {
SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
- (unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
+ (unsigned int)new_drvdata->res_mem->start,
+ (unsigned int)resource_size(new_drvdata->res_mem));
rc = -ENOMEM;
goto init_cc_res_err;
}
@@ -284,15 +285,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
IRQF_SHARED, "arm_cc7x", new_drvdata);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("Could not register to interrupt %llu\n",
- (unsigned long long)new_drvdata->res_irq->start);
+ (unsigned long long)new_drvdata->res_irq->start);
goto init_cc_res_err;
}
init_completion(&new_drvdata->icache_setup_completion);
irq_registered = true;
SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
- new_drvdata->res_irq->name,
- (unsigned long long)new_drvdata->res_irq->start);
+ new_drvdata->res_irq->name,
+ (unsigned long long)new_drvdata->res_irq->start);
new_drvdata->plat_dev = plat_dev;
@@ -301,19 +302,16 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto init_cc_res_err;
if (!new_drvdata->plat_dev->dev.dma_mask)
- {
new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
- }
+
if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
- {
new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
- }
/* Verify correct mapping */
signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
if (signature_val != DX_DEV_SIGNATURE) {
SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ signature_val, (u32)DX_DEV_SIGNATURE);
rc = -EINVAL;
goto init_cc_res_err;
}
@@ -330,7 +328,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata);
+ rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("init_stat_db failed\n");
goto init_cc_res_err;
@@ -401,6 +399,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto init_cc_res_err;
}
+ /* If we got here and FIPS mode is enabled, it means
+ * all FIPS tests passed, so let the TEE know we're good.
+ */
+ cc_set_ree_fips_status(new_drvdata, true);
+
return 0;
init_cc_res_err:
@@ -428,7 +432,7 @@ init_cc_res_err:
new_drvdata->cc_base = NULL;
}
release_mem_region(new_drvdata->res_mem->start,
- resource_size(new_drvdata->res_mem));
+ resource_size(new_drvdata->res_mem));
new_drvdata->res_mem = NULL;
}
kfree(new_drvdata);
@@ -471,7 +475,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
if (drvdata->cc_base) {
iounmap(drvdata->cc_base);
release_mem_region(drvdata->res_mem->start,
- resource_size(drvdata->res_mem));
+ resource_size(drvdata->res_mem));
drvdata->cc_base = NULL;
drvdata->res_mem = NULL;
}
@@ -516,12 +520,12 @@ static int cc7x_probe(struct platform_device *plat_dev)
asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
cacheline_size = 4 << ((ctr >> 16) & 0xf);
SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
+ cacheline_size, L1_CACHE_BYTES);
asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
- " Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF, (ctr >> 20) & 0xF, ctr & 0xF);
+ SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
+ (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
+ (ctr >> 20) & 0xF, ctr & 0xF);
#endif
/* Map registers space */
@@ -546,7 +550,7 @@ static int cc7x_remove(struct platform_device *plat_dev)
}
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-static struct dev_pm_ops arm_cc7x_driver_pm = {
+static const struct dev_pm_ops arm_cc7x_driver_pm = {
SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
};
#endif
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index c1ed61f1a202..b6ad89ae9bee 100644
--- a/drivers/staging/ccree/ssi_driver.h
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -48,7 +48,6 @@
#include "cc_crypto_ctx.h"
#include "ssi_sysfs.h"
#include "hash_defs.h"
-#include "ssi_fips_local.h"
#include "cc_hw_queue_defs.h"
#include "ssi_sram_mgr.h"
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
index fdc40f38332a..33d53d64603d 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -14,48 +14,115 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-/**************************************************************
- * This file defines the driver FIPS APIs *
- **************************************************************/
+#include <linux/kernel.h>
+#include <linux/fips.h>
-#include <linux/module.h>
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "cc_hal.h"
#include "ssi_fips.h"
-extern int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state);
-extern int ssi_fips_ext_get_error(enum cc_fips_error *p_err);
+static void fips_dsr(unsigned long devarg);
+
+struct ssi_fips_handle {
+ struct tasklet_struct tasklet;
+};
+
+/* This function is called once at the driver entry point to check
+ * whether a TEE FIPS error has occurred.
+ */
+static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+{
+ u32 reg;
+ void __iomem *cc_base = drvdata->cc_base;
+
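+ /* The TEE publishes its FIPS self-test status via the GPR_HOST register */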
+ reg = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+}
/*
- * This function returns the REE FIPS state.
- * It should be called by kernel module.
+ * This function pushes the REE FIPS library status to the TEE library
+ * by writing the status to the HOST_GPR0 register.
*/
-int ssi_fips_get_state(enum cc_fips_state_t *p_state)
+void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+{
+ void __iomem *cc_base = drvdata->cc_base;
+ int val = CC_FIPS_SYNC_REE_STATUS;
+
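+ /* Merge the REE status bit with the module OK/error indication */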
+ val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), val);
+}
+
+void ssi_fips_fini(struct ssi_drvdata *drvdata)
{
- int rc = 0;
+ struct ssi_fips_handle *fips_h = drvdata->fips_handle;
- if (!p_state)
- return -EINVAL;
+ if (!fips_h)
+ return; /* Not allocated */
- rc = ssi_fips_ext_get_state(p_state);
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->tasklet);
- return rc;
+ kfree(fips_h);
+ drvdata->fips_handle = NULL;
}
-EXPORT_SYMBOL(ssi_fips_get_state);
+void fips_handler(struct ssi_drvdata *drvdata)
+{
+ struct ssi_fips_handle *fips_handle_ptr = drvdata->fips_handle;
-/*
- * This function returns the REE FIPS error.
- * It should be called by kernel module.
- */
-int ssi_fips_get_error(enum cc_fips_error *p_err)
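+ /* May be called from interrupt context; defer the work to the tasklet */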
+ tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
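+/* In FIPS mode, a cryptographic error reported by the TEE is fatal */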
+static inline void tee_fips_error(void)
{
- int rc = 0;
+ if (fips_enabled)
+ panic("ccree: TEE reported cryptographic error in fips mode!\n");
+ else
+ SSI_LOG_ERR("TEE reported error!\n");
+}
- if (!p_err)
- return -EINVAL;
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ void __iomem *cc_base = drvdata->cc_base;
+ u32 irq, state, val;
- rc = ssi_fips_ext_get_error(p_err);
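+ /* Only the GPR0 (TEE FIPS) bit of the interrupt cause is relevant here */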
+ irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
- return rc;
+ if (irq) {
+ state = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+
+ if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
+ tee_fips_error();
+ }
+
+ /* After verifying that there is nothing to do,
+ * unmask the AXI completion interrupt.
+ */
+ val = (CC_REG_OFFSET(HOST_RGF, HOST_IMR) & ~irq);
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
}
-EXPORT_SYMBOL(ssi_fips_get_error);
+/* This function is called once at the driver entry point. */
+int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ struct ssi_fips_handle *fips_h;
+
+ fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ if (!fips_h)
+ return -ENOMEM;
+
+ p_drvdata->fips_handle = fips_h;
+
+ SSI_LOG_DEBUG("Initializing fips tasklet\n");
+ tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+
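+ /* Fail early if the TEE already reported a FIPS error during power-up */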
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error();
+
+ return 0;
+}
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
index 4f5c6a9a8363..369ddf9478e7 100644
--- a/drivers/staging/ccree/ssi_fips.h
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -17,45 +17,33 @@
#ifndef __SSI_FIPS_H__
#define __SSI_FIPS_H__
-/*!
- * @file
- * @brief This file contains FIPS related defintions and APIs.
- */
+#ifdef CONFIG_CRYPTO_FIPS
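+
+/* Status bits exchanged with the TEE via the GPR host registers */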
-enum cc_fips_state {
- CC_FIPS_STATE_NOT_SUPPORTED = 0,
- CC_FIPS_STATE_SUPPORTED,
- CC_FIPS_STATE_ERROR,
- CC_FIPS_STATE_RESERVE32B = S32_MAX
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
};
-enum cc_fips_error {
- CC_REE_FIPS_ERROR_OK = 0,
- CC_REE_FIPS_ERROR_GENERAL,
- CC_REE_FIPS_ERROR_FROM_TEE,
- CC_REE_FIPS_ERROR_AES_ECB_PUT,
- CC_REE_FIPS_ERROR_AES_CBC_PUT,
- CC_REE_FIPS_ERROR_AES_OFB_PUT,
- CC_REE_FIPS_ERROR_AES_CTR_PUT,
- CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT,
- CC_REE_FIPS_ERROR_AES_XTS_PUT,
- CC_REE_FIPS_ERROR_AES_CMAC_PUT,
- CC_REE_FIPS_ERROR_AESCCM_PUT,
- CC_REE_FIPS_ERROR_AESGCM_PUT,
- CC_REE_FIPS_ERROR_DES_ECB_PUT,
- CC_REE_FIPS_ERROR_DES_CBC_PUT,
- CC_REE_FIPS_ERROR_SHA1_PUT,
- CC_REE_FIPS_ERROR_SHA256_PUT,
- CC_REE_FIPS_ERROR_SHA512_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA1_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA256_PUT,
- CC_REE_FIPS_ERROR_HMAC_SHA512_PUT,
- CC_REE_FIPS_ERROR_ROM_CHECKSUM,
- CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX
-};
+int ssi_fips_init(struct ssi_drvdata *p_drvdata);
+void ssi_fips_fini(struct ssi_drvdata *drvdata);
+void fips_handler(struct ssi_drvdata *drvdata);
+void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct ssi_drvdata *drvdata) {}
-int ssi_fips_get_state(enum cc_fips_state *p_state);
-int ssi_fips_get_error(enum cc_fips_error *p_err);
+#endif /* CONFIG_CRYPTO_FIPS */
#endif /*__SSI_FIPS_H__*/
diff --git a/drivers/staging/ccree/ssi_fips_data.h b/drivers/staging/ccree/ssi_fips_data.h
deleted file mode 100644
index c41671dbee40..000000000000
--- a/drivers/staging/ccree/ssi_fips_data.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * The test vectors were taken from:
- *
- * * AES
- * NIST Special Publication 800-38A 2001 Edition
- * Recommendation for Block Cipher Modes of Operation
- * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
- * Appendix F: Example Vectors for Modes of Operation of the AES
- *
- * * AES CTS
- * Advanced Encryption Standard (AES) Encryption for Kerberos 5
- * February 2005
- * https://tools.ietf.org/html/rfc3962#appendix-B
- * B. Sample Test Vectors
- *
- * * AES XTS
- * http://csrc.nist.gov/groups/STM/cavp/#08
- * http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
- *
- * * AES CMAC
- * http://csrc.nist.gov/groups/STM/cavp/index.html#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
- *
- * * AES-CCM
- * http://csrc.nist.gov/groups/STM/cavp/#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
- *
- * * AES-GCM
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
- *
- * * Triple-DES
- * NIST Special Publication 800-67 January 2012
- * Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher
- * http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf
- * APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS
- * and
- * http://csrc.nist.gov/groups/STM/cavp/#01
- * http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
- *
- * * HASH
- * http://csrc.nist.gov/groups/STM/cavp/#03
- * http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
- *
- * * HMAC
- * http://csrc.nist.gov/groups/STM/cavp/#07
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
- */
-
-/* NIST AES */
-#define AES_128_BIT_KEY_SIZE 16
-#define AES_192_BIT_KEY_SIZE 24
-#define AES_256_BIT_KEY_SIZE 32
-#define AES_512_BIT_KEY_SIZE 64
-
-#define NIST_AES_IV_SIZE 16
-
-#define NIST_AES_128_KEY { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }
-#define NIST_AES_192_KEY { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, \
- 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b }
-#define NIST_AES_256_KEY { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, \
- 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }
-#define NIST_AES_VECTOR_SIZE 16
-#define NIST_AES_PLAIN_DATA { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a }
-
-#define NIST_AES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-#define NIST_AES_128_ECB_CIPHER { 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97 }
-#define NIST_AES_192_ECB_CIPHER { 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f, 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc }
-#define NIST_AES_256_ECB_CIPHER { 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c, 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8 }
-
-#define NIST_AES_CBC_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
-#define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d }
-#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
-#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
-
-#define NIST_AES_OFB_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
-#define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a }
-#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
-#define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 }
-
-#define NIST_AES_CTR_IV { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }
-#define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce }
-#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
-#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
-
-#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
-#define RFC3962_AES_VECTOR_SIZE 17
-#define RFC3962_AES_PLAIN_DATA { 0x49, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20 }
-#define RFC3962_AES_CBC_CTS_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-#define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 }
-
-#define NIST_AES_256_XTS_KEY { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \
- 0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
-#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
-#define NIST_AES_256_XTS_VECTOR_SIZE 16
-#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
-#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
-
-#define NIST_AES_512_XTS_KEY { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \
- 0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \
- 0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d, 0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3, \
- 0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58, 0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08 }
-#define NIST_AES_512_XTS_IV { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64, }
-#define NIST_AES_512_XTS_VECTOR_SIZE 32
-#define NIST_AES_512_XTS_PLAIN { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \
- 0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
-#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
- 0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
-
-/* NIST AES-CMAC */
-#define NIST_AES_128_CMAC_KEY { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 }
-#define NIST_AES_128_CMAC_PLAIN_DATA { 0xa8, 0xde, 0x55, 0x17, 0x0c, 0x6d, 0xc0, 0xd8, 0x0d, 0xe3, 0x2f, 0x50, 0x8b, 0xf4, 0x9b, 0x70 }
-#define NIST_AES_128_CMAC_MAC { 0xcf, 0xef, 0x9b, 0x78, 0x39, 0x84, 0x1f, 0xdb, 0xcc, 0xbb, 0x6c, 0x2c, 0xf2, 0x38, 0xf7 }
-#define NIST_AES_128_CMAC_VECTOR_SIZE 16
-#define NIST_AES_128_CMAC_OUTPUT_SIZE 15
-
-#define NIST_AES_192_CMAC_KEY { 0x20, 0x51, 0xaf, 0x34, 0x76, 0x2e, 0xbe, 0x55, 0x6f, 0x72, 0xa5, 0xc6, 0xed, 0xc7, 0x77, 0x1e, \
- 0xb9, 0x24, 0x5f, 0xad, 0x76, 0xf0, 0x34, 0xbe }
-#define NIST_AES_192_CMAC_PLAIN_DATA { 0xae, 0x8e, 0x93, 0xc9, 0xc9, 0x91, 0xcf, 0x89, 0x6a, 0x49, 0x1a, 0x89, 0x07, 0xdf, 0x4e, 0x4b, \
- 0xe5, 0x18, 0x6a, 0xe4, 0x96, 0xcd, 0x34, 0x0d, 0xc1, 0x9b, 0x23, 0x78, 0x21, 0xdb, 0x7b, 0x60 }
-#define NIST_AES_192_CMAC_MAC { 0x74, 0xf7, 0x46, 0x08, 0xc0, 0x4f, 0x0f, 0x4e, 0x47, 0xfa, 0x64, 0x04, 0x33, 0xb6, 0xe6, 0xfb }
-#define NIST_AES_192_CMAC_VECTOR_SIZE 32
-#define NIST_AES_192_CMAC_OUTPUT_SIZE 16
-
-#define NIST_AES_256_CMAC_KEY { 0x3a, 0x75, 0xa9, 0xd2, 0xbd, 0xb8, 0xc8, 0x04, 0xba, 0x4a, 0xb4, 0x98, 0x35, 0x73, 0xa6, 0xb2, \
- 0x53, 0x16, 0x0d, 0xd9, 0x0f, 0x8e, 0xdd, 0xfb, 0x2f, 0xdc, 0x2a, 0xb1, 0x76, 0x04, 0xf5, 0xc5 }
-#define NIST_AES_256_CMAC_PLAIN_DATA { 0x42, 0xf3, 0x5d, 0x5a, 0xa5, 0x33, 0xa7, 0xa0, 0xa5, 0xf7, 0x4e, 0x14, 0x4f, 0x2a, 0x5f, 0x20 }
-#define NIST_AES_256_CMAC_MAC { 0xf1, 0x53, 0x2f, 0x87, 0x32, 0xd9, 0xf5, 0x90, 0x30, 0x07 }
-#define NIST_AES_256_CMAC_VECTOR_SIZE 16
-#define NIST_AES_256_CMAC_OUTPUT_SIZE 10
-
-/* NIST TDES */
-#define TDES_NUM_OF_KEYS 3
-#define NIST_TDES_VECTOR_SIZE 8
-#define NIST_TDES_IV_SIZE 8
-
-#define NIST_TDES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
-
-#define NIST_TDES_ECB3_KEY { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, \
- 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, \
- 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23 }
-#define NIST_TDES_ECB3_PLAIN_DATA { 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 }
-#define NIST_TDES_ECB3_CIPHER { 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f }
-
-#define NIST_TDES_CBC3_IV { 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 }
-#define NIST_TDES_CBC3_KEY { 0xe9, 0xda, 0x37, 0xf8, 0xdc, 0x97, 0x6d, 0x5b, \
- 0xb6, 0x8c, 0x04, 0xe3, 0xec, 0x98, 0x20, 0x15, \
- 0xf4, 0x0e, 0x08, 0xb5, 0x97, 0x29, 0xf2, 0x8f }
-#define NIST_TDES_CBC3_PLAIN_DATA { 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
-#define NIST_TDES_CBC3_CIPHER { 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
-
-/* NIST AES-CCM */
-#define NIST_AESCCM_128_BIT_KEY_SIZE 16
-#define NIST_AESCCM_192_BIT_KEY_SIZE 24
-#define NIST_AESCCM_256_BIT_KEY_SIZE 32
-
-#define NIST_AESCCM_B0_VAL 0x79 /* L'[0:2]=1 , M'[3-5]=7 , Adata[6]=1, reserved[7]=0 */
-#define NIST_AESCCM_NONCE_SIZE 13
-#define NIST_AESCCM_IV_SIZE 16
-#define NIST_AESCCM_ADATA_SIZE 32
-#define NIST_AESCCM_TEXT_SIZE 16
-#define NIST_AESCCM_TAG_SIZE 16
-
-#define NIST_AESCCM_128_KEY { 0x70, 0x01, 0x0e, 0xd9, 0x0e, 0x61, 0x86, 0xec, 0xad, 0x41, 0xf0, 0xd3, 0xc7, 0xc4, 0x2f, 0xf8 }
-#define NIST_AESCCM_128_NONCE { 0xa5, 0xf4, 0xf4, 0x98, 0x6e, 0x98, 0x47, 0x29, 0x65, 0xf5, 0xab, 0xcc, 0x4b }
-#define NIST_AESCCM_128_ADATA { 0x3f, 0xec, 0x0e, 0x5c, 0xc2, 0x4d, 0x67, 0x13, 0x94, 0x37, 0xcb, 0xc8, 0x11, 0x24, 0x14, 0xfc, \
- 0x8d, 0xac, 0xcd, 0x1a, 0x94, 0xb4, 0x9a, 0x4c, 0x76, 0xe2, 0xd3, 0x93, 0x03, 0x54, 0x73, 0x17 }
-#define NIST_AESCCM_128_PLAIN_TEXT { 0xbe, 0x32, 0x2f, 0x58, 0xef, 0xa7, 0xf8, 0xc6, 0x8a, 0x63, 0x5e, 0x0b, 0x9c, 0xce, 0x77, 0xf2 }
-#define NIST_AESCCM_128_CIPHER { 0x8e, 0x44, 0x25, 0xae, 0x57, 0x39, 0x74, 0xf0, 0xf0, 0x69, 0x3a, 0x18, 0x8b, 0x52, 0x58, 0x12 }
-#define NIST_AESCCM_128_MAC { 0xee, 0xf0, 0x8e, 0x3f, 0xb1, 0x5f, 0x42, 0x27, 0xe0, 0xd9, 0x89, 0xa4, 0xd5, 0x87, 0xa8, 0xcf }
-
-#define NIST_AESCCM_192_KEY { 0x68, 0x73, 0xf1, 0xc6, 0xc3, 0x09, 0x75, 0xaf, 0xf6, 0xf0, 0x84, 0x70, 0x26, 0x43, 0x21, 0x13, \
- 0x0a, 0x6e, 0x59, 0x84, 0xad, 0xe3, 0x24, 0xe9 }
-#define NIST_AESCCM_192_NONCE { 0x7c, 0x4d, 0x2f, 0x7c, 0xec, 0x04, 0x36, 0x1f, 0x18, 0x7f, 0x07, 0x26, 0xd5 }
-#define NIST_AESCCM_192_ADATA { 0x77, 0x74, 0x3b, 0x5d, 0x83, 0xa0, 0x0d, 0x2c, 0x8d, 0x5f, 0x7e, 0x10, 0x78, 0x15, 0x31, 0xb4, \
- 0x96, 0xe0, 0x9f, 0x3b, 0xc9, 0x29, 0x5d, 0x7a, 0xe9, 0x79, 0x9e, 0x64, 0x66, 0x8e, 0xf8, 0xc5 }
-#define NIST_AESCCM_192_PLAIN_TEXT { 0x50, 0x51, 0xa0, 0xb0, 0xb6, 0x76, 0x6c, 0xd6, 0xea, 0x29, 0xa6, 0x72, 0x76, 0x9d, 0x40, 0xfe }
-#define NIST_AESCCM_192_CIPHER { 0x0c, 0xe5, 0xac, 0x8d, 0x6b, 0x25, 0x6f, 0xb7, 0x58, 0x0b, 0xf6, 0xac, 0xc7, 0x64, 0x26, 0xaf }
-#define NIST_AESCCM_192_MAC { 0x40, 0xbc, 0xe5, 0x8f, 0xd4, 0xcd, 0x65, 0x48, 0xdf, 0x90, 0xa0, 0x33, 0x7c, 0x84, 0x20, 0x04 }
-
-#define NIST_AESCCM_256_KEY { 0xee, 0x8c, 0xe1, 0x87, 0x16, 0x97, 0x79, 0xd1, 0x3e, 0x44, 0x3d, 0x64, 0x28, 0xe3, 0x8b, 0x38, \
- 0xb5, 0x5d, 0xfb, 0x90, 0xf0, 0x22, 0x8a, 0x8a, 0x4e, 0x62, 0xf8, 0xf5, 0x35, 0x80, 0x6e, 0x62 }
-#define NIST_AESCCM_256_NONCE { 0x12, 0x16, 0x42, 0xc4, 0x21, 0x8b, 0x39, 0x1c, 0x98, 0xe6, 0x26, 0x9c, 0x8a }
-#define NIST_AESCCM_256_ADATA { 0x71, 0x8d, 0x13, 0xe4, 0x75, 0x22, 0xac, 0x4c, 0xdf, 0x3f, 0x82, 0x80, 0x63, 0x98, 0x0b, 0x6d, \
- 0x45, 0x2f, 0xcd, 0xcd, 0x6e, 0x1a, 0x19, 0x04, 0xbf, 0x87, 0xf5, 0x48, 0xa5, 0xfd, 0x5a, 0x05 }
-#define NIST_AESCCM_256_PLAIN_TEXT { 0xd1, 0x5f, 0x98, 0xf2, 0xc6, 0xd6, 0x70, 0xf5, 0x5c, 0x78, 0xa0, 0x66, 0x48, 0x33, 0x2b, 0xc9 }
-#define NIST_AESCCM_256_CIPHER { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a }
-#define NIST_AESCCM_256_MAC { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 }
-
-/* NIST AES-GCM */
-#define NIST_AESGCM_128_BIT_KEY_SIZE 16
-#define NIST_AESGCM_192_BIT_KEY_SIZE 24
-#define NIST_AESGCM_256_BIT_KEY_SIZE 32
-
-#define NIST_AESGCM_IV_SIZE 12
-#define NIST_AESGCM_ADATA_SIZE 16
-#define NIST_AESGCM_TEXT_SIZE 16
-#define NIST_AESGCM_TAG_SIZE 16
-
-#define NIST_AESGCM_128_KEY { 0x81, 0x6e, 0x39, 0x07, 0x04, 0x10, 0xcf, 0x21, 0x84, 0x90, 0x4d, 0xa0, 0x3e, 0xa5, 0x07, 0x5a }
-#define NIST_AESGCM_128_IV { 0x32, 0xc3, 0x67, 0xa3, 0x36, 0x26, 0x13, 0xb2, 0x7f, 0xc3, 0xe6, 0x7e }
-#define NIST_AESGCM_128_ADATA { 0xf2, 0xa3, 0x07, 0x28, 0xed, 0x87, 0x4e, 0xe0, 0x29, 0x83, 0xc2, 0x94, 0x43, 0x5d, 0x3c, 0x16 }
-#define NIST_AESGCM_128_PLAIN_TEXT { 0xec, 0xaf, 0xe9, 0x6c, 0x67, 0xa1, 0x64, 0x67, 0x44, 0xf1, 0xc8, 0x91, 0xf5, 0xe6, 0x94, 0x27 }
-#define NIST_AESGCM_128_CIPHER { 0x55, 0x2e, 0xbe, 0x01, 0x2e, 0x7b, 0xcf, 0x90, 0xfc, 0xef, 0x71, 0x2f, 0x83, 0x44, 0xe8, 0xf1 }
-#define NIST_AESGCM_128_MAC { 0xec, 0xaa, 0xe9, 0xfc, 0x68, 0x27, 0x6a, 0x45, 0xab, 0x0c, 0xa3, 0xcb, 0x9d, 0xd9, 0x53, 0x9f }
-
-#define NIST_AESGCM_192_KEY { 0x0c, 0x44, 0xd6, 0xc9, 0x28, 0xee, 0x11, 0x2c, 0xe6, 0x65, 0xfe, 0x54, 0x7e, 0xbd, 0x38, 0x72, \
- 0x98, 0xa9, 0x54, 0xb4, 0x62, 0xf6, 0x95, 0xd8 }
-#define NIST_AESGCM_192_IV { 0x18, 0xb8, 0xf3, 0x20, 0xfe, 0xf4, 0xae, 0x8c, 0xcb, 0xe8, 0xf9, 0x52 }
-#define NIST_AESGCM_192_ADATA { 0x73, 0x41, 0xd4, 0x3f, 0x98, 0xcf, 0x38, 0x82, 0x21, 0x18, 0x09, 0x41, 0x97, 0x03, 0x76, 0xe8 }
-#define NIST_AESGCM_192_PLAIN_TEXT { 0x96, 0xad, 0x07, 0xf9, 0xb6, 0x28, 0xb6, 0x52, 0xcf, 0x86, 0xcb, 0x73, 0x17, 0x88, 0x6f, 0x51 }
-#define NIST_AESGCM_192_CIPHER { 0xa6, 0x64, 0x07, 0x81, 0x33, 0x40, 0x5e, 0xb9, 0x09, 0x4d, 0x36, 0xf7, 0xe0, 0x70, 0x19, 0x1f }
-#define NIST_AESGCM_192_MAC { 0xe8, 0xf9, 0xc3, 0x17, 0x84, 0x7c, 0xe3, 0xf3, 0xc2, 0x39, 0x94, 0xa4, 0x02, 0xf0, 0x65, 0x81 }
-
-#define NIST_AESGCM_256_KEY { 0x54, 0xe3, 0x52, 0xea, 0x1d, 0x84, 0xbf, 0xe6, 0x4a, 0x10, 0x11, 0x09, 0x61, 0x11, 0xfb, 0xe7, \
- 0x66, 0x8a, 0xd2, 0x20, 0x3d, 0x90, 0x2a, 0x01, 0x45, 0x8c, 0x3b, 0xbd, 0x85, 0xbf, 0xce, 0x14 }
-#define NIST_AESGCM_256_IV { 0xdf, 0x7c, 0x3b, 0xca, 0x00, 0x39, 0x6d, 0x0c, 0x01, 0x84, 0x95, 0xd9 }
-#define NIST_AESGCM_256_ADATA { 0x7e, 0x96, 0x8d, 0x71, 0xb5, 0x0c, 0x1f, 0x11, 0xfd, 0x00, 0x1f, 0x3f, 0xef, 0x49, 0xd0, 0x45 }
-#define NIST_AESGCM_256_PLAIN_TEXT { 0x85, 0xfc, 0x3d, 0xfa, 0xd9, 0xb5, 0xa8, 0xd3, 0x25, 0x8e, 0x4f, 0xc4, 0x45, 0x71, 0xbd, 0x3b }
-#define NIST_AESGCM_256_CIPHER { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d }
-#define NIST_AESGCM_256_MAC { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 }
-
-/* NIST HASH */
-#define NIST_SHA_MSG_SIZE 16
-
-#define NIST_SHA_1_MSG { 0x35, 0x52, 0x69, 0x4c, 0xdf, 0x66, 0x3f, 0xd9, 0x4b, 0x22, 0x47, 0x47, 0xac, 0x40, 0x6a, 0xaf }
-#define NIST_SHA_1_MD { 0xa1, 0x50, 0xde, 0x92, 0x74, 0x54, 0x20, 0x2d, 0x94, 0xe6, 0x56, 0xde, 0x4c, 0x7c, 0x0c, 0xa6, \
- 0x91, 0xde, 0x95, 0x5d }
-
-#define NIST_SHA_256_MSG { 0x0a, 0x27, 0x84, 0x7c, 0xdc, 0x98, 0xbd, 0x6f, 0x62, 0x22, 0x0b, 0x04, 0x6e, 0xdd, 0x76, 0x2b }
-#define NIST_SHA_256_MD { 0x80, 0xc2, 0x5e, 0xc1, 0x60, 0x05, 0x87, 0xe7, 0xf2, 0x8b, 0x18, 0xb1, 0xb1, 0x8e, 0x3c, 0xdc, \
- 0x89, 0x92, 0x8e, 0x39, 0xca, 0xb3, 0xbc, 0x25, 0xe4, 0xd4, 0xa4, 0xc1, 0x39, 0xbc, 0xed, 0xc4 }
-
-#define NIST_SHA_512_MSG { 0xcd, 0x67, 0xbd, 0x40, 0x54, 0xaa, 0xa3, 0xba, 0xa0, 0xdb, 0x17, 0x8c, 0xe2, 0x32, 0xfd, 0x5a }
-#define NIST_SHA_512_MD { 0x0d, 0x85, 0x21, 0xf8, 0xf2, 0xf3, 0x90, 0x03, 0x32, 0xd1, 0xa1, 0xa5, 0x5c, 0x60, 0xba, 0x81, \
- 0xd0, 0x4d, 0x28, 0xdf, 0xe8, 0xc5, 0x04, 0xb6, 0x32, 0x8a, 0xe7, 0x87, 0x92, 0x5f, 0xe0, 0x18, \
- 0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \
- 0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 }
-
-/* NIST HMAC */
-#define NIST_HMAC_MSG_SIZE 128
-
-#define NIST_HMAC_SHA1_KEY_SIZE 10
-#define NIST_HMAC_SHA1_KEY { 0x59, 0x78, 0x59, 0x28, 0xd7, 0x25, 0x16, 0xe3, 0x12, 0x72 }
-#define NIST_HMAC_SHA1_MSG { 0xa3, 0xce, 0x88, 0x99, 0xdf, 0x10, 0x22, 0xe8, 0xd2, 0xd5, 0x39, 0xb4, 0x7b, 0xf0, 0xe3, 0x09, \
- 0xc6, 0x6f, 0x84, 0x09, 0x5e, 0x21, 0x43, 0x8e, 0xc3, 0x55, 0xbf, 0x11, 0x9c, 0xe5, 0xfd, 0xcb, \
- 0x4e, 0x73, 0xa6, 0x19, 0xcd, 0xf3, 0x6f, 0x25, 0xb3, 0x69, 0xd8, 0xc3, 0x8f, 0xf4, 0x19, 0x99, \
- 0x7f, 0x0c, 0x59, 0x83, 0x01, 0x08, 0x22, 0x36, 0x06, 0xe3, 0x12, 0x23, 0x48, 0x3f, 0xd3, 0x9e, \
- 0xde, 0xaa, 0x4d, 0x3f, 0x0d, 0x21, 0x19, 0x88, 0x62, 0xd2, 0x39, 0xc9, 0xfd, 0x26, 0x07, 0x41, \
- 0x30, 0xff, 0x6c, 0x86, 0x49, 0x3f, 0x52, 0x27, 0xab, 0x89, 0x5c, 0x8f, 0x24, 0x4b, 0xd4, 0x2c, \
- 0x7a, 0xfc, 0xe5, 0xd1, 0x47, 0xa2, 0x0a, 0x59, 0x07, 0x98, 0xc6, 0x8e, 0x70, 0x8e, 0x96, 0x49, \
- 0x02, 0xd1, 0x24, 0xda, 0xde, 0xcd, 0xbd, 0xa9, 0xdb, 0xd0, 0x05, 0x1e, 0xd7, 0x10, 0xe9, 0xbf }
-#define NIST_HMAC_SHA1_MD { 0x3c, 0x81, 0x62, 0x58, 0x9a, 0xaf, 0xae, 0xe0, 0x24, 0xfc, 0x9a, 0x5c, 0xa5, 0x0d, 0xd2, 0x33, \
- 0x6f, 0xe3, 0xeb, 0x28 }
-
-#define NIST_HMAC_SHA256_KEY_SIZE 40
-#define NIST_HMAC_SHA256_KEY { 0x97, 0x79, 0xd9, 0x12, 0x06, 0x42, 0x79, 0x7f, 0x17, 0x47, 0x02, 0x5d, 0x5b, 0x22, 0xb7, 0xac, \
- 0x60, 0x7c, 0xab, 0x08, 0xe1, 0x75, 0x8f, 0x2f, 0x3a, 0x46, 0xc8, 0xbe, 0x1e, 0x25, 0xc5, 0x3b, \
- 0x8c, 0x6a, 0x8f, 0x58, 0xff, 0xef, 0xa1, 0x76 }
-#define NIST_HMAC_SHA256_MSG { 0xb1, 0x68, 0x9c, 0x25, 0x91, 0xea, 0xf3, 0xc9, 0xe6, 0x60, 0x70, 0xf8, 0xa7, 0x79, 0x54, 0xff, \
- 0xb8, 0x17, 0x49, 0xf1, 0xb0, 0x03, 0x46, 0xf9, 0xdf, 0xe0, 0xb2, 0xee, 0x90, 0x5d, 0xcc, 0x28, \
- 0x8b, 0xaf, 0x4a, 0x92, 0xde, 0x3f, 0x40, 0x01, 0xdd, 0x9f, 0x44, 0xc4, 0x68, 0xc3, 0xd0, 0x7d, \
- 0x6c, 0x6e, 0xe8, 0x2f, 0xac, 0xea, 0xfc, 0x97, 0xc2, 0xfc, 0x0f, 0xc0, 0x60, 0x17, 0x19, 0xd2, \
- 0xdc, 0xd0, 0xaa, 0x2a, 0xec, 0x92, 0xd1, 0xb0, 0xae, 0x93, 0x3c, 0x65, 0xeb, 0x06, 0xa0, 0x3c, \
- 0x9c, 0x93, 0x5c, 0x2b, 0xad, 0x04, 0x59, 0x81, 0x02, 0x41, 0x34, 0x7a, 0xb8, 0x7e, 0x9f, 0x11, \
- 0xad, 0xb3, 0x04, 0x15, 0x42, 0x4c, 0x6c, 0x7f, 0x5f, 0x22, 0xa0, 0x03, 0xb8, 0xab, 0x8d, 0xe5, \
- 0x4f, 0x6d, 0xed, 0x0e, 0x3a, 0xb9, 0x24, 0x5f, 0xa7, 0x95, 0x68, 0x45, 0x1d, 0xfa, 0x25, 0x8e }
-#define NIST_HMAC_SHA256_MD { 0x76, 0x9f, 0x00, 0xd3, 0xe6, 0xa6, 0xcc, 0x1f, 0xb4, 0x26, 0xa1, 0x4a, 0x4f, 0x76, 0xc6, 0x46, \
- 0x2e, 0x61, 0x49, 0x72, 0x6e, 0x0d, 0xee, 0x0e, 0xc0, 0xcf, 0x97, 0xa1, 0x66, 0x05, 0xac, 0x8b }
-
-#define NIST_HMAC_SHA512_KEY_SIZE 100
-#define NIST_HMAC_SHA512_KEY { 0x57, 0xc2, 0xeb, 0x67, 0x7b, 0x50, 0x93, 0xb9, 0xe8, 0x29, 0xea, 0x4b, 0xab, 0xb5, 0x0b, 0xde, \
- 0x55, 0xd0, 0xad, 0x59, 0xfe, 0xc3, 0x4a, 0x61, 0x89, 0x73, 0x80, 0x2b, 0x2a, 0xd9, 0xb7, 0x8e, \
- 0x26, 0xb2, 0x04, 0x5d, 0xda, 0x78, 0x4d, 0xf3, 0xff, 0x90, 0xae, 0x0f, 0x2c, 0xc5, 0x1c, 0xe3, \
- 0x9c, 0xf5, 0x48, 0x67, 0x32, 0x0a, 0xc6, 0xf3, 0xba, 0x2c, 0x6f, 0x0d, 0x72, 0x36, 0x04, 0x80, \
- 0xc9, 0x66, 0x14, 0xae, 0x66, 0x58, 0x1f, 0x26, 0x6c, 0x35, 0xfb, 0x79, 0xfd, 0x28, 0x77, 0x4a, \
- 0xfd, 0x11, 0x3f, 0xa5, 0x18, 0x7e, 0xff, 0x92, 0x06, 0xd7, 0xcb, 0xe9, 0x0d, 0xd8, 0xbf, 0x67, \
- 0xc8, 0x44, 0xe2, 0x02 }
-#define NIST_HMAC_SHA512_MSG { 0x24, 0x23, 0xdf, 0xf4, 0x8b, 0x31, 0x2b, 0xe8, 0x64, 0xcb, 0x34, 0x90, 0x64, 0x1f, 0x79, 0x3d, \
- 0x2b, 0x9f, 0xb6, 0x8a, 0x77, 0x63, 0xb8, 0xe2, 0x98, 0xc8, 0x6f, 0x42, 0x24, 0x5e, 0x45, 0x40, \
- 0xeb, 0x01, 0xae, 0x4d, 0x2d, 0x45, 0x00, 0x37, 0x0b, 0x18, 0x86, 0xf2, 0x3c, 0xa2, 0xcf, 0x97, \
- 0x01, 0x70, 0x4c, 0xad, 0x5b, 0xd2, 0x1b, 0xa8, 0x7b, 0x81, 0x1d, 0xaf, 0x7a, 0x85, 0x4e, 0xa2, \
- 0x4a, 0x56, 0x56, 0x5c, 0xed, 0x42, 0x5b, 0x35, 0xe4, 0x0e, 0x1a, 0xcb, 0xeb, 0xe0, 0x36, 0x03, \
- 0xe3, 0x5d, 0xcf, 0x4a, 0x10, 0x0e, 0x57, 0x21, 0x84, 0x08, 0xa1, 0xd8, 0xdb, 0xcc, 0x3b, 0x99, \
- 0x29, 0x6c, 0xfe, 0xa9, 0x31, 0xef, 0xe3, 0xeb, 0xd8, 0xf7, 0x19, 0xa6, 0xd9, 0xa1, 0x54, 0x87, \
- 0xb9, 0xad, 0x67, 0xea, 0xfe, 0xdf, 0x15, 0x55, 0x9c, 0xa4, 0x24, 0x45, 0xb0, 0xf9, 0xb4, 0x2e }
-#define NIST_HMAC_SHA512_MD { 0x33, 0xc5, 0x11, 0xe9, 0xbc, 0x23, 0x07, 0xc6, 0x27, 0x58, 0xdf, 0x61, 0x12, 0x5a, 0x98, 0x0e, \
- 0xe6, 0x4c, 0xef, 0xeb, 0xd9, 0x09, 0x31, 0xcb, 0x91, 0xc1, 0x37, 0x42, 0xd4, 0x71, 0x4c, 0x06, \
- 0xde, 0x40, 0x03, 0xfa, 0xf3, 0xc4, 0x1c, 0x06, 0xae, 0xfc, 0x63, 0x8a, 0xd4, 0x7b, 0x21, 0x90, \
- 0x6e, 0x6b, 0x10, 0x48, 0x16, 0xb7, 0x2d, 0xe6, 0x26, 0x9e, 0x04, 0x5a, 0x1f, 0x44, 0x29, 0xd4 }
-
diff --git a/drivers/staging/ccree/ssi_fips_ext.c b/drivers/staging/ccree/ssi_fips_ext.c
deleted file mode 100644
index e7bf1843f60c..000000000000
--- a/drivers/staging/ccree/ssi_fips_ext.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver FIPS functions that should be
- * implemented by the driver user. Current implementation is sample code only.
- ***************************************************************/
-
-#include <linux/module.h>
-#include "ssi_fips_local.h"
-#include "ssi_driver.h"
-
-static bool tee_error;
-module_param(tee_error, bool, 0644);
-MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occured ");
-
-static enum cc_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
-static enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
-
-/*
- * This function returns the FIPS REE state.
- * The function should be implemented by the driver user, depends on where
- * the state value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state)
-{
- int rc = 0;
-
- if (!p_state)
- return -EINVAL;
-
- *p_state = fips_state;
-
- return rc;
-}
-
-/*
- * This function returns the FIPS REE error.
- * The function should be implemented by the driver user, depends on where
- * the error value is stored.
- * The reference code uses global variable.
- */
-int ssi_fips_ext_get_error(enum cc_fips_error *p_err)
-{
- int rc = 0;
-
- if (!p_err)
- return -EINVAL;
-
- *p_err = fips_error;
-
- return rc;
-}
-
-/*
- * This function sets the FIPS REE state.
- * The function should be implemented by the driver user, depending on where
- * the state value is stored.
- * The reference code uses a global variable.
- */
-int ssi_fips_ext_set_state(enum cc_fips_state_t state)
-{
- fips_state = state;
- return 0;
-}
-
-/*
- * This function sets the FIPS REE error.
- * The function should be implemented by the driver user, depending on where
- * the error value is stored.
- * The reference code uses a global variable.
- */
-int ssi_fips_ext_set_error(enum cc_fips_error err)
-{
- fips_error = err;
- return 0;
-}
-
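
The four accessors above exist only so that "driver user" code can supply its
own storage for the REE FIPS state and error values. Now that the FIPS code is
built only when CONFIG_CRYPTO_FIPS is set, the natural replacement is the
kernel's global fips_enabled flag from <linux/fips.h>. A minimal sketch of
such a query, assuming the consolidated ssi_fips.c keeps a helper of this
(hypothetical) name:

	#include <linux/types.h>
	#include <linux/fips.h>	/* fips_enabled: set when the kernel boots with fips=1 */

	/* Hypothetical helper: report FIPS support from the kernel-wide
	 * flag instead of a driver-private state variable.
	 */
	static bool cc_fips_supported(void)
	{
		return IS_ENABLED(CONFIG_CRYPTO_FIPS) && fips_enabled;
	}
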
diff --git a/drivers/staging/ccree/ssi_fips_ll.c b/drivers/staging/ccree/ssi_fips_ll.c
deleted file mode 100644
index 3557e20c9e36..000000000000
--- a/drivers/staging/ccree/ssi_fips_ll.c
+++ /dev/null
@@ -1,1649 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver FIPS low-level implementation
- * functions that execute the KATs (known-answer tests).
- ***************************************************************/
-#include <linux/kernel.h>
-
-#include "ssi_driver.h"
-#include "ssi_fips_local.h"
-#include "ssi_fips_data.h"
-#include "cc_crypto_ctx.h"
-#include "ssi_hash.h"
-#include "ssi_request_mgr.h"
-
-static const u32 digest_len_init[] = {
- 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const u32 sha1_init[] = {
- SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha256_init[] = {
- SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
- SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-#if (CC_SUPPORT_SHA > 256)
-static const u32 digest_len_sha512_init[] = {
- 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const u64 sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
-#endif
-
-#define NIST_CIPHER_AES_MAX_VECTOR_SIZE 32
-
-struct fips_cipher_ctx {
- u8 iv[CC_AES_IV_SIZE];
- u8 key[AES_512_BIT_KEY_SIZE];
- u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-};
-
-typedef struct _FipsCipherData {
- u8 isAes;
- u8 key[AES_512_BIT_KEY_SIZE];
- size_t keySize;
- u8 iv[CC_AES_IV_SIZE];
- enum drv_crypto_direction direction;
- enum drv_cipher_mode oprMode;
- u8 dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- size_t dataInSize;
-} FipsCipherData;
-
-struct fips_cmac_ctx {
- u8 key[AES_256_BIT_KEY_SIZE];
- u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsCmacData {
- enum drv_crypto_direction direction;
- u8 key[AES_256_BIT_KEY_SIZE];
- size_t key_size;
- u8 data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
- size_t mac_res_size;
-} FipsCmacData;
-
-struct fips_hash_ctx {
- u8 initial_digest[CC_DIGEST_SIZE_MAX];
- u8 din[NIST_SHA_MSG_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsHashData {
- enum drv_hash_mode hash_mode;
- u8 data_in[NIST_SHA_MSG_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-} FipsHashData;
-
-/* note that the HMAC key length must be less than or equal to the block size (the block size is 64 bytes up to SHA-256 and 128 bytes for SHA-384/512) */
-struct fips_hmac_ctx {
- u8 initial_digest[CC_DIGEST_SIZE_MAX];
- u8 key[CC_HMAC_BLOCK_SIZE_MAX];
- u8 k0[CC_HMAC_BLOCK_SIZE_MAX];
- u8 digest_bytes_len[HASH_LEN_SIZE];
- u8 tmp_digest[CC_DIGEST_SIZE_MAX];
- u8 din[NIST_HMAC_MSG_SIZE];
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-};
-
-typedef struct _FipsHmacData {
- enum drv_hash_mode hash_mode;
- u8 key[CC_HMAC_BLOCK_SIZE_MAX];
- size_t key_size;
- u8 data_in[NIST_HMAC_MSG_SIZE];
- size_t data_in_size;
- u8 mac_res[CC_DIGEST_SIZE_MAX];
-} FipsHmacData;
-
-#define FIPS_CCM_B0_A0_ADATA_SIZE (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
-
-struct fips_ccm_ctx {
- u8 b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
- u8 iv[NIST_AESCCM_IV_SIZE];
- u8 ctr_cnt_0[NIST_AESCCM_IV_SIZE];
- u8 key[CC_AES_KEY_SIZE_MAX];
- u8 din[NIST_AESCCM_TEXT_SIZE];
- u8 dout[NIST_AESCCM_TEXT_SIZE];
- u8 mac_res[NIST_AESCCM_TAG_SIZE];
-};
-
-typedef struct _FipsCcmData {
- enum drv_crypto_direction direction;
- u8 key[CC_AES_KEY_SIZE_MAX];
- size_t keySize;
- u8 nonce[NIST_AESCCM_NONCE_SIZE];
- u8 adata[NIST_AESCCM_ADATA_SIZE];
- size_t adataSize;
- u8 dataIn[NIST_AESCCM_TEXT_SIZE];
- size_t dataInSize;
- u8 dataOut[NIST_AESCCM_TEXT_SIZE];
- u8 tagSize;
- u8 macResOut[NIST_AESCCM_TAG_SIZE];
-} FipsCcmData;
-
-struct fips_gcm_ctx {
- u8 adata[NIST_AESGCM_ADATA_SIZE];
- u8 key[CC_AES_KEY_SIZE_MAX];
- u8 hkey[CC_AES_KEY_SIZE_MAX];
- u8 din[NIST_AESGCM_TEXT_SIZE];
- u8 dout[NIST_AESGCM_TEXT_SIZE];
- u8 mac_res[NIST_AESGCM_TAG_SIZE];
- u8 len_block[AES_BLOCK_SIZE];
- u8 iv_inc1[AES_BLOCK_SIZE];
- u8 iv_inc2[AES_BLOCK_SIZE];
-};
-
-typedef struct _FipsGcmData {
- enum drv_crypto_direction direction;
- u8 key[CC_AES_KEY_SIZE_MAX];
- size_t keySize;
- u8 iv[NIST_AESGCM_IV_SIZE];
- u8 adata[NIST_AESGCM_ADATA_SIZE];
- size_t adataSize;
- u8 dataIn[NIST_AESGCM_TEXT_SIZE];
- size_t dataInSize;
- u8 dataOut[NIST_AESGCM_TEXT_SIZE];
- u8 tagSize;
- u8 macResOut[NIST_AESGCM_TAG_SIZE];
-} FipsGcmData;
-
-typedef union _fips_ctx {
- struct fips_cipher_ctx cipher;
- struct fips_cmac_ctx cmac;
- struct fips_hash_ctx hash;
- struct fips_hmac_ctx hmac;
- struct fips_ccm_ctx ccm;
- struct fips_gcm_ctx gcm;
-} fips_ctx;
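-
-/* All of the per-test contexts above are overlaid on a single DMA-coherent
- * buffer: each power-up test casts cpu_addr_buffer to its own fips_*_ctx
- * view and derives the per-field DMA addresses with offsetof(), so the
- * buffer need only be as large as the biggest member of this union.
- */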
-
-/* test data tables */
-static const FipsCipherData FipsCipherDataTable[] = {
- /* AES */
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_128_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_128_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_192_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_192_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_256_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_256_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_128_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_128_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_192_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_192_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_256_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_256_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_128_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_128_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_192_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_192_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_256_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_256_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_128_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_128_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_192_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_192_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_256_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_256_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
- { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_PLAIN_DATA, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_VECTOR_SIZE },
- { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_PLAIN_DATA, RFC3962_AES_VECTOR_SIZE },
- { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_VECTOR_SIZE },
- { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_VECTOR_SIZE },
-#if (CC_SUPPORT_SHA > 256)
- { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE },
- { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE },
-#endif
- /* DES */
- { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_ECB3_CIPHER, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_CIPHER, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE },
- { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
-};
-
-#define FIPS_CIPHER_NUM_OF_TESTS (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData))
-
-static const FipsCmacData FipsCmacDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_128_CMAC_KEY, AES_128_BIT_KEY_SIZE, NIST_AES_128_CMAC_PLAIN_DATA, NIST_AES_128_CMAC_VECTOR_SIZE, NIST_AES_128_CMAC_MAC, NIST_AES_128_CMAC_OUTPUT_SIZE },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE },
-};
-
-#define FIPS_CMAC_NUM_OF_TESTS (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData))
-
-static const FipsHashData FipsHashDataTable[] = {
- { DRV_HASH_SHA1, NIST_SHA_1_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_1_MD },
- { DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD },
-#if (CC_SUPPORT_SHA > 256)
-// { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD },
-#endif
-};
-
-#define FIPS_HASH_NUM_OF_TESTS (sizeof(FipsHashDataTable) / sizeof(FipsHashData))
-
-static const FipsHmacData FipsHmacDataTable[] = {
- { DRV_HASH_SHA1, NIST_HMAC_SHA1_KEY, NIST_HMAC_SHA1_KEY_SIZE, NIST_HMAC_SHA1_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD },
- { DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD },
-#if (CC_SUPPORT_SHA > 256)
-// { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD },
-#endif
-};
-
-#define FIPS_HMAC_NUM_OF_TESTS (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData))
-
-static const FipsCcmData FipsCcmDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
-};
-
-#define FIPS_CCM_NUM_OF_TESTS (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData))
-
-static const FipsGcmData FipsGcmDataTable[] = {
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
- { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
- { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
-};
-
-#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
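-/* Each *_NUM_OF_TESTS above is sizeof(table) / sizeof(entry), i.e. the same
- * count the kernel's ARRAY_SIZE() macro from <linux/kernel.h> computes,
- * which additionally type-checks that its argument really is an array.
- */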
-
-static inline enum cc_fips_error
-FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
-{
- switch (mode)
- {
- case DRV_CIPHER_ECB:
- return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT;
- case DRV_CIPHER_CBC:
- return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT;
- case DRV_CIPHER_OFB:
- return CC_REE_FIPS_ERROR_AES_OFB_PUT;
- case DRV_CIPHER_CTR:
- return CC_REE_FIPS_ERROR_AES_CTR_PUT;
- case DRV_CIPHER_CBC_CTS:
- return CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT;
- case DRV_CIPHER_XTS:
- return CC_REE_FIPS_ERROR_AES_XTS_PUT;
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
- bool is_aes,
- int cipher_mode,
- int direction,
- dma_addr_t key_dma_addr,
- size_t key_len,
- dma_addr_t iv_dma_addr,
- size_t iv_len,
- dma_addr_t din_dma_addr,
- dma_addr_t dout_dma_addr,
- size_t data_size)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CIPHER_MAX_SEQ_LEN 6
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CIPHER_MAX_SEQ_LEN];
- int idx = 0;
- int s_flow_mode = is_aes ? S_DIN_to_AES : S_DIN_to_DES;
-
- /* create setup descriptors */
- switch (cipher_mode) {
- case DRV_CIPHER_CBC:
- case DRV_CIPHER_CBC_CTS:
- case DRV_CIPHER_CTR:
- case DRV_CIPHER_OFB:
- /* Load cipher state */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI,
- iv_dma_addr, iv_len, NS_BIT);
- set_cipher_config0(&desc[idx], direction);
- set_flow_mode(&desc[idx], s_flow_mode);
- set_cipher_mode(&desc[idx], cipher_mode);
- if ((cipher_mode == DRV_CIPHER_CTR) ||
- (cipher_mode == DRV_CIPHER_OFB)) {
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- } else {
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- }
- idx++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- if (is_aes) {
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_len == 24) ? AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- set_key_size_aes(&desc[idx], key_len);
- } else {/*des*/
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[idx], key_len);
- }
- set_flow_mode(&desc[idx], s_flow_mode);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
- break;
- case DRV_CIPHER_XTS:
- /* Load AES key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, (key_len / 2),
- NS_BIT);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_flow_mode(&desc[idx], s_flow_mode);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* load XEX key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_din_type(&desc[idx], DMA_DLLI,
- (key_dma_addr + (key_len / 2)),
- (key_len / 2), NS_BIT);
- set_xex_data_unit_size(&desc[idx], data_size);
- set_flow_mode(&desc[idx], s_flow_mode);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_setup_mode(&desc[idx], SETUP_LOAD_XEX_KEY);
- idx++;
-
- /* Set state */
- hw_desc_init(&desc[idx]);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_cipher_mode(&desc[idx], cipher_mode);
- set_cipher_config0(&desc[idx], direction);
- set_key_size_aes(&desc[idx], (key_len / 2));
- set_flow_mode(&desc[idx], s_flow_mode);
- set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr,
- CC_AES_BLOCK_SIZE, NS_BIT);
- idx++;
- break;
- default:
- FIPS_LOG("Unsupported cipher mode (%d)\n", cipher_mode);
- BUG();
- }
-
- /* create data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CIPHER_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
-	// send_request returns an error only in corner cases that should not occur in this flow.
- return rc;
-}
-
-enum cc_fips_error
-ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_cipher_ctx *virt_ctx = (struct fips_cipher_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for iv, key, din, dout */
- dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, iv);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, dout);
-
- for (i = 0; i < FIPS_CIPHER_NUM_OF_TESTS; ++i)
- {
- FipsCipherData *cipherData = (FipsCipherData *)&FipsCipherDataTable[i];
- int rc = 0;
- size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_cipher_ctx));
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->iv, cipherData->iv, iv_size);
- memcpy(virt_ctx->key, cipherData->key, cipherData->keySize);
- memcpy(virt_ctx->din, cipherData->dataIn, cipherData->dataInSize);
-
- FIPS_DBG("ssi_cipher_fips_run_test - (i = %d) \n", i);
- rc = ssi_cipher_fips_run_test(drvdata,
- cipherData->isAes,
- cipherData->oprMode,
- cipherData->direction,
- key_dma_addr,
- cipherData->keySize,
- iv_dma_addr,
- iv_size,
- din_dma_addr,
- dout_dma_addr,
- cipherData->dataInSize);
- if (rc != 0)
- {
- FIPS_LOG("ssi_cipher_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
- break;
- }
-
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, cipherData->dataOut, cipherData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - oprMode=%d, isAes=%d\n", i, cipherData->oprMode, cipherData->isAes);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x (size=%d) \n", (size_t)cipherData->dataOut, (size_t)virt_ctx->dout, cipherData->dataInSize);
- for (i = 0; i < cipherData->dataInSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cipherData->dataOut[i], virt_ctx->dout[i]);
- }
-
- error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t key_dma_addr,
- size_t key_len,
- dma_addr_t din_dma_addr,
- size_t din_len,
- dma_addr_t digest_dma_addr,
- size_t digest_len)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CMAC_MAX_SEQ_LEN 4
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CMAC_MAX_SEQ_LEN];
- int idx = 0;
-
- /* Setup CMAC Key */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len), NS_BIT);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_key_size_aes(&desc[idx], key_len);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Load MAC state */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE,
- NS_BIT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_key_size_aes(&desc[idx], key_len);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- //ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_len, NS_BIT);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_dout_dlli(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,
- 0);
- set_flow_mode(&desc[idx], S_AES_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
-	// send_request returns an error only in corner cases that should not occur in this flow.
- return rc;
-}
-
-enum cc_fips_error
-ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_cmac_ctx *virt_ctx = (struct fips_cmac_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for key, din, dout */
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, mac_res);
-
- for (i = 0; i < FIPS_CMAC_NUM_OF_TESTS; ++i)
- {
- FipsCmacData *cmac_data = (FipsCmacData *)&FipsCmacDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_cmac_ctx));
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->key, cmac_data->key, cmac_data->key_size);
- memcpy(virt_ctx->din, cmac_data->data_in, cmac_data->data_in_size);
-
- BUG_ON(cmac_data->direction != DRV_CRYPTO_DIRECTION_ENCRYPT);
-
- FIPS_DBG("ssi_cmac_fips_run_test - (i = %d) \n", i);
- rc = ssi_cmac_fips_run_test(drvdata,
- key_dma_addr,
- cmac_data->key_size,
- din_dma_addr,
- cmac_data->data_in_size,
- mac_res_dma_addr,
- cmac_data->mac_res_size);
- if (rc != 0)
- {
- FIPS_LOG("ssi_cmac_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, cmac_data->mac_res, cmac_data->mac_res_size) != 0)
- {
- FIPS_LOG("comparison error %d - digest_size=%d \n", i, cmac_data->mac_res_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)cmac_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < cmac_data->mac_res_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cmac_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
- break;
- }
- }
-
- return error;
-}
-
-static inline enum cc_fips_error
-FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
-{
- switch (hash_mode) {
- case DRV_HASH_SHA1:
- return CC_REE_FIPS_ERROR_SHA1_PUT;
- case DRV_HASH_SHA256:
- return CC_REE_FIPS_ERROR_SHA256_PUT;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- return CC_REE_FIPS_ERROR_SHA512_PUT;
-#endif
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t initial_digest_dma_addr,
- dma_addr_t din_dma_addr,
- size_t data_in_size,
- dma_addr_t mac_res_dma_addr,
- enum drv_hash_mode hash_mode,
- enum drv_hash_hw_mode hw_mode,
- int digest_size,
- int inter_digestsize)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_HASH_MAX_SEQ_LEN 4
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_HASH_MAX_SEQ_LEN];
- int idx = 0;
-
- /* Load initial digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
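-	/* MD5 and the 64-bit SHA variants take the byte-swap path below;
-	 * SHA-1/SHA-256 instead rely on the engine's little-endian digest
-	 * conversion (HASH_DIGEST_RESULT_LITTLE_ENDIAN).
-	 */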
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_HASH_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_hash_ctx *virt_ctx = (struct fips_hash_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers for initial_digest, din, mac_res */
- dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, initial_digest);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, mac_res);
-
- for (i = 0; i < FIPS_HASH_NUM_OF_TESTS; ++i)
- {
- FipsHashData *hash_data = (FipsHashData *)&FipsHashDataTable[i];
- int rc = 0;
- enum drv_hash_hw_mode hw_mode = 0;
- int digest_size = 0;
- int inter_digestsize = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_hash_ctx));
-
- switch (hash_data->hash_mode) {
- case DRV_HASH_SHA1:
- hw_mode = DRV_HASH_HW_SHA1;
- digest_size = CC_SHA1_DIGEST_SIZE;
- inter_digestsize = CC_SHA1_DIGEST_SIZE;
- /* copy the initial digest into the allocated cache coherent buffer */
- memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
- break;
- case DRV_HASH_SHA256:
- hw_mode = DRV_HASH_HW_SHA256;
- digest_size = CC_SHA256_DIGEST_SIZE;
- inter_digestsize = CC_SHA256_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
- break;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- hw_mode = DRV_HASH_HW_SHA512;
- digest_size = CC_SHA512_DIGEST_SIZE;
- inter_digestsize = CC_SHA512_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
- break;
-#endif
- default:
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
-
- /* copy the din data into the allocated buffer */
- memcpy(virt_ctx->din, hash_data->data_in, hash_data->data_in_size);
-
- /* run the test on HW */
- FIPS_DBG("ssi_hash_fips_run_test - (i = %d) \n", i);
- rc = ssi_hash_fips_run_test(drvdata,
- initial_digest_dma_addr,
- din_dma_addr,
- hash_data->data_in_size,
- mac_res_dma_addr,
- hash_data->hash_mode,
- hw_mode,
- digest_size,
- inter_digestsize);
- if (rc != 0)
- {
- FIPS_LOG("ssi_hash_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, hash_data->mac_res, digest_size) != 0)
- {
- FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hash_data->hash_mode, digest_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hash_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < digest_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hash_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = FIPS_HashToFipsError(hash_data->hash_mode);
- break;
- }
- }
-
- return error;
-}
-
-static inline enum cc_fips_error
-FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
-{
- switch (hash_mode) {
- case DRV_HASH_SHA1:
- return CC_REE_FIPS_ERROR_HMAC_SHA1_PUT;
- case DRV_HASH_SHA256:
- return CC_REE_FIPS_ERROR_HMAC_SHA256_PUT;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- return CC_REE_FIPS_ERROR_HMAC_SHA512_PUT;
-#endif
- default:
- return CC_REE_FIPS_ERROR_GENERAL;
- }
-
- return CC_REE_FIPS_ERROR_GENERAL;
-}
-
-static inline int
-ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
- dma_addr_t initial_digest_dma_addr,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t din_dma_addr,
- size_t data_in_size,
- dma_addr_t mac_res_dma_addr,
- enum drv_hash_mode hash_mode,
- enum drv_hash_hw_mode hw_mode,
- size_t digest_size,
- size_t inter_digestsize,
- size_t block_size,
- dma_addr_t k0_dma_addr,
- dma_addr_t tmp_digest_dma_addr,
- dma_addr_t digest_bytes_len_dma_addr)
-{
-	/* The implemented flow is not the same as the one implemented in ssi_hash.c (setkey + digest flows).
-	 * In this flow, there is no need to store and reload some of the intermediate results.
- */
-
- /* max number of descriptors used for the flow */
- #define FIPS_HMAC_MAX_SEQ_LEN 12
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_HMAC_MAX_SEQ_LEN];
- int idx = 0;
- int i;
- /* calc the hash opad first and ipad only afterwards (unlike the flow in ssi_hash.c) */
- unsigned int hmacPadConst[2] = { HMAC_OPAD_CONST, HMAC_IPAD_CONST };
-
- // assume (key_size <= block_size)
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0);
- idx++;
-
- // if needed, append Key with zeros to create K0
- if ((block_size - key_size) != 0) {
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0, (block_size - key_size));
- set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], (k0_dma_addr + key_size),
- (block_size - key_size), NS_BIT, 0);
- idx++;
- }
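-	/* K0 now holds the RFC 2104 padded key: the raw key followed by
-	 * zero bytes up to the hash block size. Keys longer than the block
-	 * size would first have to be hashed down, but the KAT vectors used
-	 * here keep key_size <= block_size, matching the assumption above.
-	 */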
-
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
- return rc;
- }
- idx = 0;
-
- /* calc derived HMAC key */
- for (i = 0; i < 2; i++) {
- /* Load hash initial state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Prepare opad/ipad key */
- hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- idx++;
-
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, block_size,
- NS_BIT);
- set_cipher_mode(&desc[idx], hw_mode);
- set_xor_active(&desc[idx]);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- if (i == 0) {
- /* First iteration - calc H(K0^opad) into tmp_digest_dma_addr */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], tmp_digest_dma_addr,
- inter_digestsize, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
-			// Flush here so H(K0 ^ opad) is written out before the ipad pass; it is not clear whether the flow could instead continue with the current descriptors.
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
- return rc;
- }
- idx = 0;
- }
- }
-
- /* data descriptor */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* HW last hash block padding (aka. "DO_PAD") */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
- set_cipher_do(&desc[idx], DO_PAD);
- idx++;
-
- /* store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* at this point:
- * tmp_digest = H(o_key_pad)
- * k0 = H(i_key_pad || m)
- */
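-	/* What remains is the outer hash of RFC 2104:
-	 *   HMAC(K, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m))
-	 * The descriptors below reload H(o_key_pad) as the hash state, load
-	 * digest_bytes_len (one hash block already processed) as the running
-	 * length, and feed the inner digest stored in k0 through the engine
-	 * to produce the final MAC.
-	 */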
-
-	/* Load H(K0 ^ opad), computed above, as the hash state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, tmp_digest_dma_addr,
- inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], hw_mode);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- if (unlikely((hash_mode == DRV_HASH_MD5) ||
- (hash_mode == DRV_HASH_SHA384) ||
- (hash_mode == DRV_HASH_SHA512))) {
- set_bytes_swap(&desc[idx], 1);
- } else {
- set_cipher_config0(&desc[idx],
- HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- }
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_hmac_ctx *virt_ctx = (struct fips_hmac_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, initial_digest);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, key);
- dma_addr_t k0_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, k0);
- dma_addr_t tmp_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, tmp_digest);
- dma_addr_t digest_bytes_len_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, digest_bytes_len);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, din);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, mac_res);
-
- for (i = 0; i < FIPS_HMAC_NUM_OF_TESTS; ++i)
- {
- FipsHmacData *hmac_data = (FipsHmacData *)&FipsHmacDataTable[i];
- int rc = 0;
- enum drv_hash_hw_mode hw_mode = 0;
- int digest_size = 0;
- int block_size = 0;
- int inter_digestsize = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_hmac_ctx));
-
- switch (hmac_data->hash_mode) {
- case DRV_HASH_SHA1:
- hw_mode = DRV_HASH_HW_SHA1;
- digest_size = CC_SHA1_DIGEST_SIZE;
- block_size = CC_SHA1_BLOCK_SIZE;
- inter_digestsize = CC_SHA1_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
- break;
- case DRV_HASH_SHA256:
- hw_mode = DRV_HASH_HW_SHA256;
- digest_size = CC_SHA256_DIGEST_SIZE;
- block_size = CC_SHA256_BLOCK_SIZE;
- inter_digestsize = CC_SHA256_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
- break;
-#if (CC_SUPPORT_SHA > 256)
- case DRV_HASH_SHA512:
- hw_mode = DRV_HASH_HW_SHA512;
- digest_size = CC_SHA512_DIGEST_SIZE;
- block_size = CC_SHA512_BLOCK_SIZE;
- inter_digestsize = CC_SHA512_DIGEST_SIZE;
- memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
- memcpy(virt_ctx->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
- break;
-#endif
- default:
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
-
- /* copy into the allocated buffer */
- memcpy(virt_ctx->key, hmac_data->key, hmac_data->key_size);
- memcpy(virt_ctx->din, hmac_data->data_in, hmac_data->data_in_size);
-
- /* run the test on HW */
- FIPS_DBG("ssi_hmac_fips_run_test - (i = %d) \n", i);
- rc = ssi_hmac_fips_run_test(drvdata,
- initial_digest_dma_addr,
- key_dma_addr,
- hmac_data->key_size,
- din_dma_addr,
- hmac_data->data_in_size,
- mac_res_dma_addr,
- hmac_data->hash_mode,
- hw_mode,
- digest_size,
- inter_digestsize,
- block_size,
- k0_dma_addr,
- tmp_digest_dma_addr,
- digest_bytes_len_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_hmac_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, hmac_data->mac_res, digest_size) != 0)
- {
- FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hmac_data->hash_mode, digest_size);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hmac_data->mac_res, (size_t)virt_ctx->mac_res);
- for (i = 0; i < digest_size; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hmac_data->mac_res[i], virt_ctx->mac_res[i]);
- }
-
- error = FIPS_HmacToFipsError(hmac_data->hash_mode);
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
- enum drv_crypto_direction direction,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t iv_dma_addr,
- dma_addr_t ctr_cnt_0_dma_addr,
- dma_addr_t b0_a0_adata_dma_addr,
- size_t b0_a0_adata_size,
- dma_addr_t din_dma_addr,
- size_t din_size,
- dma_addr_t dout_dma_addr,
- dma_addr_t mac_res_dma_addr)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_CCM_MAX_SEQ_LEN 10
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_CCM_MAX_SEQ_LEN];
- unsigned int idx = 0;
- unsigned int cipher_flow_mode;
-
- if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- } else { /* Encrypt */
- cipher_flow_mode = AES_and_HASH;
- }
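-	/* CBC-MAC always runs over the plaintext: on encrypt the plaintext
-	 * is the AES input (AES_and_HASH), on decrypt it is the AES output
-	 * (AES_to_HASH_and_DOUT), hence the flow mode flips with direction.
-	 */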
-
- /* load key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
-		     CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load ctr state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load MAC key */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
- ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
- CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load MAC state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
- NIST_AESCCM_TAG_SIZE, NS_BIT);
- set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
-	/* process assoc data */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr,
- b0_a0_adata_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* process the cipher */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], cipher_flow_mode);
- idx++;
-
- /* Read temporal MAC */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
- NS_BIT, 0);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load AES-CTR state (for last MAC calculation)*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, ctr_cnt_0_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
-	/* encrypt the "T" value and store MAC in place */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
- NIST_AESCCM_TAG_SIZE, NS_BIT);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
- NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
-enum cc_fips_error
-ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_ccm_ctx *virt_ctx = (struct fips_ccm_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t b0_a0_adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, b0_a0_adata);
- dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, iv);
- dma_addr_t ctr_cnt_0_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, ctr_cnt_0);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, key);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, dout);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, mac_res);
-
- for (i = 0; i < FIPS_CCM_NUM_OF_TESTS; ++i)
- {
- FipsCcmData *ccmData = (FipsCcmData *)&FipsCcmDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_ccm_ctx));
-
- /* copy the nonce, key, adata, din data into the allocated buffer */
- memcpy(virt_ctx->key, ccmData->key, ccmData->keySize);
- memcpy(virt_ctx->din, ccmData->dataIn, ccmData->dataInSize);
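-		/* Hand-build the NIST SP 800-38C formatting blocks below:
-		 * B0 (flags byte, nonce, message length), A0 (two-byte adata
-		 * length prefix) and the CTR blocks (flags byte L' = 1,
-		 * nonce, counter = 1 for the payload and counter = 0 for
-		 * encrypting the final MAC).
-		 */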
- {
- /* build B0 -- B0, nonce, l(m) */
- __be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE);
-
- virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL;
- memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
- memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16));
- /* build A0+ADATA */
- virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 0] = (ccmData->adataSize >> 8) & 0xFF;
- virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 1] = ccmData->adataSize & 0xFF;
- memcpy(virt_ctx->b0_a0_adata + NIST_AESCCM_IV_SIZE + 2, ccmData->adata, ccmData->adataSize);
- /* iv */
- virt_ctx->iv[0] = 1; /* L' */
- memcpy(virt_ctx->iv + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
- virt_ctx->iv[15] = 1;
- /* ctr_count_0 */
- memcpy(virt_ctx->ctr_cnt_0, virt_ctx->iv, NIST_AESCCM_IV_SIZE);
- virt_ctx->ctr_cnt_0[15] = 0;
- }
-
- FIPS_DBG("ssi_ccm_fips_run_test - (i = %d) \n", i);
- rc = ssi_ccm_fips_run_test(drvdata,
- ccmData->direction,
- key_dma_addr,
- ccmData->keySize,
- iv_dma_addr,
- ctr_cnt_0_dma_addr,
- b0_a0_adata_dma_addr,
- FIPS_CCM_B0_A0_ADATA_SIZE,
- din_dma_addr,
- ccmData->dataInSize,
- dout_dma_addr,
- mac_res_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_ccm_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
-
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, ccmData->dataOut, ccmData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - size=%d \n", i, ccmData->dataInSize);
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, ccmData->macResOut, ccmData->tagSize) != 0)
- {
- FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, ccmData->tagSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)ccmData->macResOut, (size_t)virt_ctx->mac_res);
- for (i = 0; i < ccmData->tagSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, ccmData->macResOut[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESCCM_PUT;
- break;
- }
- }
-
- return error;
-}
-
-static inline int
-ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
- enum drv_crypto_direction direction,
- dma_addr_t key_dma_addr,
- size_t key_size,
- dma_addr_t hkey_dma_addr,
- dma_addr_t block_len_dma_addr,
- dma_addr_t iv_inc1_dma_addr,
- dma_addr_t iv_inc2_dma_addr,
- dma_addr_t adata_dma_addr,
- size_t adata_size,
- dma_addr_t din_dma_addr,
- size_t din_size,
- dma_addr_t dout_dma_addr,
- dma_addr_t mac_res_dma_addr)
-{
- /* max number of descriptors used for the flow */
- #define FIPS_GCM_MAX_SEQ_LEN 15
-
- int rc;
- struct ssi_crypto_req ssi_req = {0};
- struct cc_hw_desc desc[FIPS_GCM_MAX_SEQ_LEN];
- unsigned int idx = 0;
- unsigned int cipher_flow_mode;
-
- if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_and_HASH;
- } else { /* Encrypt */
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- }
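-	/* GHASH always runs over the ciphertext: on encrypt that is the
-	 * GCTR output (AES_to_HASH_and_DOUT), on decrypt it is the incoming
-	 * data (AES_and_HASH), the mirror image of the CCM flow above.
-	 */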
-
-///////////////////////////////// 1 ////////////////////////////////////
-// ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
-///////////////////////////////// 1 ////////////////////////////////////
-
- /* load key to AES */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* process one zero block to generate hkey */
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
- set_dout_dlli(&desc[idx], hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Load GHASH subkey */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, hkey_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Configure Hash Engine to work with GHASH.
- * Since it was not possible to extend HASH submodes to add GHASH,
- * The following command is necessary in order to
- * select GHASH (according to HW designers)
- */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
-	/* Load the GHASH initial state, which is all zero (every hash mode has an initial state) */
- hw_desc_init(&desc[idx]);
- set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_aes_not_hash_mode(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
-///////////////////////////////// 2 ////////////////////////////////////
-	/* process(ghash) assoc data */
-// if (req->assoclen > 0)
-// ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
-///////////////////////////////// 2 ////////////////////////////////////
-
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, adata_dma_addr, adata_size, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
-///////////////////////////////// 3 ////////////////////////////////////
-// ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
-///////////////////////////////// 3 ////////////////////////////////////
-
- /* load key to AES*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
- set_key_size_aes(&desc[idx], key_size);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* load AES/CTR initial CTR value inc by 2*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_inc2_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
-///////////////////////////////// 4 ////////////////////////////////////
- /* process(gctr+ghash) */
-// if (req_ctx->cryptlen != 0)
-// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
-///////////////////////////////// 4 ////////////////////////////////////
-
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
- set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
- set_flow_mode(&desc[idx], cipher_flow_mode);
- idx++;
-
-///////////////////////////////// 5 ////////////////////////////////////
-// ssi_aead_process_gcm_result_desc(req, desc, seq_size);
-///////////////////////////////// 5 ////////////////////////////////////
-
-	/* process(ghash) gcm_block_len */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, block_len_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
-
- /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_aes_not_hash_mode(&desc[idx]);
- idx++;
-
- /* load AES/CTR initial CTR value inc by 1*/
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_key_size_aes(&desc[idx], key_size);
- set_din_type(&desc[idx], DMA_DLLI, iv_inc1_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- set_flow_mode(&desc[idx], S_DIN_to_AES);
- idx++;
-
- /* Memory Barrier */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* process GCTR on stored GHASH and store MAC inplace */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
- set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, AES_BLOCK_SIZE,
- NS_BIT);
- set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
- set_flow_mode(&desc[idx], DIN_AES_DOUT);
- idx++;
-
- /* perform the operation - Lock HW and push sequence */
- BUG_ON(idx > FIPS_GCM_MAX_SEQ_LEN);
- rc = send_request(drvdata, &ssi_req, desc, idx, false);
-
- return rc;
-}
-
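In SP 800-38D notation, the descriptor sequence above implements the standard GCM tag path (a reading summary, not driver output):

	H = CIPH_K(0^128)                                   (ECB-encrypt one zero block)
	S = GHASH_H(A || 0^v || C || 0^u || [len(A)]_64 || [len(C)]_64)
	T = MSB_t(GCTR_K(J0, S)),   J0 = IV || 0^31 || 1    (96-bit IV)

That is: GHASH accumulates the AAD, the ciphertext and the length block, and a single GCTR block at counter value 1 encrypts the final GHASH state into the tag.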
-enum cc_fips_error
-ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
-{
- enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
- size_t i;
- struct fips_gcm_ctx *virt_ctx = (struct fips_gcm_ctx *)cpu_addr_buffer;
-
-	/* set the physical pointers */
- dma_addr_t adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, adata);
- dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, key);
- dma_addr_t hkey_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, hkey);
- dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, din);
- dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, dout);
- dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, mac_res);
- dma_addr_t len_block_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, len_block);
- dma_addr_t iv_inc1_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc1);
- dma_addr_t iv_inc2_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc2);
-
- for (i = 0; i < FIPS_GCM_NUM_OF_TESTS; ++i)
- {
- FipsGcmData *gcmData = (FipsGcmData *)&FipsGcmDataTable[i];
- int rc = 0;
-
- memset(cpu_addr_buffer, 0, sizeof(struct fips_gcm_ctx));
-
- /* copy the key, adata, din data - into the allocated buffer */
- memcpy(virt_ctx->key, gcmData->key, gcmData->keySize);
- memcpy(virt_ctx->adata, gcmData->adata, gcmData->adataSize);
- memcpy(virt_ctx->din, gcmData->dataIn, gcmData->dataInSize);
-
- /* len_block */
- {
- __be64 len_bits;
-
- len_bits = cpu_to_be64(gcmData->adataSize * 8);
- memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits));
- len_bits = cpu_to_be64(gcmData->dataInSize * 8);
- memcpy(virt_ctx->len_block + 8, &len_bits, sizeof(len_bits));
- }
- /* iv_inc1, iv_inc2 */
- {
- __be32 counter = cpu_to_be32(1);
-
- memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE);
- memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
- counter = cpu_to_be32(2);
- memcpy(virt_ctx->iv_inc2, gcmData->iv, NIST_AESGCM_IV_SIZE);
- memcpy(virt_ctx->iv_inc2 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
- }
-
- FIPS_DBG("ssi_gcm_fips_run_test - (i = %d) \n", i);
- rc = ssi_gcm_fips_run_test(drvdata,
- gcmData->direction,
- key_dma_addr,
- gcmData->keySize,
- hkey_dma_addr,
- len_block_dma_addr,
- iv_inc1_dma_addr,
- iv_inc2_dma_addr,
- adata_dma_addr,
- gcmData->adataSize,
- din_dma_addr,
- gcmData->dataInSize,
- dout_dma_addr,
- mac_res_dma_addr);
- if (rc != 0)
- {
- FIPS_LOG("ssi_gcm_fips_run_test %d returned error - rc = %d \n", i, rc);
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
-
- if (gcmData->direction == DRV_CRYPTO_DIRECTION_ENCRYPT) {
- /* compare actual dout to expected */
- if (memcmp(virt_ctx->dout, gcmData->dataOut, gcmData->dataInSize) != 0)
- {
- FIPS_LOG("dout comparison error %d - size=%d \n", i, gcmData->dataInSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->dataOut, (size_t)virt_ctx->dout);
- for (i = 0; i < gcmData->dataInSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->dataOut[i], virt_ctx->dout[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
- }
-
- /* compare actual mac result to expected */
- if (memcmp(virt_ctx->mac_res, gcmData->macResOut, gcmData->tagSize) != 0)
- {
- FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, gcmData->tagSize);
- FIPS_LOG(" i expected received \n");
- FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->macResOut, (size_t)virt_ctx->mac_res);
- for (i = 0; i < gcmData->tagSize; ++i)
- {
- FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->macResOut[i], virt_ctx->mac_res[i]);
- }
-
- error = CC_REE_FIPS_ERROR_AESGCM_PUT;
- break;
- }
- }
- return error;
-}
-
-size_t ssi_fips_max_mem_alloc_size(void)
-{
- FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx));
- FIPS_DBG("sizeof(struct fips_cmac_ctx) %d \n", sizeof(struct fips_cmac_ctx));
- FIPS_DBG("sizeof(struct fips_hash_ctx) %d \n", sizeof(struct fips_hash_ctx));
- FIPS_DBG("sizeof(struct fips_hmac_ctx) %d \n", sizeof(struct fips_hmac_ctx));
- FIPS_DBG("sizeof(struct fips_ccm_ctx) %d \n", sizeof(struct fips_ccm_ctx));
- FIPS_DBG("sizeof(struct fips_gcm_ctx) %d \n", sizeof(struct fips_gcm_ctx));
-
- return sizeof(fips_ctx);
-}
-
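The per-vector setup in the loop above can be summarized by a small host-side model. This is a sketch under the assumption of 96-bit IVs (as in the NIST vectors); gcm_precompute() and its parameter names are illustrative, not driver API:

#include <stdint.h>
#include <string.h>

#define GCM_IV_SIZE	12	/* 96-bit IV, as in the vectors above */
#define AES_BLOCK	16

/* Host-side model of the per-vector blocks handed to the HW. */
static void gcm_precompute(const uint8_t iv[GCM_IV_SIZE],
			   uint64_t aad_len, uint64_t pt_len,
			   uint8_t j0[AES_BLOCK],
			   uint8_t j0_next[AES_BLOCK],
			   uint8_t len_block[AES_BLOCK])
{
	int i;

	/* J0 = IV || 0x00000001 (iv_inc1 above) */
	memcpy(j0, iv, GCM_IV_SIZE);
	memset(j0 + GCM_IV_SIZE, 0, 4);
	j0[AES_BLOCK - 1] = 1;

	/* J0 + 1 = IV || 0x00000002 (iv_inc2): first payload counter */
	memcpy(j0_next, j0, AES_BLOCK);
	j0_next[AES_BLOCK - 1] = 2;

	/* [len(A)]64 || [len(C)]64, both in bits, big-endian */
	for (i = 0; i < 8; i++) {
		len_block[7 - i]  = (uint8_t)((aad_len * 8) >> (8 * i));
		len_block[15 - i] = (uint8_t)((pt_len * 8) >> (8 * i));
	}
}

Counter value 1 (iv_inc1) feeds the final tag encryption and counter value 2 (iv_inc2) is where payload encryption starts, matching the two SETUP_LOAD_STATE1 descriptors in the deleted sequence.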
diff --git a/drivers/staging/ccree/ssi_fips_local.c b/drivers/staging/ccree/ssi_fips_local.c
deleted file mode 100644
index aefb71dc9e9a..000000000000
--- a/drivers/staging/ccree/ssi_fips_local.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/**************************************************************
- * This file defines the driver FIPS internal functions, used by the driver itself.
- ***************************************************************/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <crypto/des.h>
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "cc_hal.h"
-
-#define FIPS_POWER_UP_TEST_CIPHER 1
-#define FIPS_POWER_UP_TEST_CMAC 1
-#define FIPS_POWER_UP_TEST_HASH 1
-#define FIPS_POWER_UP_TEST_HMAC 1
-#define FIPS_POWER_UP_TEST_CCM 1
-#define FIPS_POWER_UP_TEST_GCM 1
-
-static bool ssi_fips_support = 1;
-module_param(ssi_fips_support, bool, 0644);
-MODULE_PARM_DESC(ssi_fips_support, "FIPS supported flag: 0 - off , 1 - on (default)");
-
-static void fips_dsr(unsigned long devarg);
-
-struct ssi_fips_handle {
-#ifdef COMP_IN_WQ
- struct workqueue_struct *workq;
- struct delayed_work fipswork;
-#else
- struct tasklet_struct fipstask;
-#endif
-};
-
-extern int ssi_fips_get_state(enum cc_fips_state_t *p_state);
-extern int ssi_fips_get_error(enum cc_fips_error *p_err);
-extern int ssi_fips_ext_set_state(enum cc_fips_state_t state);
-extern int ssi_fips_ext_set_error(enum cc_fips_error err);
-
-/* FIPS power-up tests */
-extern enum cc_fips_error ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern enum cc_fips_error ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern size_t ssi_fips_max_mem_alloc_size(void);
-
-/* The function is called once at the driver entry point to check whether a TEE FIPS error occurred. */
-static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
-{
- u32 regVal;
- void __iomem *cc_base = drvdata->cc_base;
-
- regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- return CC_REE_FIPS_ERROR_OK;
-
- return CC_REE_FIPS_ERROR_FROM_TEE;
-}
-
-/*
- * This function pushes the FIPS REE library status towards the TEE library
- * by writing the error state to the HOST_GPR0 register. It is called from
- * the driver entry point, so no mutex protection is needed.
- */
-static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, enum cc_fips_error err)
-{
- void __iomem *cc_base = drvdata->cc_base;
-
- if (err == CC_REE_FIPS_ERROR_OK)
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK));
- else
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR));
-}
-
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
-{
- struct ssi_fips_handle *fips_h = drvdata->fips_handle;
-
- if (!fips_h)
- return; /* Not allocated */
-
-#ifdef COMP_IN_WQ
- if (fips_h->workq) {
- flush_workqueue(fips_h->workq);
- destroy_workqueue(fips_h->workq);
- }
-#else
- /* Kill tasklet */
- tasklet_kill(&fips_h->fipstask);
-#endif
- memset(fips_h, 0, sizeof(struct ssi_fips_handle));
- kfree(fips_h);
- drvdata->fips_handle = NULL;
-}
-
-void fips_handler(struct ssi_drvdata *drvdata)
-{
- struct ssi_fips_handle *fips_handle_ptr =
- drvdata->fips_handle;
-#ifdef COMP_IN_WQ
- queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0);
-#else
- tasklet_schedule(&fips_handle_ptr->fipstask);
-#endif
-}
-
-#ifdef COMP_IN_WQ
-static void fips_wq_handler(struct work_struct *work)
-{
- struct ssi_drvdata *drvdata =
- container_of(work, struct ssi_drvdata, fipswork.work);
-
- fips_dsr((unsigned long)drvdata);
-}
-#endif
-
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void fips_dsr(unsigned long devarg)
-{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
- u32 irq;
- u32 teeFipsError = 0;
-
- irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
-
- if (irq & SSI_GPR0_IRQ_MASK) {
- teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
- }
-
-	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
-}
-
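The write at the end of fips_dsr() follows the usual unmask idiom: HOST_IMR is a mask register, so clearing a bit re-enables that interrupt source. A generic sketch of the pattern (helper name is illustrative; the register is assumed already ioremap()ed):

#include <linux/io.h>
#include <linux/types.h>

/* Clear irq_bits in a mask register to re-enable those sources. */
static void unmask_irq_bits(void __iomem *imr_reg, u32 irq_bits)
{
	writel(readl(imr_reg) & ~irq_bits, imr_reg);
}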
-enum cc_fips_error cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
-{
- enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
- void *cpu_addr_buffer = NULL;
- dma_addr_t dma_handle;
- size_t alloc_buff_size = ssi_fips_max_mem_alloc_size();
- struct device *dev = &drvdata->plat_dev->dev;
-
-	// allocate memory using dma_alloc_coherent - for a physically contiguous, cache-coherent buffer (no memory mapping is needed)
-	// the return value is the virtual address - use it to copy data into the buffer
-	// the dma_handle is the returned physical address - use it in the HW descriptor
- FIPS_DBG("dma_alloc_coherent \n");
- cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL);
- if (!cpu_addr_buffer)
- return CC_REE_FIPS_ERROR_GENERAL;
-
- FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size);
-
-#if FIPS_POWER_UP_TEST_CIPHER
- FIPS_DBG("ssi_cipher_fips_power_up_tests ...\n");
- fips_error = ssi_cipher_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_cipher_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
-#endif
-#if FIPS_POWER_UP_TEST_CMAC
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_cmac_fips_power_up_tests ...\n");
- fips_error = ssi_cmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_cmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_HASH
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_hash_fips_power_up_tests ...\n");
- fips_error = ssi_hash_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_hash_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_HMAC
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_hmac_fips_power_up_tests ...\n");
- fips_error = ssi_hmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_hmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_CCM
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_ccm_fips_power_up_tests ...\n");
- fips_error = ssi_ccm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_ccm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
-#if FIPS_POWER_UP_TEST_GCM
- if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
- FIPS_DBG("ssi_gcm_fips_power_up_tests ...\n");
- fips_error = ssi_gcm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
- FIPS_DBG("ssi_gcm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
- }
-#endif
- /* deallocate the buffer when all tests are done... */
- FIPS_DBG("dma_free_coherent \n");
- dma_free_coherent(dev, alloc_buff_size, cpu_addr_buffer, dma_handle);
-
- return fips_error;
-}
-
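The comment block above describes the standard coherent-allocation contract; a minimal sketch of that pattern, with alloc_test_buffer() as a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int alloc_test_buffer(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* One allocation, two views: cpu_addr for memcpy() of the test
	 * vectors, dma_handle for the HW descriptors; coherence means
	 * no dma_sync_*() calls are needed in between. */
	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... run the self-tests against cpu_addr / dma_handle ... */

	dma_free_coherent(dev, size, cpu_addr, dma_handle);
	return 0;
}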
-/* The function checks whether FIPS is supported and whether a FIPS error exists.
- * It should be used in every driver API.
- */
-int ssi_fips_check_fips_error(void)
-{
- enum cc_fips_state_t fips_state;
-
- if (ssi_fips_get_state(&fips_state) != 0) {
- FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n");
- return -ENOEXEC;
- }
- if (fips_state == CC_FIPS_STATE_ERROR) {
- FIPS_LOG("ssi_fips_get_state: fips_state is %d, returning.. \n", fips_state);
- return -ENOEXEC;
- }
- return 0;
-}
-
-/* The function sets the REE FIPS state.
- * It should be used while the driver is being loaded.
- */
-int ssi_fips_set_state(enum cc_fips_state_t state)
-{
- return ssi_fips_ext_set_state(state);
-}
-
-/* The function sets the REE FIPS error and pushes it to the TEE library.
- * It should be used when any of the KAT tests fails.
- */
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err)
-{
- int rc = 0;
- enum cc_fips_error current_err;
-
- FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
-
- // setting no error is not allowed
- if (err == CC_REE_FIPS_ERROR_OK)
- return -ENOEXEC;
-
- // If error exists, do not set new error
- if (ssi_fips_get_error(&current_err) != 0)
- return -ENOEXEC;
-
- if (current_err != CC_REE_FIPS_ERROR_OK)
- return -ENOEXEC;
-
- // set REE internal error and state
- rc = ssi_fips_ext_set_error(err);
- if (rc != 0)
- return -ENOEXEC;
-
- rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
- if (rc != 0)
- return -ENOEXEC;
-
-	// push the error towards the TEE library, unless it is a TEE error
- if (err != CC_REE_FIPS_ERROR_FROM_TEE)
- ssi_fips_update_tee_upon_ree_status(p_drvdata, err);
-
- return rc;
-}
-
-/* The function is called once at the driver entry point. */
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- enum cc_fips_error rc = CC_REE_FIPS_ERROR_OK;
- struct ssi_fips_handle *fips_h;
-
- FIPS_DBG("CC FIPS code .. (fips=%d) \n", ssi_fips_support);
-
- fips_h = kzalloc(sizeof(struct ssi_fips_handle), GFP_KERNEL);
- if (!fips_h) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- return -ENOMEM;
- }
-
- p_drvdata->fips_handle = fips_h;
-
-#ifdef COMP_IN_WQ
- SSI_LOG_DEBUG("Initializing fips workqueue\n");
- fips_h->workq = create_singlethread_workqueue("arm_cc7x_fips_wq");
- if (unlikely(!fips_h->workq)) {
- SSI_LOG_ERR("Failed creating fips work queue\n");
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- rc = -ENOMEM;
- goto ssi_fips_init_err;
- }
- INIT_DELAYED_WORK(&fips_h->fipswork, fips_wq_handler);
-#else
- SSI_LOG_DEBUG("Initializing fips tasklet\n");
- tasklet_init(&fips_h->fipstask, fips_dsr, (unsigned long)p_drvdata);
-#endif
-
- /* init fips driver data */
- rc = ssi_fips_set_state((ssi_fips_support == 0) ? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED);
- if (unlikely(rc != 0)) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
-
- /* Run power up tests (before registration and operating the HW engines) */
- FIPS_DBG("ssi_fips_get_tee_error \n");
- rc = ssi_fips_get_tee_error(p_drvdata);
- if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
-
- FIPS_DBG("cc_fips_run_power_up_tests \n");
- rc = cc_fips_run_power_up_tests(p_drvdata);
- if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
- ssi_fips_set_error(p_drvdata, rc);
- rc = -EAGAIN;
- goto ssi_fips_init_err;
- }
- FIPS_LOG("cc_fips_run_power_up_tests - done ... fips_error = %d \n", rc);
-
- /* when all tests passed, update TEE with fips OK status after power up tests */
- ssi_fips_update_tee_upon_ree_status(p_drvdata, CC_REE_FIPS_ERROR_OK);
-
- if (unlikely(rc != 0)) {
- rc = -EAGAIN;
- ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
- goto ssi_fips_init_err;
- }
-
- return 0;
-
-ssi_fips_init_err:
- ssi_fips_fini(p_drvdata);
- return rc;
-}
-
diff --git a/drivers/staging/ccree/ssi_fips_local.h b/drivers/staging/ccree/ssi_fips_local.h
deleted file mode 100644
index 8c7994fe9fae..000000000000
--- a/drivers/staging/ccree/ssi_fips_local.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_FIPS_LOCAL_H__
-#define __SSI_FIPS_LOCAL_H__
-
-#ifdef CONFIG_CCX7REE_FIPS_SUPPORT
-
-#include "ssi_fips.h"
-struct ssi_drvdata;
-
-#define CHECK_AND_RETURN_UPON_FIPS_ERROR() {\
- if (ssi_fips_check_fips_error() != 0) {\
- return -ENOEXEC;\
- } \
-}
-
-#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\
- if (ssi_fips_check_fips_error() != 0) {\
- return;\
- } \
-}
-
-#define SSI_FIPS_INIT(p_drvData) (ssi_fips_init(p_drvData))
-#define SSI_FIPS_FINI(p_drvData) (ssi_fips_fini(p_drvData))
-
-#define FIPS_LOG(...) SSI_LOG(KERN_INFO, __VA_ARGS__)
-#define FIPS_DBG(...) //SSI_LOG(KERN_INFO, __VA_ARGS__)
-
-/* FIPS functions */
-int ssi_fips_init(struct ssi_drvdata *p_drvdata);
-void ssi_fips_fini(struct ssi_drvdata *drvdata);
-int ssi_fips_check_fips_error(void);
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err);
-void fips_handler(struct ssi_drvdata *drvdata);
-
-#else /* CONFIG_CC7XXREE_FIPS_SUPPORT */
-
-#define CHECK_AND_RETURN_UPON_FIPS_ERROR()
-#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR()
-
-static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- return 0;
-}
-
-static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-
-void fips_handler(struct ssi_drvdata *drvdata);
-
-#endif /* CONFIG_CC7XXREE_FIPS_SUPPORT */
-
-#endif /*__SSI_FIPS_LOCAL_H__*/
-
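With this shim header gone, the natural replacement is the kernel-wide FIPS flag; a minimal sketch, assuming the driver keys off the standard fips_enabled symbol from <linux/fips.h> (an assumption about intent, not code from this patch):

#include <linux/fips.h>
#include <linux/types.h>

/* fips_enabled reflects the fips=1 boot parameter under
 * CONFIG_CRYPTO_FIPS and is defined to 0 otherwise, so no local
 * CHECK_AND_RETURN_* macros are needed. */
static bool cc_fips_active(void)
{
	return fips_enabled;
}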
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index ae8f36af3837..13291aeaf350 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -30,7 +30,6 @@
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"
-#include "ssi_fips_local.h"
#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
@@ -71,8 +70,8 @@ static void ssi_hash_create_xcbc_setup(
unsigned int *seq_size);
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size);
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size);
struct ssi_hash_alg {
struct list_head entry;
@@ -118,8 +117,8 @@ static void ssi_hash_create_data_desc(
static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
if (unlikely((mode == DRV_HASH_MD5) ||
- (mode == DRV_HASH_SHA384) ||
- (mode == DRV_HASH_SHA512))) {
+ (mode == DRV_HASH_SHA384) ||
+ (mode == DRV_HASH_SHA512))) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@@ -136,13 +135,13 @@ static int ssi_hash_map_result(struct device *dev,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
- digestsize);
+ digestsize);
return -ENOMEM;
}
SSI_LOG_DEBUG("Mapped digest result buffer %u B "
- "at va=%pK to dma=0x%llX\n",
+ "at va=%pK to dma=%pad\n",
digestsize, state->digest_result_buff,
- (unsigned long long)state->digest_result_dma_addr);
+ state->digest_result_dma_addr);
return 0;
}
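The 0x%llX casts here and below give way to %pad, which prints a dma_addr_t at its native width. A sketch of the format as documented in printk-formats (note it consumes the address of the variable, not its value):

#include <linux/printk.h>
#include <linux/types.h>

static void log_mapping(void *va, dma_addr_t pa, unsigned int len)
{
	/* %pad dereferences its argument, hence &pa */
	pr_debug("mapped %u B at va=%pK to dma=%pad\n", len, va, &pa);
}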
@@ -201,12 +200,12 @@ static int ssi_hash_map_request(struct device *dev,
state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->digest_buff);
+ ctx->inter_digestsize, state->digest_buff);
goto fail3;
}
- SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
- ctx->inter_digestsize, state->digest_buff,
- (unsigned long long)state->digest_buff_dma_addr);
+ SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ state->digest_buff_dma_addr);
if (is_hmac) {
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -250,12 +249,12 @@ static int ssi_hash_map_request(struct device *dev,
state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
- HASH_LEN_SIZE, state->digest_bytes_len);
+ HASH_LEN_SIZE, state->digest_bytes_len);
goto fail4;
}
- SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
- HASH_LEN_SIZE, state->digest_bytes_len,
- (unsigned long long)state->digest_bytes_len_dma_addr);
+ SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_LEN_SIZE, state->digest_bytes_len,
+ state->digest_bytes_len_dma_addr);
} else {
state->digest_bytes_len_dma_addr = 0;
}
@@ -264,12 +263,13 @@ static int ssi_hash_map_request(struct device *dev,
state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->opad_digest_buff);
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
goto fail5;
}
- SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
- ctx->inter_digestsize, state->opad_digest_buff,
- (unsigned long long)state->opad_digest_dma_addr);
+ SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ state->opad_digest_dma_addr);
} else {
state->opad_digest_dma_addr = 0;
}
@@ -297,20 +297,14 @@ fail2:
fail1:
kfree(state->digest_buff);
fail_digest_result_buff:
- if (state->digest_result_buff) {
- kfree(state->digest_result_buff);
- state->digest_result_buff = NULL;
- }
+ kfree(state->digest_result_buff);
+ state->digest_result_buff = NULL;
fail_buff1:
- if (state->buff1) {
- kfree(state->buff1);
- state->buff1 = NULL;
- }
+ kfree(state->buff1);
+ state->buff1 = NULL;
fail_buff0:
- if (state->buff0) {
- kfree(state->buff0);
- state->buff0 = NULL;
- }
+ kfree(state->buff0);
+ state->buff0 = NULL;
fail0:
return rc;
}
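The error-path simplification works because kfree(NULL) is specified as a no-op; a sketch of the idiom with a hypothetical two-buffer allocator:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: the error path frees unconditionally,
 * whatever was allocated so far. */
static int alloc_pair(u8 **a, u8 **b, size_t len)
{
	*a = kmalloc(len, GFP_KERNEL);
	*b = kmalloc(len, GFP_KERNEL);
	if (*a && *b)
		return 0;

	kfree(*a);		/* safe even when the pointer is NULL */
	kfree(*b);
	*a = NULL;
	*b = NULL;
	return -ENOMEM;
}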
@@ -322,22 +316,22 @@ static void ssi_hash_unmap_request(struct device *dev,
if (state->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
- (unsigned long long)state->digest_buff_dma_addr);
+ SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
if (state->digest_bytes_len_dma_addr != 0) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
- (unsigned long long)state->digest_bytes_len_dma_addr);
+ SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
if (state->opad_digest_dma_addr != 0) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
- (unsigned long long)state->opad_digest_dma_addr);
+ SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
@@ -359,9 +353,9 @@ static void ssi_hash_unmap_result(struct device *dev,
digestsize,
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("unmpa digest result buffer "
- "va (%pK) pa (%llx) len %u\n",
+ "va (%pK) pa (%pad) len %u\n",
state->digest_result_buff,
- (unsigned long long)state->digest_result_dma_addr,
+ state->digest_result_dma_addr,
digestsize);
memcpy(result,
state->digest_result_buff,
@@ -431,8 +425,6 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
SSI_LOG_ERR("map_ahash_source() failed\n");
return -ENOMEM;
@@ -596,16 +588,16 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
"hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (nbytes == 0) {
/* no real updates required */
return 0;
}
- if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) {
+ rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
+ if (unlikely(rc)) {
if (rc == 1) {
SSI_LOG_DEBUG(" data size not require HW update %x\n",
- nbytes);
+ nbytes);
/* No hardware updates are required */
return 0;
}
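The update paths here and in ssi_mac_update() below are reshaped to satisfy checkpatch's ban on assignment inside an if condition; the general shape, with do_map() standing in for the buffer-manager call:

#include <linux/compiler.h>

struct state;				/* opaque stand-in for the driver state */
extern int do_map(struct state *s);	/* hypothetical mapping helper */

static int example_update(struct state *s)
{
	int rc;

	rc = do_map(s);		/* was: if (unlikely(rc = do_map(s))) */
	if (unlikely(rc))
		return rc;
	return 0;
}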
@@ -693,8 +685,6 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
@@ -829,8 +819,6 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
@@ -964,7 +952,6 @@ static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
state->xcbc_count = 0;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ssi_hash_map_request(dev, state, ctx);
return 0;
@@ -975,7 +962,7 @@ static int ssi_hash_setkey(void *hash,
unsigned int keylen,
bool synchronize)
{
- unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
struct ssi_crypto_req ssi_req = {};
struct ssi_hash_ctx *ctx = NULL;
int blocksize = 0;
@@ -984,9 +971,8 @@ static int ssi_hash_setkey(void *hash,
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
ssi_sram_addr_t larval_addr;
- SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
+ SSI_LOG_DEBUG("start keylen: %d", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
@@ -1012,9 +998,8 @@ static int ssi_hash_setkey(void *hash,
" DMA failed\n", key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
- "keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
+ "keylen=%u\n", ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
if (keylen > blocksize) {
@@ -1118,7 +1103,7 @@ static int ssi_hash_setkey(void *hash,
/* Prepare ipad key */
hw_desc_init(&desc[idx]);
- set_xor_val(&desc[idx], hmacPadConst[i]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
@@ -1155,17 +1140,17 @@ out:
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
}
return rc;
}
static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct ssi_crypto_req ssi_req = {};
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -1173,15 +1158,14 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- return -EINVAL;
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
}
ctx->key_params.keylen = keylen;
@@ -1196,9 +1180,9 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
" DMA failed\n", key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
"keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
ctx->is_hmac = true;
@@ -1243,33 +1227,32 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
- (unsigned long long)ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
return rc;
}
#if SSI_CC_HAS_CMAC
static int ssi_cmac_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx->is_hmac = true;
switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- return -EINVAL;
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
}
ctx->key_params.keylen = keylen;
@@ -1302,8 +1285,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped digest-buffer: "
- "digest_buff_dma_addr=0x%llX\n",
- (unsigned long long)ctx->digest_buff_dma_addr);
+ "digest_buff_dma_addr=%pad\n",
+ ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
if (ctx->opad_tmp_keys_dma_addr != 0) {
@@ -1311,8 +1294,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped opad-digest: "
- "opad_tmp_keys_dma_addr=0x%llX\n",
- (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+ "opad_tmp_keys_dma_addr=%pad\n",
+ ctx->opad_tmp_keys_dma_addr);
ctx->opad_tmp_keys_dma_addr = 0;
}
@@ -1328,23 +1311,23 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->digest_buff), ctx->digest_buff);
+ sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
- sizeof(ctx->digest_buff), ctx->digest_buff,
- (unsigned long long)ctx->digest_buff_dma_addr);
+ SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->opad_tmp_keys_buff),
- ctx->opad_tmp_keys_buff);
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
- sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
- (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+ SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ ctx->opad_tmp_keys_dma_addr);
ctx->is_hmac = false;
return 0;
@@ -1364,9 +1347,8 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
struct ssi_hash_alg *ssi_alg =
container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_req_ctx));
+ sizeof(struct ahash_req_ctx));
ctx->hash_mode = ssi_alg->hash_mode;
ctx->hw_mode = ssi_alg->hw_mode;
@@ -1396,7 +1378,6 @@ static int ssi_mac_update(struct ahash_request *req)
int rc;
u32 idx = 0;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (req->nbytes == 0) {
/* no real updates required */
return 0;
@@ -1404,10 +1385,11 @@ static int ssi_mac_update(struct ahash_request *req)
state->xcbc_count++;
- if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size))) {
+ rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
+ if (unlikely(rc)) {
if (rc == 1) {
SSI_LOG_DEBUG(" data size not require HW update %x\n",
- req->nbytes);
+ req->nbytes);
/* No hardware updates are required */
return 0;
}
@@ -1454,19 +1436,19 @@ static int ssi_mac_final(struct ahash_request *req)
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc = 0;
- u32 keySize, keyLen;
+ u32 key_size, key_len;
u32 digestsize = crypto_ahash_digestsize(tfm);
u32 rem_cnt = state->buff_index ? state->buff1_cnt :
state->buff0_cnt;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
- keySize = CC_AES_128_BIT_KEY_SIZE;
- keyLen = CC_AES_128_BIT_KEY_SIZE;
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
} else {
- keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen;
- keyLen = ctx->key_params.keylen;
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
}
SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
@@ -1492,8 +1474,8 @@ static int ssi_mac_final(struct ahash_request *req)
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
set_din_type(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET), keySize, NS_BIT);
- set_key_size_aes(&desc[idx], keyLen);
+ XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1522,7 +1504,7 @@ static int ssi_mac_final(struct ahash_request *req)
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_key_size_aes(&desc[idx], keyLen);
+ set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
@@ -1569,9 +1551,8 @@ static int ssi_mac_finup(struct ahash_request *req)
u32 digestsize = crypto_ahash_digestsize(tfm);
SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (state->xcbc_count > 0 && req->nbytes == 0) {
- SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
+ SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final\n");
return ssi_mac_final(req);
}
@@ -1636,12 +1617,11 @@ static int ssi_mac_digest(struct ahash_request *req)
u32 digestsize = crypto_ahash_digestsize(tfm);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- u32 keyLen;
+ u32 key_len;
int idx = 0;
int rc;
SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
SSI_LOG_ERR("map_ahash_source() failed\n");
@@ -1662,17 +1642,17 @@ static int ssi_mac_digest(struct ahash_request *req)
ssi_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
- keyLen = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
ssi_hash_create_xcbc_setup(req, desc, &idx);
} else {
- keyLen = ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
ssi_hash_create_cmac_setup(req, desc, &idx);
}
if (req->nbytes == 0) {
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_key_size_aes(&desc[idx], keyLen);
+ set_key_size_aes(&desc[idx], key_len);
set_cmac_size0_mode(&desc[idx]);
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
@@ -1764,8 +1744,6 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
state->buff0_cnt;
const u32 tmp = CC_EXPORT_MAGIC;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
memcpy(out, &tmp, sizeof(u32));
out += sizeof(u32);
@@ -1805,8 +1783,6 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
u32 tmp;
int rc;
- CHECK_AND_RETURN_UPON_FIPS_ERROR();
-
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC) {
rc = -EINVAL;
@@ -1856,7 +1832,7 @@ out:
}
static int ssi_ahash_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
return ssi_hash_setkey((void *)ahash, key, keylen, false);
}
@@ -2084,9 +2060,9 @@ ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
struct crypto_alg *alg;
struct ahash_alg *halg;
- t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL);
+ t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
if (!t_crypto_alg) {
- SSI_LOG_ERR("failed to allocate t_alg\n");
+ SSI_LOG_ERR("failed to allocate t_crypto_alg\n");
return ERR_PTR(-ENOMEM);
}
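kzalloc(sizeof(*ptr), ...) is preferred to spelling out the struct type: the size expression stays correct if the pointer's type ever changes. The same rewrite recurs below for hash_handle, the IVGEN context and req_mgr_h; a sketch with a hypothetical struct:

#include <linux/slab.h>

struct widget {
	int a;
};

static struct widget *widget_new(void)
{
	/* sizeof(*w) tracks the pointee's type automatically */
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	return w;	/* caller must check for NULL */
}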
@@ -2138,7 +2114,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
/* Copy-to-sram digest-len */
ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(digest_len_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2149,7 +2126,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
#if (DX_DEV_SHA_MAX > 256)
/* Copy-to-sram digest-len for sha384/512 */
ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2163,7 +2141,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
/* Copy-to-sram initial SHA* digests */
ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
- ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(md5_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2171,7 +2150,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
larval_seq_len = 0;
ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
- ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2179,7 +2159,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
larval_seq_len = 0;
ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
- ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2187,7 +2168,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
larval_seq_len = 0;
ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
- ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
@@ -2201,10 +2183,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ larval_seq, &larval_seq_len);
sram_buff_ofs += sizeof(u32);
ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ larval_seq, &larval_seq_len);
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2219,10 +2201,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ larval_seq, &larval_seq_len);
sram_buff_ofs += sizeof(u32);
ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ larval_seq, &larval_seq_len);
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2244,10 +2226,10 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
int rc = 0;
int alg;
- hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
+ hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
if (!hash_handle) {
SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
- sizeof(struct ssi_hash_handle));
+ sizeof(*hash_handle));
rc = -ENOMEM;
goto fail;
}
@@ -2319,7 +2301,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
SSI_LOG_ERR("%s alg allocation failed\n",
- driver_hash[alg].driver_name);
+ driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
@@ -2338,11 +2320,8 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
return 0;
fail:
-
- if (drvdata->hash_handle) {
- kfree(drvdata->hash_handle);
- drvdata->hash_handle = NULL;
- }
+ kfree(drvdata->hash_handle);
+ drvdata->hash_handle = NULL;
return rc;
}
@@ -2365,8 +2344,9 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
}
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size) {
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
@@ -2422,8 +2402,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
}
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index 5ff3368c04d9..b01e03231947 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -158,7 +158,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
{
struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct device *device = &(drvdata->plat_dev->dev);
+ struct device *device = &drvdata->plat_dev->dev;
if (!ivgen_ctx)
return;
@@ -166,7 +166,8 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
if (ivgen_ctx->pool_meta) {
memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
- ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma);
+ ivgen_ctx->pool_meta,
+ ivgen_ctx->pool_meta_dma);
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
@@ -190,10 +191,11 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
int rc;
/* Allocate "this" context */
- drvdata->ivgen_handle = kzalloc(sizeof(struct ssi_ivgen_ctx), GFP_KERNEL);
+ drvdata->ivgen_handle = kzalloc(sizeof(*drvdata->ivgen_handle),
+ GFP_KERNEL);
if (!drvdata->ivgen_handle) {
SSI_LOG_ERR("Not enough memory to allocate IVGEN context "
- "(%zu B)\n", sizeof(struct ssi_ivgen_ctx));
+ "(%zu B)\n", sizeof(*drvdata->ivgen_handle));
rc = -ENOMEM;
goto out;
}
@@ -201,7 +203,8 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
	/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
- &ivgen_ctx->pool_meta_dma, GFP_KERNEL);
+ &ivgen_ctx->pool_meta_dma,
+ GFP_KERNEL);
if (!ivgen_ctx->pool_meta) {
SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
"(%u B)\n", SSI_IVPOOL_META_SIZE);
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
index 52a8ed579177..31325e6cd4b4 100644
--- a/drivers/staging/ccree/ssi_pm.c
+++ b/drivers/staging/ccree/ssi_pm.c
@@ -40,7 +40,7 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
(struct ssi_drvdata *)dev_get_drvdata(dev);
int rc;
- SSI_LOG_DEBUG("ssi_power_mgr_runtime_suspend: set HOST_POWER_DOWN_EN\n");
+ SSI_LOG_DEBUG("set HOST_POWER_DOWN_EN\n");
WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
if (rc != 0) {
@@ -58,7 +58,7 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
struct ssi_drvdata *drvdata =
(struct ssi_drvdata *)dev_get_drvdata(dev);
- SSI_LOG_DEBUG("ssi_power_mgr_runtime_resume , unset HOST_POWER_DOWN_EN\n");
+ SSI_LOG_DEBUG("unset HOST_POWER_DOWN_EN\n");
WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = cc_clk_on(drvdata);
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 46d9396f9ff9..e5c2f92857f6 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -30,8 +30,6 @@
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_pm.h"
-#include "ssi_fips.h"
-#include "ssi_fips_local.h"
#define SSI_MAX_POLL_ITER 10
@@ -102,7 +100,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
struct ssi_request_mgr_handle *req_mgr_h;
int rc = 0;
- req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
if (!req_mgr_h) {
rc = -ENOMEM;
goto req_mgr_init_err;
@@ -129,7 +127,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -138,7 +136,9 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
/* Allocate DMA word for "dummy" completion descriptor use */
req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
- sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+ sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
if (!req_mgr_h->dummy_comp_buff) {
SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
"buffer\n", sizeof(u32));
@@ -177,7 +177,8 @@ static inline void enqueue_seq(
writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
- seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]);
+ seq[i].word[0], seq[i].word[1], seq[i].word[2],
+ seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
}
}
@@ -211,7 +212,7 @@ static inline int request_mgr_queues_status_check(
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
@@ -221,9 +222,8 @@ static inline int request_mgr_queues_status_check(
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
@@ -235,7 +235,7 @@ static inline int request_mgr_queues_status_check(
}
SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
+ req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
@@ -291,9 +291,8 @@ int send_request(
* in case iv gen add the max size and in case of no dout add 1
* for the internal completion descriptor
*/
- rc = request_mgr_queues_status_check(req_mgr_h,
- cc_base,
- max_required_seq_len);
+ rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
+ max_required_seq_len);
if (likely(rc == 0))
/* There is enough place in the queue */
break;
@@ -320,21 +319,22 @@ int send_request(
if (!is_dout) {
init_completion(&ssi_req->seq_compl);
ssi_req->user_cb = request_mgr_complete;
- ssi_req->user_arg = &(ssi_req->seq_compl);
+ ssi_req->user_arg = &ssi_req->seq_compl;
total_seq_len++;
}
if (ssi_req->ivgen_dma_addr_len > 0) {
- SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- (unsigned long long)ssi_req->ivgen_dma_addr[0],
- (unsigned long long)ssi_req->ivgen_dma_addr[1],
- (unsigned long long)ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
+ SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ ssi_req->ivgen_dma_addr_len,
+ ssi_req->ivgen_dma_addr[0],
+ ssi_req->ivgen_dma_addr[1],
+ ssi_req->ivgen_dma_addr[2],
+ ssi_req->ivgen_size);
/* Acquire IV from pool */
- rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_size, iv_seq, &iv_seq_len);
+ rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
+ ssi_req->ivgen_dma_addr_len,
+ ssi_req->ivgen_size, iv_seq, &iv_seq_len);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
@@ -418,9 +418,8 @@ int send_request_init(
enqueue_seq(cc_base, desc, len);
/* Update the free slots in HW queue */
- req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
return 0;
}
@@ -545,8 +544,7 @@ static void comp_handler(unsigned long devarg)
}
	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(
- CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
}
/*
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index e05c0c13c2eb..f11116afe89a 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -58,7 +58,7 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL);
if (!drvdata->sram_mgr_handle) {
SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n",
- sizeof(struct ssi_sram_mgr_ctx));
+ sizeof(struct ssi_sram_mgr_ctx));
rc = -ENOMEM;
goto out;
}
@@ -90,12 +90,12 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
if (unlikely((size & 0x3) != 0)) {
SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4",
- size);
+ size);
return NULL_SRAM_ADDR;
}
if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
}
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
index dbcd1634aad1..0655658bba4d 100644
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -40,8 +40,7 @@ struct stat_name {
const char *stat_phase_name[MAX_STAT_PHASES];
};
-static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
-{
+static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = {
{
/* STAT_OP_TYPE_NULL */
.op_type_name = "NULL",
@@ -144,8 +143,12 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]
avg = (u64)item[i][j].sum;
do_div(avg, item[i][j].count);
SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
- stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
- item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count);
+ stat_name_db[i].op_type_name,
+ stat_name_db[i].stat_phase_name[j],
+ item[i][j].min, (int)avg,
+ item[i][j].max,
+ (long long)item[i][j].sum,
+ item[i][j].count);
}
}
}
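
The average above is computed with do_div() because a plain 64-bit '/' is not available on all 32-bit kernels. do_div(n, base) divides the u64 n in place (n becomes the quotient) and returns the remainder, which is why sum is first copied into avg:

	#include <asm/div64.h>

	u64 avg = 1000;			/* copy of the accumulated sum */
	u32 rem = do_div(avg, 6);	/* now avg == 166, rem == 4 */
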
@@ -156,21 +159,23 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]
**************************************/
static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
init_db(stat_host_db);
return count;
}
static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
init_db(stat_cc_db);
return count;
}
static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int i, j;
char line[512];
@@ -179,7 +184,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
ssize_t buf_len, tmp_len = 0;
buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
if (buf_len < 0) /* scnprintf should not return a negative value */
return buf_len;
for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
@@ -193,11 +198,11 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
avg = min_cyc = max_cyc = 0;
}
tmp_len = scnprintf(line, 512,
- "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- min_cyc, (unsigned int)avg, max_cyc,
- stat_host_db[i][j].count);
+ "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name,
+ stat_name_db[i].stat_phase_name[j],
+ min_cyc, (unsigned int)avg, max_cyc,
+ stat_host_db[i][j].count);
if (tmp_len < 0) /* scnprintf should not return a negative value */
return buf_len;
if (buf_len + tmp_len >= PAGE_SIZE)
@@ -210,7 +215,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
}
static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int i;
char line[256];
@@ -219,7 +224,7 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
ssize_t buf_len, tmp_len = 0;
buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
if (buf_len < 0) /* scnprintf should not return a negative value */
return buf_len;
for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
@@ -231,13 +236,10 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
} else {
avg = min_cyc = max_cyc = 0;
}
- tmp_len = scnprintf(line, 256,
- "%s\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- min_cyc,
- (unsigned int)avg,
- max_cyc,
- stat_cc_db[i][STAT_PHASE_6].count);
+ tmp_len = scnprintf(line, 256, "%s\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name, min_cyc,
+ (unsigned int)avg, max_cyc,
+ stat_cc_db[i][STAT_PHASE_6].count);
if (tmp_len < 0) /* scnprintf should not return a negative value */
return buf_len;
@@ -255,7 +257,7 @@ void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result)
unsigned long flags;
spin_lock_irqsave(&stat_lock, flags);
- update_db(&(stat_host_db[op_type][phase]), (unsigned int)result);
+ update_db(&stat_host_db[op_type][phase], (unsigned int)result);
spin_unlock_irqrestore(&stat_lock, flags);
}
@@ -264,7 +266,7 @@ void update_cc_stat(
unsigned int phase,
unsigned int elapsed_cycles)
{
- update_db(&(stat_cc_db[op_type][phase]), elapsed_cycles);
+ update_db(&stat_cc_db[op_type][phase], elapsed_cycles);
}
void display_all_stat_db(void)
@@ -277,7 +279,7 @@ void display_all_stat_db(void)
#endif /*CC_CYCLE_COUNT*/
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct ssi_drvdata *drvdata = sys_get_drvdata();
u32 register_value;
@@ -285,20 +287,20 @@ static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
int offset = 0;
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
return offset;
}
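
The dump above grows the single PAGE_SIZE sysfs buffer by repeated scnprintf() calls. Because scnprintf() never writes past the size it is given and returns the number of bytes actually stored, the offset arithmetic is safe by construction; register names and values below are illustrative:

	/* Sketch: append formatted lines to sysfs's one-page 'buf'. */
	int offset = 0;

	offset += scnprintf(buf + offset, PAGE_SIZE - offset,
			    "REG_A\t0x%08X\n", reg_a);
	offset += scnprintf(buf + offset, PAGE_SIZE - offset,
			    "REG_B\t0x%08X\n", reg_b);
	return offset;	/* a show() callback returns the bytes written */
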
static ssize_t ssi_sys_help_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
char *help_str[] = {
"cat reg_dump ", "Print several of CC register values",
@@ -357,8 +359,8 @@ static struct ssi_drvdata *sys_get_drvdata(void)
}
static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
- struct kobject *parent_dir_kobj, const char *dir_name,
- struct kobj_attribute *attrs, u32 num_of_attrs)
+ struct kobject *parent_dir_kobj, const char *dir_name,
+ struct kobj_attribute *attrs, u32 num_of_attrs)
{
int i;
@@ -375,7 +377,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
/* allocate memory for directory's attributes list */
sys_dir->sys_dir_attr_list =
kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!(sys_dir->sys_dir_attr_list)) {
kobject_put(sys_dir->sys_dir_kobj);
@@ -386,7 +388,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
/* initialize attributes list */
for (i = 0; i < num_of_attrs; ++i)
- sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+ sys_dir->sys_dir_attr_list[i] = &attrs[i].attr;
/* last list entry should be NULL */
sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
@@ -394,7 +396,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
return sysfs_create_group(sys_dir->sys_dir_kobj,
- &(sys_dir->sys_dir_attr_group));
+ &sys_dir->sys_dir_attr_group);
}
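
sys_init_dir() above collects the kobj_attribute entries into a NULL-terminated attribute array and registers them in one sysfs_create_group() call. A self-contained sketch of that registration; the demo_* names are hypothetical:

	/* Sketch: build a NULL-terminated attribute list and register it
	 * as a sysfs group under 'kobj'.  Two attributes, filled in elsewhere.
	 */
	static struct kobj_attribute demo_attrs[2];
	static struct attribute *demo_attr_list[3];	/* num_of_attrs + 1 */
	static struct attribute_group demo_group;

	static int demo_register(struct kobject *kobj)
	{
		int i;

		for (i = 0; i < 2; i++)
			demo_attr_list[i] = &demo_attrs[i].attr;
		demo_attr_list[2] = NULL;	/* sysfs requires the terminator */

		demo_group.attrs = demo_attr_list;
		return sysfs_create_group(kobj, &demo_group);
	}
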
static void sys_free_dir(struct sys_dir *sys_dir)
@@ -421,9 +423,9 @@ int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name);
/* Initialize top directory */
- retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj,
- "cc_info", ssi_sys_top_level_attrs,
- ARRAY_SIZE(ssi_sys_top_level_attrs));
+ retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
+ ssi_sys_top_level_attrs,
+ ARRAY_SIZE(ssi_sys_top_level_attrs));
return retval;
}