summaryrefslogtreecommitdiffstats
path: root/arch/x86/crypto/sha256-mb
diff options
context:
space:
mode:
authorTim Chen2016-07-08 18:28:03 +0200
committerHerbert Xu2016-07-12 07:02:00 +0200
commiteb9bc8e7afaa9f062105dad55ec1c0663d961bb3 (patch)
treeee3fdfcaa6e1dcbb0522bd8861ec26505c4d20b5 /arch/x86/crypto/sha256-mb
parentcrypto: qat - Stop dropping leading zeros from RSA output (diff)
downloadkernel-qcow2-linux-eb9bc8e7afaa9f062105dad55ec1c0663d961bb3.tar.gz
kernel-qcow2-linux-eb9bc8e7afaa9f062105dad55ec1c0663d961bb3.tar.xz
kernel-qcow2-linux-eb9bc8e7afaa9f062105dad55ec1c0663d961bb3.zip
crypto: sha-mb - Cleanup code to use || instead of |
for condition comparison and cleanup multiline comment style In sha*_ctx_mgr_submit, we currently use the | operator instead of || ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) Switching it to || and removing extraneous parentheses to adhere to coding style. Also cleanup inconsistent multiline comment style. Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto/sha256-mb')
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c11
1 file changed, 7 insertions, 4 deletions
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index c9d5dcc81c96..89fa85e8b10c 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -283,7 +283,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
ctx->incoming_buffer = buffer;
ctx->incoming_buffer_length = len;
- /* Store the user's request flags and mark this ctx as currently
+ /*
+ * Store the user's request flags and mark this ctx as currently
* being processed.
*/
ctx->status = (flags & HASH_LAST) ?
@@ -299,8 +300,9 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
* Or if the user's buffer contains less than a whole block,
* append as much as possible to the extra block.
*/
- if ((ctx->partial_block_buffer_length) | (len < SHA256_BLOCK_SIZE)) {
- /* Compute how many bytes to copy from user buffer into
+ if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
+ /*
+ * Compute how many bytes to copy from user buffer into
* extra block
*/
uint32_t copy_len = SHA256_BLOCK_SIZE -
@@ -323,7 +325,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
/* The extra block should never contain more than 1 block */
assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
- /* If the extra block buffer contains exactly 1 block,
+ /*
+ * If the extra block buffer contains exactly 1 block,
* it can be hashed.
*/
if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {