author     Linus Torvalds  2006-09-22 21:51:33 +0200
committer  Linus Torvalds  2006-09-22 21:51:33 +0200
commit     6bbd9b6d694ff7242d63cda2faac4bd59ee4328e (patch)
tree       0641aa896e2ea01f4692973e5fbea429408854f4 /drivers
parent     Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6 (diff)
parent     [BLOCK] dm-crypt: trivial comment improvements (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (64 commits)
[BLOCK] dm-crypt: trivial comment improvements
[CRYPTO] api: Deprecate crypto_digest_* and crypto_alg_available
[CRYPTO] padlock: Convert padlock-sha to use crypto_hash
[CRYPTO] users: Use crypto_comp and crypto_has_*
[CRYPTO] api: Add crypto_comp and crypto_has_*
[CRYPTO] users: Use crypto_hash interface instead of crypto_digest
[SCSI] iscsi: Use crypto_hash interface instead of crypto_digest
[CRYPTO] digest: Remove old HMAC implementation
[CRYPTO] doc: Update documentation for hash and me
[SCTP]: Use HMAC template and hash interface
[IPSEC]: Use HMAC template and hash interface
[CRYPTO] tcrypt: Use HMAC template and hash interface
[CRYPTO] hmac: Add crypto template implementation
[CRYPTO] digest: Added user API for new hash type
[CRYPTO] api: Mark parts of cipher interface as deprecated
[PATCH] scatterlist: Add const to sg_set_buf/sg_init_one pointer argument
[CRYPTO] drivers: Remove obsolete block cipher operations
[CRYPTO] users: Use block ciphers where applicable
[SUNRPC] GSS: Use block ciphers where applicable
[IPSEC] ESP: Use block ciphers where applicable
...
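
The common thread in this series is the retirement of the monolithic crypto_tfm handle in favour of per-type interfaces: crypto_hash for digests, crypto_blkcipher for block ciphers, and crypto_comp for compressors, with crypto_digest_* and crypto_alg_available() deprecated. As a minimal sketch of the digest-side conversion (not taken from any single hunk below; the function name, buffer names, and the SHA-1 choice are illustrative), the calling convention becomes a struct hash_desc that carries the tfm plus per-request flags:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch of the new crypto_hash convention (2.6.19-era API); the helper
 * name and parameters are hypothetical. */
static int example_sha1_digest(const void *data, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct hash_desc desc;
	int err;

	/* CRYPTO_ALG_ASYNC in the mask requests a synchronous hash. */
	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* sg_init_one() now takes a const pointer, per the scatterlist
	 * patch in this series. */
	sg_init_one(&sg, data, len);
	err = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(desc.tfm);
	return err;
}

The same desc-plus-tfm shape recurs in every iscsi_tcp and ppp_mppe hunk below; note that crypto_hash_update() and crypto_hash_digest() now take a byte count rather than a scatterlist entry count.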
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cryptoloop.c        | 160
-rw-r--r--  drivers/crypto/Kconfig            |  45
-rw-r--r--  drivers/crypto/Makefile           |   8
-rw-r--r--  drivers/crypto/padlock-aes.c      | 258
-rw-r--r--  drivers/crypto/padlock-generic.c  |  63
-rw-r--r--  drivers/crypto/padlock-sha.c      | 318
-rw-r--r--  drivers/crypto/padlock.c          |  58
-rw-r--r--  drivers/crypto/padlock.h          |  17
-rw-r--r--  drivers/md/dm-crypt.c             | 146
-rw-r--r--  drivers/net/ppp_mppe.c            |  68
-rw-r--r--  drivers/net/wireless/airo.c       |  22
-rw-r--r--  drivers/scsi/iscsi_tcp.c          | 134
-rw-r--r--  drivers/scsi/iscsi_tcp.h          |   9
13 files changed, 877 insertions(+), 429 deletions(-)
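
Most of the churn in the files above follows a second pattern: chaining modes stop being tfm flags (CRYPTO_TFM_MODE_CBC, CRYPTO_TFM_MODE_ECB) and are instead spelled inside the algorithm name via templates such as "cbc(aes)" or "ecb(arc4)", while per-request state (the IV, the may-sleep flag) moves into a struct blkcipher_desc. A hedged sketch of that convention, mirroring what the cryptoloop and dm-crypt diffs below do (the function and parameter names here are hypothetical; headers as in the previous sketch):

/* Sketch of the new blkcipher convention; callers supply scatterlists,
 * key material and an IV buffer of crypto_blkcipher_ivsize() bytes. */
static int example_cbc_aes_encrypt(struct scatterlist *dst,
				   struct scatterlist *src,
				   unsigned int nbytes,
				   const u8 *key, unsigned int keylen,
				   u8 *iv)
{
	struct blkcipher_desc desc;
	int err;

	/* The mode is part of the name now, not a CRYPTO_TFM_MODE_* flag. */
	desc.tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);

	err = crypto_blkcipher_setkey(desc.tfm, key, keylen);
	if (err)
		goto out;

	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	desc.info = iv;	/* the IV rides in the descriptor, not a 5th argument */

	err = crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
out:
	crypto_free_blkcipher(desc.tfm);
	return err;
}

Allocation failures also change shape throughout: the new constructors return ERR_PTR() values rather than NULL, hence the IS_ERR()/PTR_ERR() checks added in nearly every hunk below.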
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 3d4261c39f16..40535036e893 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -40,11 +40,13 @@ static int cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) { int err = -EINVAL; + int cipher_len; + int mode_len; char cms[LO_NAME_SIZE]; /* cipher-mode string */ char *cipher; char *mode; char *cmsp = cms; /* c-m string pointer */ - struct crypto_tfm *tfm = NULL; + struct crypto_blkcipher *tfm; /* encryption breaks for non sector aligned offsets */ @@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); cms[LO_NAME_SIZE - 1] = 0; - cipher = strsep(&cmsp, "-"); - mode = strsep(&cmsp, "-"); - - if (mode == NULL || strcmp(mode, "cbc") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | - CRYPTO_TFM_REQ_MAY_SLEEP); - else if (strcmp(mode, "ecb") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | - CRYPTO_TFM_REQ_MAY_SLEEP); - if (tfm == NULL) + + cipher = cmsp; + cipher_len = strcspn(cmsp, "-"); + + mode = cmsp + cipher_len; + mode_len = 0; + if (*mode) { + mode++; + mode_len = strcspn(mode, "-"); + } + + if (!mode_len) { + mode = "cbc"; + mode_len = 3; + } + + if (cipher_len + mode_len + 3 > LO_NAME_SIZE) return -EINVAL; - err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key, - info->lo_encrypt_key_size); + memmove(cms, mode, mode_len); + cmsp = cms + mode_len; + *cmsp++ = '('; + memcpy(cmsp, info->lo_crypt_name, cipher_len); + cmsp += cipher_len; + *cmsp++ = ')'; + *cmsp = 0; + + tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key, + info->lo_encrypt_key_size); if (err != 0) goto out_free_tfm; @@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) return 0; out_free_tfm: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); out: return err; } -typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm, +typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc, struct scatterlist *sg_out, struct scatterlist *sg_in, unsigned int nsg); - -static int -cryptoloop_transfer_ecb(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) -{ - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; - struct scatterlist sg_out = { NULL, }; - struct scatterlist sg_in = { NULL, }; - - encdec_ecb_t encdecfunc; - struct page *in_page, *out_page; - unsigned in_offs, out_offs; - - if (cmd == READ) { - in_page = raw_page; - in_offs = raw_off; - out_page = loop_page; - out_offs = loop_off; - encdecfunc = tfm->crt_u.cipher.cit_decrypt; - } else { - in_page = loop_page; - in_offs = loop_off; - out_page = raw_page; - out_offs = raw_off; - encdecfunc = tfm->crt_u.cipher.cit_encrypt; - } - - while (size > 0) { - const int sz = min(size, LOOP_IV_SECTOR_SIZE); - - sg_in.page = in_page; - sg_in.offset = in_offs; - sg_in.length = sz; - - sg_out.page = out_page; - sg_out.offset = out_offs; - sg_out.length = sz; - - encdecfunc(tfm, &sg_out, &sg_in, sz); - - size -= sz; - in_offs += sz; - out_offs += sz; - } - - return 0; -} - -typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm, - struct scatterlist *sg_out, - struct scatterlist *sg_in, - unsigned int nsg, u8 *iv); - static int -cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned 
raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) +cryptoloop_transfer(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t IV) { - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; + struct crypto_blkcipher *tfm = lo->key_data; + struct blkcipher_desc desc = { + .tfm = tfm, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, + }; struct scatterlist sg_out = { NULL, }; struct scatterlist sg_in = { NULL, }; encdec_cbc_t encdecfunc; struct page *in_page, *out_page; unsigned in_offs, out_offs; + int err; if (cmd == READ) { in_page = raw_page; in_offs = raw_off; out_page = loop_page; out_offs = loop_off; - encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv; + encdecfunc = crypto_blkcipher_crt(tfm)->decrypt; } else { in_page = loop_page; in_offs = loop_off; out_page = raw_page; out_offs = raw_off; - encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv; + encdecfunc = crypto_blkcipher_crt(tfm)->encrypt; } while (size > 0) { @@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, sg_out.offset = out_offs; sg_out.length = sz; - encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv); + desc.info = iv; + err = encdecfunc(&desc, &sg_out, &sg_in, sz); + if (err) + return err; IV++; size -= sz; @@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, } static int -cryptoloop_transfer(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) -{ - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB) - { - lo->transfer = cryptoloop_transfer_ecb; - return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off, - loop_page, loop_off, size, IV); - } - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC) - { - lo->transfer = cryptoloop_transfer_cbc; - return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off, - loop_page, loop_off, size, IV); - } - - /* This is not supposed to happen */ - - printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n"); - return -EINVAL; -} - -static int cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) { return -EINVAL; @@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) static int cryptoloop_release(struct loop_device *lo) { - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; + struct crypto_blkcipher *tfm = lo->key_data; if (tfm != NULL) { - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); lo->key_data = NULL; return 0; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4263935443cc..adb554153f67 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -2,22 +2,53 @@ menu "Hardware crypto devices" config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" - depends on CRYPTO && X86_32 + depends on X86_32 + select CRYPTO_ALGAPI + default m help Some VIA processors come with an integrated crypto engine (so called VIA PadLock ACE, Advanced Cryptography Engine) - that provides instructions for very fast {en,de}cryption - with some algorithms. + that provides instructions for very fast cryptographic + operations with supported algorithms. The instructions are used only when the CPU supports them. - Otherwise software encryption is used. If you are unsure, - say Y. + Otherwise software encryption is used. 
+ + Selecting M for this option will compile a helper module + padlock.ko that should autoload all below configured + algorithms. Don't worry if your hardware does not support + some or all of them. In such case padlock.ko will + simply write a single line into the kernel log informing + about its failure but everything will keep working fine. + + If you are unsure, say M. The compiled module will be + called padlock.ko config CRYPTO_DEV_PADLOCK_AES - bool "Support for AES in VIA PadLock" + tristate "PadLock driver for AES algorithm" depends on CRYPTO_DEV_PADLOCK - default y + select CRYPTO_BLKCIPHER + default m help Use VIA PadLock for AES algorithm. + Available in VIA C3 and newer CPUs. + + If unsure say M. The compiled module will be + called padlock-aes.ko + +config CRYPTO_DEV_PADLOCK_SHA + tristate "PadLock driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_PADLOCK + select CRYPTO_SHA1 + select CRYPTO_SHA256 + default m + help + Use VIA PadLock for SHA1/SHA256 algorithms. + + Available in VIA C7 and newer processors. + + If unsure say M. The compiled module will be + called padlock-sha.ko + endmenu diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 45426ca19a23..4c3d0ec1cf80 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,7 +1,3 @@ - obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o - -padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o - -padlock-objs := padlock-generic.o $(padlock-objs-y) - +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b643d71298a9..d4501dc7e650 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -43,11 +43,11 @@ * --------------------------------------------------------------------------- */ +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <asm/byteorder.h> @@ -59,6 +59,17 @@ #define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) +/* Control word. */ +struct cword { + unsigned int __attribute__ ((__packed__)) + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + /* Whenever making any changes to the following * structure *make sure* you keep E, d_data * and cword aligned on 16 Bytes boundaries!!! 
*/ @@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len) return 0; } -static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +static inline struct aes_ctx *aes_ctx_common(void *ctx) { - unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); + unsigned long addr = (unsigned long)ctx; unsigned long align = PADLOCK_ALIGNMENT; if (align <= crypto_tfm_ctx_alignment()) @@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) return (struct aes_ctx *)ALIGN(addr, align); } +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) +{ + return aes_ctx_common(crypto_blkcipher_ctx(tfm)); +} + static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct aes_ctx *ctx = aes_ctx(tfm); const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; uint32_t i, t, u, v, w; uint32_t P[AES_EXTENDED_KEY_SIZE]; uint32_t rounds; - if (key_len != 16 && key_len != 24 && key_len != 32) { + if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } @@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); } -static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int ecb_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / 
AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) -{ - struct aes_ctx *ctx = aes_ctx(desc->tfm); - u8 *iv; +static struct crypto_alg ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, + } + } +}; - iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, - &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE); - memcpy(desc->info, iv, AES_BLOCK_SIZE); +static int cbc_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } - return nbytes & ~(AES_BLOCK_SIZE - 1); + return err; } -static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int cbc_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static struct crypto_alg aes_alg = { - .cra_name = "aes", - .cra_driver_name = "aes-padlock", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aes_ctx), .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt, - .cia_encrypt_ecb = aes_encrypt_ecb, - .cia_decrypt_ecb = aes_decrypt_ecb, - .cia_encrypt_cbc = 
aes_encrypt_cbc, - .cia_decrypt_cbc = aes_decrypt_cbc, + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, } } }; -int __init padlock_init_aes(void) +static int __init padlock_init(void) { - printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + int ret; + + if (!cpu_has_xcrypt) { + printk(KERN_ERR PFX "VIA PadLock not detected.\n"); + return -ENODEV; + } + + if (!cpu_has_xcrypt_enabled) { + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); + return -ENODEV; + } gen_tabs(); - return crypto_register_alg(&aes_alg); + if ((ret = crypto_register_alg(&aes_alg))) + goto aes_err; + + if ((ret = crypto_register_alg(&ecb_aes_alg))) + goto ecb_aes_err; + + if ((ret = crypto_register_alg(&cbc_aes_alg))) + goto cbc_aes_err; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + +out: + return ret; + +cbc_aes_err: + crypto_unregister_alg(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); + goto out; } -void __exit padlock_fini_aes(void) +static void __exit padlock_fini(void) { + crypto_unregister_alg(&cbc_aes_alg); + crypto_unregister_alg(&ecb_aes_alg); crypto_unregister_alg(&aes_alg); } + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS("aes-padlock"); diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c deleted file mode 100644 index 18cf0e8274a7..000000000000 --- a/drivers/crypto/padlock-generic.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Cryptographic API. - * - * Support for VIA PadLock hardware crypto engine. - * - * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/crypto.h> -#include <asm/byteorder.h> -#include "padlock.h" - -static int __init -padlock_init(void) -{ - int ret = -ENOSYS; - - if (!cpu_has_xcrypt) { - printk(KERN_ERR PFX "VIA PadLock not detected.\n"); - return -ENODEV; - } - - if (!cpu_has_xcrypt_enabled) { - printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); - return -ENODEV; - } - -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES - if ((ret = padlock_init_aes())) { - printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); - return ret; - } -#endif - - if (ret == -ENOSYS) - printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n"); - - return ret; -} - -static void __exit -padlock_fini(void) -{ -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES - padlock_fini_aes(); -#endif -} - -module_init(padlock_init); -module_exit(padlock_fini); - -MODULE_DESCRIPTION("VIA PadLock crypto engine support."); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Michal Ludvig"); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c new file mode 100644 index 000000000000..a781fd23b607 --- /dev/null +++ b/drivers/crypto/padlock-sha.c @@ -0,0 +1,318 @@ +/* + * Cryptographic API. 
+ * + * Support for VIA PadLock hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/cryptohash.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> +#include "padlock.h" + +#define SHA1_DEFAULT_FALLBACK "sha1-generic" +#define SHA1_DIGEST_SIZE 20 +#define SHA1_HMAC_BLOCK_SIZE 64 + +#define SHA256_DEFAULT_FALLBACK "sha256-generic" +#define SHA256_DIGEST_SIZE 32 +#define SHA256_HMAC_BLOCK_SIZE 64 + +struct padlock_sha_ctx { + char *data; + size_t used; + int bypass; + void (*f_sha_padlock)(const char *in, char *out, int count); + struct hash_desc fallback; +}; + +static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) +{ + return crypto_tfm_ctx(tfm); +} + +/* We'll need aligned address on the stack */ +#define NEAREST_ALIGNED(ptr) \ + ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) + +static struct crypto_alg sha1_alg, sha256_alg; + +static void padlock_sha_bypass(struct crypto_tfm *tfm) +{ + if (ctx(tfm)->bypass) + return; + + crypto_hash_init(&ctx(tfm)->fallback); + if (ctx(tfm)->data && ctx(tfm)->used) { + struct scatterlist sg; + + sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used); + crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); + } + + ctx(tfm)->used = 0; + ctx(tfm)->bypass = 1; +} + +static void padlock_sha_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->used = 0; + ctx(tfm)->bypass = 0; +} + +static void padlock_sha_update(struct crypto_tfm *tfm, + const uint8_t *data, unsigned int length) +{ + /* Our buffer is always one page. */ + if (unlikely(!ctx(tfm)->bypass && + (ctx(tfm)->used + length > PAGE_SIZE))) + padlock_sha_bypass(tfm); + + if (unlikely(ctx(tfm)->bypass)) { + struct scatterlist sg; + sg_set_buf(&sg, (uint8_t *)data, length); + crypto_hash_update(&ctx(tfm)->fallback, &sg, length); + return; + } + + memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); + ctx(tfm)->used += length; +} + +static inline void padlock_output_block(uint32_t *src, + uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +static void padlock_do_sha1(const char *in, char *out, int count) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. */ + char buf[128+16]; + char *result = NEAREST_ALIGNED(buf); + + ((uint32_t *)result)[0] = 0x67452301; + ((uint32_t *)result)[1] = 0xEFCDAB89; + ((uint32_t *)result)[2] = 0x98BADCFE; + ((uint32_t *)result)[3] = 0x10325476; + ((uint32_t *)result)[4] = 0xC3D2E1F0; + + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ + : "+S"(in), "+D"(result) + : "c"(count), "a"(0)); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); +} + +static void padlock_do_sha256(const char *in, char *out, int count) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. 
*/ + char buf[128+16]; + char *result = NEAREST_ALIGNED(buf); + + ((uint32_t *)result)[0] = 0x6A09E667; + ((uint32_t *)result)[1] = 0xBB67AE85; + ((uint32_t *)result)[2] = 0x3C6EF372; + ((uint32_t *)result)[3] = 0xA54FF53A; + ((uint32_t *)result)[4] = 0x510E527F; + ((uint32_t *)result)[5] = 0x9B05688C; + ((uint32_t *)result)[6] = 0x1F83D9AB; + ((uint32_t *)result)[7] = 0x5BE0CD19; + + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ + : "+S"(in), "+D"(result) + : "c"(count), "a"(0)); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); +} + +static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) +{ + if (unlikely(ctx(tfm)->bypass)) { + crypto_hash_final(&ctx(tfm)->fallback, out); + ctx(tfm)->bypass = 0; + return; + } + + /* Pass the input buffer to PadLock microcode... */ + ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); + + ctx(tfm)->used = 0; +} + +static int padlock_cra_init(struct crypto_tfm *tfm) +{ + const char *fallback_driver_name = tfm->__crt_alg->cra_name; + struct crypto_hash *fallback_tfm; + + /* For now we'll allocate one page. This + * could eventually be configurable one day. */ + ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); + if (!ctx(tfm)->data) + return -ENOMEM; + + /* Allocate a fallback and abort if it failed. */ + fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + free_page((unsigned long)(ctx(tfm)->data)); + return PTR_ERR(fallback_tfm); + } + + ctx(tfm)->fallback.tfm = fallback_tfm; + return 0; +} + +static int padlock_sha1_cra_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->f_sha_padlock = padlock_do_sha1; + + return padlock_cra_init(tfm); +} + +static int padlock_sha256_cra_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->f_sha_padlock = padlock_do_sha256; + + return padlock_cra_init(tfm); +} + +static void padlock_cra_exit(struct crypto_tfm *tfm) +{ + if (ctx(tfm)->data) { + free_page((unsigned long)(ctx(tfm)->data)); + ctx(tfm)->data = NULL; + } + + crypto_free_hash(ctx(tfm)->fallback.tfm); + ctx(tfm)->fallback.tfm = NULL; +} + +static struct crypto_alg sha1_alg = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), + .cra_init = padlock_sha1_cra_init, + .cra_exit = padlock_cra_exit, + .cra_u = { + .digest = { + .dia_digestsize = SHA1_DIGEST_SIZE, + .dia_init = padlock_sha_init, + .dia_update = padlock_sha_update, + .dia_final = padlock_sha_final, + } + } +}; + +static struct crypto_alg sha256_alg = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), + .cra_init = padlock_sha256_cra_init, + .cra_exit = padlock_cra_exit, + .cra_u = { + .digest = { + .dia_digestsize = SHA256_DIGEST_SIZE, + .dia_init = padlock_sha_init, + .dia_update = padlock_sha_update, + .dia_final = padlock_sha_final, + } + } +}; + +static void __init padlock_sha_check_fallbacks(void) +{ + if 
(!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK)) + printk(KERN_WARNING PFX + "Couldn't load fallback module for sha1.\n"); + + if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK)) + printk(KERN_WARNING PFX + "Couldn't load fallback module for sha256.\n"); +} + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + + if (!cpu_has_phe) { + printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n"); + return -ENODEV; + } + + if (!cpu_has_phe_enabled) { + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); + return -ENODEV; + } + + padlock_sha_check_fallbacks(); + + rc = crypto_register_alg(&sha1_alg); + if (rc) + goto out; + + rc = crypto_register_alg(&sha256_alg); + if (rc) + goto out_unreg1; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_alg(&sha1_alg); +out: + printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_alg(&sha1_alg); + crypto_unregister_alg(&sha256_alg); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS("sha1-padlock"); +MODULE_ALIAS("sha256-padlock"); diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c new file mode 100644 index 000000000000..d6d7dd5bb98c --- /dev/null +++ b/drivers/crypto/padlock.c @@ -0,0 +1,58 @@ +/* + * Cryptographic API. + * + * Support for VIA PadLock hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> +#include "padlock.h" + +static int __init padlock_init(void) +{ + int success = 0; + + if (crypto_has_cipher("aes-padlock", 0, 0)) + success++; + + if (crypto_has_hash("sha1-padlock", 0, 0)) + success++; + + if (crypto_has_hash("sha256-padlock", 0, 0)) + success++; + + if (!success) { + printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n"); + return -ENODEV; + } + + printk(KERN_NOTICE PFX "%d drivers are available.\n", success); + + return 0; +} + +static void __exit padlock_fini(void) +{ +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("Load all configured PadLock algorithms."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h index b78489bc298a..b728e4518bd1 100644 --- a/drivers/crypto/padlock.h +++ b/drivers/crypto/padlock.h @@ -15,22 +15,9 @@ #define PADLOCK_ALIGNMENT 16 -/* Control word. 
*/ -struct cword { - unsigned int __attribute__ ((__packed__)) - rounds:4, - algo:3, - keygen:1, - interm:1, - encdec:1, - ksize:2; -} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); - #define PFX "padlock: " -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES -int padlock_init_aes(void); -void padlock_fini_aes(void); -#endif +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 #endif /* _CRYPTO_PADLOCK_H */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6022ed12a795..bdbd34993a80 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -5,6 +5,7 @@ * This file is released under the GPL. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -78,11 +79,13 @@ struct crypt_config { */ struct crypt_iv_operations *iv_gen_ops; char *iv_mode; - void *iv_gen_private; + struct crypto_cipher *iv_gen_private; sector_t iv_offset; unsigned int iv_size; - struct crypto_tfm *tfm; + char cipher[CRYPTO_MAX_ALG_NAME]; + char chainmode[CRYPTO_MAX_ALG_NAME]; + struct crypto_blkcipher *tfm; unsigned int key_size; u8 key[0]; }; @@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool; /* * Different IV generation algorithms: * - * plain: the initial vector is the 32-bit low-endian version of the sector + * plain: the initial vector is the 32-bit little-endian version of the sector * number, padded with zeros if neccessary. * - * ess_iv: "encrypted sector|salt initial vector", the sector number is - * encrypted with the bulk cipher using a salt as key. The salt - * should be derived from the bulk cipher's key via hashing. + * essiv: "encrypted sector|salt initial vector", the sector number is + * encrypted with the bulk cipher using a salt as key. The salt + * should be derived from the bulk cipher's key via hashing. 
* * plumb: unimplemented, see: * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 @@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - struct crypto_tfm *essiv_tfm; - struct crypto_tfm *hash_tfm; + struct crypto_cipher *essiv_tfm; + struct crypto_hash *hash_tfm; + struct hash_desc desc; struct scatterlist sg; unsigned int saltsize; u8 *salt; + int err; if (opts == NULL) { ti->error = "Digest algorithm missing for ESSIV mode"; @@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, } /* Hash the cipher key with the given hash algorithm */ - hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); - if (hash_tfm == NULL) { + hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hash_tfm)) { ti->error = "Error initializing ESSIV hash"; - return -EINVAL; + return PTR_ERR(hash_tfm); } - if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { - ti->error = "Expected digest algorithm for ESSIV hash"; - crypto_free_tfm(hash_tfm); - return -EINVAL; - } - - saltsize = crypto_tfm_alg_digestsize(hash_tfm); + saltsize = crypto_hash_digestsize(hash_tfm); salt = kmalloc(saltsize, GFP_KERNEL); if (salt == NULL) { ti->error = "Error kmallocing salt storage in ESSIV"; - crypto_free_tfm(hash_tfm); + crypto_free_hash(hash_tfm); return -ENOMEM; } sg_set_buf(&sg, cc->key, cc->key_size); - crypto_digest_digest(hash_tfm, &sg, 1, salt); - crypto_free_tfm(hash_tfm); + desc.tfm = hash_tfm; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); + crypto_free_hash(hash_tfm); + + if (err) { + ti->error = "Error calculating hash in ESSIV"; + return err; + } /* Setup the essiv_tfm with the given salt */ - essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), - CRYPTO_TFM_MODE_ECB | - CRYPTO_TFM_REQ_MAY_SLEEP); - if (essiv_tfm == NULL) { + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(essiv_tfm)) { ti->error = "Error allocating crypto tfm for ESSIV"; kfree(salt); - return -EINVAL; + return PTR_ERR(essiv_tfm); } - if (crypto_tfm_alg_blocksize(essiv_tfm) - != crypto_tfm_alg_ivsize(cc->tfm)) { + if (crypto_cipher_blocksize(essiv_tfm) != + crypto_blkcipher_ivsize(cc->tfm)) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; - crypto_free_tfm(essiv_tfm); + crypto_free_cipher(essiv_tfm); kfree(salt); return -EINVAL; } - if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); + if (err) { ti->error = "Failed to set key for ESSIV cipher"; - crypto_free_tfm(essiv_tfm); + crypto_free_cipher(essiv_tfm); kfree(salt); - return -EINVAL; + return err; } kfree(salt); - cc->iv_gen_private = (void *)essiv_tfm; + cc->iv_gen_private = essiv_tfm; return 0; } static void crypt_iv_essiv_dtr(struct crypt_config *cc) { - crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); + crypto_free_cipher(cc->iv_gen_private); cc->iv_gen_private = NULL; } static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) { - struct scatterlist sg; - memset(iv, 0, cc->iv_size); *(u64 *)iv = cpu_to_le64(sector); - - sg_set_buf(&sg, iv, cc->iv_size); - crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private, - &sg, &sg, cc->iv_size); - + crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv); return 0; } @@ -220,6 +219,11 @@ 
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, int write, sector_t sector) { u8 iv[cc->iv_size]; + struct blkcipher_desc desc = { + .tfm = cc->tfm, + .info = iv, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, + }; int r; if (cc->iv_gen_ops) { @@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, return r; if (write) - r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); + r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); else - r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); + r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); } else { if (write) - r = crypto_cipher_encrypt(cc->tfm, out, in, length); + r = crypto_blkcipher_encrypt(&desc, out, in, length); else - r = crypto_cipher_decrypt(cc->tfm, out, in, length); + r = crypto_blkcipher_decrypt(&desc, out, in, length); } return r; @@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; - struct crypto_tfm *tfm; + struct crypto_blkcipher *tfm; char *tmp; char *cipher; char *chainmode; char *ivmode; char *ivopts; - unsigned int crypto_flags; unsigned int key_size; unsigned long long tmpll; @@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ivmode = "plain"; } - /* Choose crypto_flags according to chainmode */ - if (strcmp(chainmode, "cbc") == 0) - crypto_flags = CRYPTO_TFM_MODE_CBC; - else if (strcmp(chainmode, "ecb") == 0) - crypto_flags = CRYPTO_TFM_MODE_ECB; - else { - ti->error = "Unknown chaining mode"; + if (strcmp(chainmode, "ecb") && !ivmode) { + ti->error = "This chaining mode requires an IV mechanism"; goto bad1; } - if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { - ti->error = "This chaining mode requires an IV mechanism"; + if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, + cipher) >= CRYPTO_MAX_ALG_NAME) { + ti->error = "Chain mode + cipher name is too long"; goto bad1; } - tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); - if (!tfm) { + tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { ti->error = "Error allocating crypto tfm"; goto bad1; } - if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { - ti->error = "Expected cipher algorithm"; - goto bad2; - } + strcpy(cc->cipher, cipher); + strcpy(cc->chainmode, chainmode); cc->tfm = tfm; /* @@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad2; - if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) + cc->iv_size = crypto_blkcipher_ivsize(tfm); + if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ - cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), + cc->iv_size = max(cc->iv_size, (unsigned int)(sizeof(u64) / sizeof(u8))); else { - cc->iv_size = 0; if (cc->iv_gen_ops) { DMWARN("Selected cipher does not support IVs"); if (cc->iv_gen_ops->dtr) @@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad4; } - if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { + if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { ti->error = "Error setting key"; goto bad5; } @@ -675,7 +672,7 @@ bad3: if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); bad2: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); bad1: /* Must zero key 
material before freeing */ memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); @@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti) kfree(cc->iv_mode); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); - crypto_free_tfm(cc->tfm); + crypto_free_blkcipher(cc->tfm); dm_put_device(ti, cc->dev); /* Must zero key material before freeing */ @@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - cipher = crypto_tfm_alg_name(cc->tfm); + cipher = crypto_blkcipher_name(cc->tfm); - switch(cc->tfm->crt_cipher.cit_mode) { - case CRYPTO_TFM_MODE_CBC: - chainmode = "cbc"; - break; - case CRYPTO_TFM_MODE_ECB: - chainmode = "ecb"; - break; - default: - BUG(); - } + chainmode = cc->chainmode; if (cc->iv_mode) DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 51ff9a9d1bb5..f3655fd772f5 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -43,6 +43,7 @@ * deprecated in 2.6 */ +#include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/version.h> @@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); MODULE_VERSION("1.0.2"); -static void +static unsigned int setup_sg(struct scatterlist *sg, const void *address, unsigned int length) { sg[0].page = virt_to_page(address); sg[0].offset = offset_in_page(address); sg[0].length = length; + return length; } #define SHA1_PAD_SIZE 40 @@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad) * State for an MPPE (de)compressor. */ struct ppp_mppe_state { - struct crypto_tfm *arc4; - struct crypto_tfm *sha1; + struct crypto_blkcipher *arc4; + struct crypto_hash *sha1; unsigned char *sha1_digest; unsigned char master_key[MPPE_MAX_KEY_LEN]; unsigned char session_key[MPPE_MAX_KEY_LEN]; @@ -136,14 +138,21 @@ struct ppp_mppe_state { */ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) { + struct hash_desc desc; struct scatterlist sg[4]; + unsigned int nbytes; - setup_sg(&sg[0], state->master_key, state->keylen); - setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1)); - setup_sg(&sg[2], state->session_key, state->keylen); - setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2)); + nbytes = setup_sg(&sg[0], state->master_key, state->keylen); + nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, + sizeof(sha_pad->sha_pad1)); + nbytes += setup_sg(&sg[2], state->session_key, state->keylen); + nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, + sizeof(sha_pad->sha_pad2)); - crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest); + desc.tfm = state->sha1; + desc.flags = 0; + + crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); memcpy(InterimKey, state->sha1_digest, state->keylen); } @@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { unsigned char InterimKey[MPPE_MAX_KEY_LEN]; struct scatterlist sg_in[1], sg_out[1]; + struct blkcipher_desc desc = { .tfm = state->arc4 }; get_new_key_from_sha(state, InterimKey); if (!initial_key) { - crypto_cipher_setkey(state->arc4, InterimKey, state->keylen); + crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); setup_sg(sg_in, InterimKey, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, - state->keylen) != 0) { + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + state->keylen) != 0) 
{ printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { @@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } - crypto_cipher_setkey(state->arc4, state->session_key, state->keylen); + crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); } /* @@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen) memset(state, 0, sizeof(*state)); - state->arc4 = crypto_alloc_tfm("arc4", 0); - if (!state->arc4) + state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(state->arc4)) { + state->arc4 = NULL; goto out_free; + } - state->sha1 = crypto_alloc_tfm("sha1", 0); - if (!state->sha1) + state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(state->sha1)) { + state->sha1 = NULL; goto out_free; + } - digestsize = crypto_tfm_alg_digestsize(state->sha1); + digestsize = crypto_hash_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; @@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen) if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) - crypto_free_tfm(state->sha1); + crypto_free_hash(state->sha1); if (state->arc4) - crypto_free_tfm(state->arc4); + crypto_free_blkcipher(state->arc4); kfree(state); out: return NULL; @@ -247,9 +261,9 @@ static void mppe_free(void *arg) if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) - crypto_free_tfm(state->sha1); + crypto_free_hash(state->sha1); if (state->arc4) - crypto_free_tfm(state->arc4); + crypto_free_blkcipher(state->arc4); kfree(state); } } @@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; + struct blkcipher_desc desc = { .tfm = state->arc4 }; int proto; struct scatterlist sg_in[1], sg_out[1]; @@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, /* Encrypt packet */ setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) { + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); return -1; } @@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; + struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; int sanity = 0; @@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, */ setup_sg(sg_in, ibuf, 1); setup_sg(sg_out, obuf, 1); - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) { + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } @@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, /* And finally, decrypt the rest of the packet. 
*/ setup_sg(sg_in, ibuf + 1, isize - 1); setup_sg(sg_out, obuf + 1, osize - 1); - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) { + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } @@ -694,8 +710,8 @@ static struct compressor ppp_mppe = { static int __init ppp_mppe_init(void) { int answer; - if (!(crypto_alg_available("arc4", 0) && - crypto_alg_available("sha1", 0))) + if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && + crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) return -ENODEV; sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a4dd13942714..170c500169da 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -19,6 +19,7 @@ ======================================================================*/ +#include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -1203,7 +1204,7 @@ struct airo_info { struct iw_spy_data spy_data; struct iw_public_data wireless_data; /* MIC stuff */ - struct crypto_tfm *tfm; + struct crypto_cipher *tfm; mic_module mod[2]; mic_statistics micstats; HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors @@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev); static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); static void MoveWindow(miccntx *context, u32 micSeq); -static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); +static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, + struct crypto_cipher *tfm); static void emmh32_init(emmh32_context *context); static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); static void emmh32_final(emmh32_context *context, u8 digest[4]); @@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) { int i; if (ai->tfm == NULL) - ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); + ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); - if (ai->tfm == NULL) { + if (IS_ERR(ai->tfm)) { airo_print_err(ai->dev->name, "failed to load transform for AES"); + ai->tfm = NULL; return ERROR; } @@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq) static unsigned char aes_counter[16]; /* expand the key to fill the MMH coefficient array */ -static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) +static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, + struct crypto_cipher *tfm) { /* take the keying material, expand if necessary, truncate at 16-bytes */ /* run through AES counter mode to generate context->coeff[] */ @@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct int i,j; u32 counter; u8 *cipher, plain[16]; - struct scatterlist sg[1]; crypto_cipher_setkey(tfm, pkey, 16); counter = 0; @@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct aes_counter[12] = (u8)(counter >> 24); counter++; memcpy (plain, aes_counter, 16); - sg_set_buf(sg, plain, 16); - crypto_cipher_encrypt(tfm, sg, sg, 16); - cipher = kmap(sg->page) + sg->offset; + crypto_cipher_encrypt_one(tfm, plain, plain); + cipher = plain; for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); j += 4; @@ -2432,7 
+2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) ai->shared, ai->shared_dma); } } - crypto_free_tfm(ai->tfm); + crypto_free_cipher(ai->tfm); del_airo_dev( dev ); free_netdev( dev ); } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 058f094f945a..66a1ae1d6982 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -26,6 +26,7 @@ * Zhenyu Wang */ +#include <linux/err.h> #include <linux/types.h> #include <linux/list.h> #include <linux/inet.h> @@ -107,8 +108,11 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, u8* crc) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct hash_desc desc; - crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); + desc.tfm = tcp_conn->tx_tfm; + desc.flags = 0; + crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc); buf->sg.length += sizeof(uint32_t); } @@ -452,11 +456,14 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) } if (conn->hdrdgst_en) { + struct hash_desc desc; struct scatterlist sg; sg_init_one(&sg, (u8 *)hdr, sizeof(struct iscsi_hdr) + ahslen); - crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); + desc.tfm = tcp_conn->rx_tfm; + desc.flags = 0; + crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst); rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + ahslen); if (cdgst != rdgst) { @@ -673,7 +680,7 @@ partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, memcpy(&temp, sg, sizeof(struct scatterlist)); temp.offset = offset; temp.length = length; - crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); + crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length); } static void @@ -682,7 +689,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len) struct scatterlist tmp; sg_init_one(&tmp, buf, len); - crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); + crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len); } static int iscsi_scsi_data_in(struct iscsi_conn *conn) @@ -736,9 +743,9 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) if (!rc) { if (conn->datadgst_en) { if (!offset) - crypto_digest_update( - tcp_conn->data_rx_tfm, - &sg[i], 1); + crypto_hash_update( + &tcp_conn->data_rx_hash, + &sg[i], sg[i].length); else partial_sg_digest_update(tcp_conn, &sg[i], @@ -877,8 +884,7 @@ more: rc = iscsi_tcp_hdr_recv(conn); if (!rc && tcp_conn->in.datalen) { if (conn->datadgst_en) { - BUG_ON(!tcp_conn->data_rx_tfm); - crypto_digest_init(tcp_conn->data_rx_tfm); + crypto_hash_init(&tcp_conn->data_rx_hash); } tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; } else if (rc) { @@ -931,11 +937,11 @@ more: tcp_conn->in.padding); memset(pad, 0, tcp_conn->in.padding); sg_init_one(&sg, pad, tcp_conn->in.padding); - crypto_digest_update(tcp_conn->data_rx_tfm, - &sg, 1); + crypto_hash_update(&tcp_conn->data_rx_hash, + &sg, sg.length); } - crypto_digest_final(tcp_conn->data_rx_tfm, - (u8 *) & tcp_conn->in.datadgst); + crypto_hash_final(&tcp_conn->data_rx_hash, + (u8 *)&tcp_conn->in.datadgst); debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; } else @@ -1181,8 +1187,7 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - BUG_ON(!tcp_conn->data_tx_tfm); - crypto_digest_init(tcp_conn->data_tx_tfm); + crypto_hash_init(&tcp_conn->data_tx_hash); tcp_ctask->digest_count = 4; } @@ -1196,7 +1201,7 @@ iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, int sent = 0; if (final) - 
crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); + crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest); iscsi_buf_init_iov(buf, (char*)digest, 4); rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); @@ -1491,16 +1496,17 @@ handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (rc) { tcp_ctask->xmstate |= XMSTATE_IMM_DATA; if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8*)&tcp_ctask->immdigest); + crypto_hash_final(&tcp_conn->data_tx_hash, + (u8 *)&tcp_ctask->immdigest); debug_tcp("tx imm sendpage fail 0x%x\n", tcp_ctask->datadigest); } return rc; } if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); if (!ctask->imm_count) break; @@ -1577,8 +1583,8 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->xmstate |= XMSTATE_UNS_DATA; /* will continue with this ctask later.. */ if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8 *)&dtask->digest); + crypto_hash_final(&tcp_conn->data_tx_hash, + (u8 *)&dtask->digest); debug_tcp("tx uns data fail 0x%x\n", dtask->digest); } @@ -1593,8 +1599,9 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) * so pass it */ if (conn->datadgst_en && tcp_ctask->sent - start > 0) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); if (!ctask->data_count) break; @@ -1668,7 +1675,7 @@ solicit_again: tcp_ctask->xmstate |= XMSTATE_SOL_DATA; /* will continue with this ctask later.. */ if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, + crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)&dtask->digest); debug_tcp("r2t data send fail 0x%x\n", dtask->digest); } @@ -1677,8 +1684,8 @@ solicit_again: BUG_ON(r2t->data_count < 0); if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, - 1); + crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg, + r2t->sendbuf.sg.length); if (r2t->data_count) { BUG_ON(ctask->sc->use_sg == 0); @@ -1766,8 +1773,9 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } if (conn->datadgst_en) { - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); /* imm data? 
*/ if (!dtask) { rc = iscsi_digest_final_send(conn, ctask, @@ -1963,13 +1971,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) /* now free tcp_conn */ if (digest) { if (tcp_conn->tx_tfm) - crypto_free_tfm(tcp_conn->tx_tfm); + crypto_free_hash(tcp_conn->tx_tfm); if (tcp_conn->rx_tfm) - crypto_free_tfm(tcp_conn->rx_tfm); - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); + crypto_free_hash(tcp_conn->rx_tfm); + if (tcp_conn->data_tx_hash.tfm) + crypto_free_hash(tcp_conn->data_tx_hash.tfm); + if (tcp_conn->data_rx_hash.tfm) + crypto_free_hash(tcp_conn->data_rx_hash.tfm); } kfree(tcp_conn); @@ -2130,44 +2138,48 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, if (conn->hdrdgst_en) { tcp_conn->hdr_size += sizeof(__u32); if (!tcp_conn->tx_tfm) - tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->tx_tfm) - return -ENOMEM; + tcp_conn->tx_tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->tx_tfm)) + return PTR_ERR(tcp_conn->tx_tfm); if (!tcp_conn->rx_tfm) - tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->rx_tfm) { - crypto_free_tfm(tcp_conn->tx_tfm); - return -ENOMEM; + tcp_conn->rx_tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->rx_tfm)) { + crypto_free_hash(tcp_conn->tx_tfm); + return PTR_ERR(tcp_conn->rx_tfm); } } else { if (tcp_conn->tx_tfm) - crypto_free_tfm(tcp_conn->tx_tfm); + crypto_free_hash(tcp_conn->tx_tfm); if (tcp_conn->rx_tfm) - crypto_free_tfm(tcp_conn->rx_tfm); + crypto_free_hash(tcp_conn->rx_tfm); } break; case ISCSI_PARAM_DATADGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); if (conn->datadgst_en) { - if (!tcp_conn->data_tx_tfm) - tcp_conn->data_tx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_tx_tfm) - return -ENOMEM; - if (!tcp_conn->data_rx_tfm) - tcp_conn->data_rx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_rx_tfm) { - crypto_free_tfm(tcp_conn->data_tx_tfm); - return -ENOMEM; + if (!tcp_conn->data_tx_hash.tfm) + tcp_conn->data_tx_hash.tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->data_tx_hash.tfm)) + return PTR_ERR(tcp_conn->data_tx_hash.tfm); + if (!tcp_conn->data_rx_hash.tfm) + tcp_conn->data_rx_hash.tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->data_rx_hash.tfm)) { + crypto_free_hash(tcp_conn->data_tx_hash.tfm); + return PTR_ERR(tcp_conn->data_rx_hash.tfm); } } else { - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); + if (tcp_conn->data_tx_hash.tfm) + crypto_free_hash(tcp_conn->data_tx_hash.tfm); + if (tcp_conn->data_rx_hash.tfm) + crypto_free_hash(tcp_conn->data_rx_hash.tfm); } tcp_conn->sendpage = conn->datadgst_en ? 
sock_no_sendpage : tcp_conn->sock->ops->sendpage; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 6a4ee704e46e..e35701305fc9 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -51,6 +51,7 @@ #define ISCSI_SG_TABLESIZE SG_ALL #define ISCSI_TCP_MAX_CMD_LEN 16 +struct crypto_hash; struct socket; /* Socket connection recieve helper */ @@ -84,8 +85,8 @@ struct iscsi_tcp_conn { /* iSCSI connection-wide sequencing */ int hdr_size; /* PDU header size */ - struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ - struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */ + struct crypto_hash *rx_tfm; /* CRC32C (Rx) */ + struct hash_desc data_rx_hash; /* CRC32C (Rx) for data */ /* control data */ struct iscsi_tcp_recv in; /* TCP receive context */ @@ -97,8 +98,8 @@ struct iscsi_tcp_conn { void (*old_write_space)(struct sock *); /* xmit */ - struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ - struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ + struct crypto_hash *tx_tfm; /* CRC32C (Tx) */ + struct hash_desc data_tx_hash; /* CRC32C (Tx) for data */ /* MIB custom statistics */ uint32_t sendpage_failures_cnt; |
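
Finally, a side effect of template naming shows up in availability checks: the deprecated crypto_alg_available() gives way to crypto_has_blkcipher() and crypto_has_hash(), which probe (and can autoload) the composed algorithm directly, as the ppp_mppe and padlock.c hunks above do. A short sketch of the probe, assuming module-init context:

/* Sketch of the new capability probes. Asking for "ecb(arc4)" rather
 * than bare "arc4" verifies that the ECB template can actually be
 * instantiated around the cipher before committing to it. */
if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
	return -ENODEV;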