Diffstat (limited to 'drivers/crypto')
73 files changed, 4811 insertions, 1280 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4b75084fabad..5b5393f1b87a 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -525,12 +525,26 @@ config CRYPTO_DEV_ATMEL_SHA To compile this driver as a module, choose M here: the module will be called atmel-sha. +config CRYPTO_DEV_ATMEL_ECC + tristate "Support for Microchip / Atmel ECC hw accelerator" + depends on ARCH_AT91 || COMPILE_TEST + depends on I2C + select CRYPTO_ECDH + select CRC16 + help + Microhip / Atmel ECC hw accelerator. + Select this if you want to use the Microchip / Atmel module for + ECDH algorithm. + + To compile this driver as a module, choose M here: the module + will be called atmel-ecc. + config CRYPTO_DEV_CCP - bool "Support for AMD Cryptographic Coprocessor" + bool "Support for AMD Secure Processor" depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM help - The AMD Cryptographic Coprocessor provides hardware offload support - for encryption, hashing and related operations. + The AMD Secure Processor provides support for the Cryptographic Coprocessor + (CCP) and the Platform Security Processor (PSP) devices. if CRYPTO_DEV_CCP source "drivers/crypto/ccp/Kconfig" @@ -616,6 +630,14 @@ config CRYPTO_DEV_SUN4I_SS To compile this driver as a module, choose M here: the module will be called sun4i-ss. +config CRYPTO_DEV_SUN4I_SS_PRNG + bool "Support for Allwinner Security System PRNG" + depends on CRYPTO_DEV_SUN4I_SS + select CRYPTO_RNG + help + Select this option if you want to provide kernel-side support for + the Pseudo-Random Number Generator found in the Security System. + config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 2c555a3393b2..b12eb3c99430 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o +obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ @@ -35,7 +36,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o -obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/ +obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c new file mode 100644 index 000000000000..e66f18a0ddd0 --- /dev/null +++ b/drivers/crypto/atmel-ecc.c @@ -0,0 +1,781 @@ +/* + * Microchip / Atmel ECC (I2C) driver. + * + * Copyright (c) 2017, Microchip Technology Inc. + * Author: Tudor Ambarus <tudor.ambarus@microchip.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/bitrev.h> +#include <linux/crc16.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <crypto/internal/kpp.h> +#include <crypto/ecdh.h> +#include <crypto/kpp.h> +#include "atmel-ecc.h" + +/* Used for binding tfm objects to i2c clients. */ +struct atmel_ecc_driver_data { + struct list_head i2c_client_list; + spinlock_t i2c_list_lock; +} ____cacheline_aligned; + +static struct atmel_ecc_driver_data driver_data; + +/** + * atmel_ecc_i2c_client_priv - i2c_client private data + * @client : pointer to i2c client device + * @i2c_client_list_node: part of i2c_client_list + * @lock : lock for sending i2c commands + * @wake_token : wake token array of zeros + * @wake_token_sz : size in bytes of the wake_token + * @tfm_count : number of active crypto transformations on i2c client + * + * Reads and writes from/to the i2c client are sequential. The first byte + * transmitted to the device is treated as the byte size. Any attempt to send + * more than this number of bytes will cause the device to not ACK those bytes. + * After the host writes a single command byte to the input buffer, reads are + * prohibited until after the device completes command execution. Use a mutex + * when sending i2c commands. + */ +struct atmel_ecc_i2c_client_priv { + struct i2c_client *client; + struct list_head i2c_client_list_node; + struct mutex lock; + u8 wake_token[WAKE_TOKEN_MAX_SIZE]; + size_t wake_token_sz; + atomic_t tfm_count ____cacheline_aligned; +}; + +/** + * atmel_ecdh_ctx - transformation context + * @client : pointer to i2c client device + * @fallback : used for unsupported curves or when user wants to use its own + * private key. + * @public_key : generated when calling set_secret(). It's the responsibility + * of the user to not call set_secret() while + * generate_public_key() or compute_shared_secret() are in flight. + * @curve_id : elliptic curve id + * @n_sz : size in bytes of the n prime + * @do_fallback: true when the device doesn't support the curve or when the user + * wants to use its own private key. + */ +struct atmel_ecdh_ctx { + struct i2c_client *client; + struct crypto_kpp *fallback; + const u8 *public_key; + unsigned int curve_id; + size_t n_sz; + bool do_fallback; +}; + +/** + * atmel_ecc_work_data - data structure representing the work + * @ctx : transformation context. + * @cbk : pointer to a callback function to be invoked upon completion of this + * request. This has the form: + * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status) + * where: + * @work_data: data structure representing the work + * @areq : optional pointer to an argument passed with the original + * request. + * @status : status returned from the i2c client device or i2c error. + * @areq: optional pointer to a user argument for use at callback time. + * @work: describes the task to be executed. + * @cmd : structure used for communicating with the device. 
+ */ +struct atmel_ecc_work_data { + struct atmel_ecdh_ctx *ctx; + void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq, + int status); + void *areq; + struct work_struct work; + struct atmel_ecc_cmd cmd; +}; + +static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len) +{ + return cpu_to_le16(bitrev16(crc16(crc, buffer, len))); +} + +/** + * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC. + * CRC16 verification of the count, opcode, param1, param2 and data bytes. + * The checksum is saved in little-endian format in the least significant + * two bytes of the command. CRC polynomial is 0x8005 and the initial register + * value should be zero. + * + * @cmd : structure used for communicating with the device. + */ +static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd) +{ + u8 *data = &cmd->count; + size_t len = cmd->count - CRC_SIZE; + u16 *crc16 = (u16 *)(data + len); + + *crc16 = atmel_ecc_crc16(0, data, len); +} + +static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd) +{ + cmd->word_addr = COMMAND; + cmd->opcode = OPCODE_READ; + /* + * Read the word from Configuration zone that contains the lock bytes + * (UserExtra, Selector, LockValue, LockConfig). + */ + cmd->param1 = CONFIG_ZONE; + cmd->param2 = DEVICE_LOCK_ADDR; + cmd->count = READ_COUNT; + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_READ; + cmd->rxsize = READ_RSP_SIZE; +} + +static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid) +{ + cmd->word_addr = COMMAND; + cmd->count = GENKEY_COUNT; + cmd->opcode = OPCODE_GENKEY; + cmd->param1 = GENKEY_MODE_PRIVATE; + /* a random private key will be generated and stored in slot keyID */ + cmd->param2 = cpu_to_le16(keyid); + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_GENKEY; + cmd->rxsize = GENKEY_RSP_SIZE; +} + +static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd, + struct scatterlist *pubkey) +{ + size_t copied; + + cmd->word_addr = COMMAND; + cmd->count = ECDH_COUNT; + cmd->opcode = OPCODE_ECDH; + cmd->param1 = ECDH_PREFIX_MODE; + /* private key slot */ + cmd->param2 = cpu_to_le16(DATA_SLOT_2); + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. + */ + copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE); + if (copied != ATMEL_ECC_PUBKEY_SIZE) + return -EINVAL; + + atmel_ecc_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_ECDH; + cmd->rxsize = ECDH_RSP_SIZE; + + return 0; +} + +/* + * After wake and after execution of a command, there will be error, status, or + * result bytes in the device's output register that can be retrieved by the + * system. When the length of that group is four bytes, the codes returned are + * detailed in error_list. 
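As an aside on the checksum helpers above, here is a minimal standalone sketch (hypothetical, outside the kernel) of the same computation. It assumes the kernel's crc16() is the reflected CRC-16 with polynomial 0x8005 (0xA001 in reflected form) and that bitrev16() reverses all 16 bits; the result is stored little-endian in the last two bytes of the packet, which is what atmel_ecc_checksum() arranges via cpu_to_le16(bitrev16(crc16(0, data, cmd->count - CRC_SIZE))).

#include <stdint.h>
#include <stddef.h>

/* Reflected CRC-16, polynomial 0x8005 (0xA001 in reflected form). */
static uint16_t crc16_reflected(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* Reverse the bit order of a 16-bit value, as bitrev16() does. */
static uint16_t bitrev16_sketch(uint16_t v)
{
	uint16_t r = 0;
	int i;

	for (i = 0; i < 16; i++)
		r |= (uint16_t)((v >> i) & 1) << (15 - i);
	return r;
}

/* pkt points at the count byte; the CRC covers count..data (count - 2 bytes). */
static void ecc_checksum_sketch(uint8_t *pkt, uint8_t count)
{
	uint16_t crc = bitrev16_sketch(crc16_reflected(0, pkt, count - 2));

	pkt[count - 2] = crc & 0xff;	/* little-endian, low byte first */
	pkt[count - 1] = crc >> 8;
}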
+ */ +static int atmel_ecc_status(struct device *dev, u8 *status) +{ + size_t err_list_len = ARRAY_SIZE(error_list); + int i; + u8 err_id = status[1]; + + if (*status != STATUS_SIZE) + return 0; + + if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) + return 0; + + for (i = 0; i < err_list_len; i++) + if (error_list[i].value == err_id) + break; + + /* if err_id is not in the error_list then ignore it */ + if (i != err_list_len) { + dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text); + return err_id; + } + + return 0; +} + +static int atmel_ecc_wakeup(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + u8 status[STATUS_RSP_SIZE]; + int ret; + + /* + * The device ignores any levels or transitions on the SCL pin when the + * device is idle, asleep or during waking up. Don't check for error + * when waking up the device. + */ + i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); + + /* + * Wait to wake the device. Typical execution times for ecdh and genkey + * are around tens of milliseconds. Delta is chosen to 50 microseconds. + */ + usleep_range(TWHI_MIN, TWHI_MAX); + + ret = i2c_master_recv(client, status, STATUS_SIZE); + if (ret < 0) + return ret; + + return atmel_ecc_status(&client->dev, status); +} + +static int atmel_ecc_sleep(struct i2c_client *client) +{ + u8 sleep = SLEEP_TOKEN; + + return i2c_master_send(client, &sleep, 1); +} + +static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq, + int status) +{ + struct kpp_request *req = areq; + struct atmel_ecdh_ctx *ctx = work_data->ctx; + struct atmel_ecc_cmd *cmd = &work_data->cmd; + size_t copied; + size_t n_sz = ctx->n_sz; + + if (status) + goto free_work_data; + + /* copy the shared secret */ + copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX], + n_sz); + if (copied != n_sz) + status = -EINVAL; + + /* fall through */ +free_work_data: + kzfree(work_data); + kpp_request_complete(req, status); +} + +/* + * atmel_ecc_send_receive() - send a command to the device and receive its + * response. + * @client: i2c client device + * @cmd : structure used to communicate with the device + * + * After the device receives a Wake token, a watchdog counter starts within the + * device. After the watchdog timer expires, the device enters sleep mode + * regardless of whether some I/O transmission or command execution is in + * progress. If a command is attempted when insufficient time remains prior to + * watchdog timer execution, the device will return the watchdog timeout error + * code without attempting to execute the command. There is no way to reset the + * counter other than to put the device into sleep or idle mode and then + * wake it up again. 
+ */ +static int atmel_ecc_send_receive(struct i2c_client *client, + struct atmel_ecc_cmd *cmd) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + int ret; + + mutex_lock(&i2c_priv->lock); + + ret = atmel_ecc_wakeup(client); + if (ret) + goto err; + + /* send the command */ + ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); + if (ret < 0) + goto err; + + /* delay the appropriate amount of time for command to execute */ + msleep(cmd->msecs); + + /* receive the response */ + ret = i2c_master_recv(client, cmd->data, cmd->rxsize); + if (ret < 0) + goto err; + + /* put the device into low-power mode */ + ret = atmel_ecc_sleep(client); + if (ret < 0) + goto err; + + mutex_unlock(&i2c_priv->lock); + return atmel_ecc_status(&client->dev, cmd->data); +err: + mutex_unlock(&i2c_priv->lock); + return ret; +} + +static void atmel_ecc_work_handler(struct work_struct *work) +{ + struct atmel_ecc_work_data *work_data = + container_of(work, struct atmel_ecc_work_data, work); + struct atmel_ecc_cmd *cmd = &work_data->cmd; + struct i2c_client *client = work_data->ctx->client; + int status; + + status = atmel_ecc_send_receive(client, cmd); + work_data->cbk(work_data, work_data->areq, status); +} + +static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data, + void (*cbk)(struct atmel_ecc_work_data *work_data, + void *areq, int status), + void *areq) +{ + work_data->cbk = (void *)cbk; + work_data->areq = areq; + + INIT_WORK(&work_data->work, atmel_ecc_work_handler); + schedule_work(&work_data->work); +} + +static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) +{ + if (curve_id == ECC_CURVE_NIST_P256) + return ATMEL_ECC_NIST_P256_N_SIZE; + + return 0; +} + +/* + * A random private key is generated and stored in the device. The device + * returns the pair public key. + */ +static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + struct atmel_ecc_cmd *cmd; + void *public_key; + struct ecdh params; + int ret = -ENOMEM; + + /* free the old public key, if any */ + kfree(ctx->public_key); + /* make sure you don't free the old public key twice */ + ctx->public_key = NULL; + + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { + dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n"); + return -EINVAL; + } + + ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id); + if (!ctx->n_sz || params.key_size) { + /* fallback to ecdh software implementation */ + ctx->do_fallback = true; + return crypto_kpp_set_secret(ctx->fallback, buf, len); + } + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. 
+ */ + public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL); + if (!public_key) + goto free_cmd; + + ctx->do_fallback = false; + ctx->curve_id = params.curve_id; + + atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2); + + ret = atmel_ecc_send_receive(ctx->client, cmd); + if (ret) + goto free_public_key; + + /* save the public key */ + memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE); + ctx->public_key = public_key; + + kfree(cmd); + return 0; + +free_public_key: + kfree(public_key); +free_cmd: + kfree(cmd); + return ret; +} + +static int atmel_ecdh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + size_t copied; + int ret = 0; + + if (ctx->do_fallback) { + kpp_request_set_tfm(req, ctx->fallback); + return crypto_kpp_generate_public_key(req); + } + + /* public key was saved at private key generation */ + copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key, + ATMEL_ECC_PUBKEY_SIZE); + if (copied != ATMEL_ECC_PUBKEY_SIZE) + ret = -EINVAL; + + return ret; +} + +static int atmel_ecdh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + struct atmel_ecc_work_data *work_data; + gfp_t gfp; + int ret; + + if (ctx->do_fallback) { + kpp_request_set_tfm(req, ctx->fallback); + return crypto_kpp_compute_shared_secret(req); + } + + gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : + GFP_ATOMIC; + + work_data = kmalloc(sizeof(*work_data), gfp); + if (!work_data) + return -ENOMEM; + + work_data->ctx = ctx; + + ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src); + if (ret) + goto free_work_data; + + atmel_ecc_enqueue(work_data, atmel_ecdh_done, req); + + return -EINPROGRESS; + +free_work_data: + kfree(work_data); + return ret; +} + +static struct i2c_client *atmel_ecc_i2c_client_alloc(void) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; + struct i2c_client *client = ERR_PTR(-ENODEV); + int min_tfm_cnt = INT_MAX; + int tfm_cnt; + + spin_lock(&driver_data.i2c_list_lock); + + if (list_empty(&driver_data.i2c_client_list)) { + spin_unlock(&driver_data.i2c_list_lock); + return ERR_PTR(-ENODEV); + } + + list_for_each_entry(i2c_priv, &driver_data.i2c_client_list, + i2c_client_list_node) { + tfm_cnt = atomic_read(&i2c_priv->tfm_count); + if (tfm_cnt < min_tfm_cnt) { + min_tfm_cnt = tfm_cnt; + min_i2c_priv = i2c_priv; + } + if (!min_tfm_cnt) + break; + } + + if (min_i2c_priv) { + atomic_inc(&min_i2c_priv->tfm_count); + client = min_i2c_priv->client; + } + + spin_unlock(&driver_data.i2c_list_lock); + + return client; +} + +static void atmel_ecc_i2c_client_free(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + + atomic_dec(&i2c_priv->tfm_count); +} + +static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) +{ + const char *alg = kpp_alg_name(tfm); + struct crypto_kpp *fallback; + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->client = atmel_ecc_i2c_client_alloc(); + if (IS_ERR(ctx->client)) { + pr_err("tfm - i2c_client binding failed\n"); + return PTR_ERR(ctx->client); + } + + fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm)); + + 
dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback))); + + ctx->fallback = fallback; + + return 0; +} + +static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + kfree(ctx->public_key); + crypto_free_kpp(ctx->fallback); + atmel_ecc_i2c_client_free(ctx->client); +} + +static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm) +{ + struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->fallback); + + /* + * The device only supports NIST P256 ECC keys. The public key size will + * always be the same. Use a macro for the key size to avoid unnecessary + * computations. + */ + return ATMEL_ECC_PUBKEY_SIZE; +} + +static struct kpp_alg atmel_ecdh = { + .set_secret = atmel_ecdh_set_secret, + .generate_public_key = atmel_ecdh_generate_public_key, + .compute_shared_secret = atmel_ecdh_compute_shared_secret, + .init = atmel_ecdh_init_tfm, + .exit = atmel_ecdh_exit_tfm, + .max_size = atmel_ecdh_max_size, + .base = { + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_name = "ecdh", + .cra_driver_name = "atmel-ecdh", + .cra_priority = ATMEL_ECC_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct atmel_ecdh_ctx), + }, +}; + +static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate) +{ + u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); + + /* return the size of the wake_token in bytes */ + return DIV_ROUND_UP(no_of_bits, 8); +} + +static int device_sanity_check(struct i2c_client *client) +{ + struct atmel_ecc_cmd *cmd; + int ret; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + atmel_ecc_init_read_cmd(cmd); + + ret = atmel_ecc_send_receive(client, cmd); + if (ret) + goto free_cmd; + + /* + * It is vital that the Configuration, Data and OTP zones be locked + * prior to release into the field of the system containing the device. + * Failure to lock these zones may permit modification of any secret + * keys and may lead to other security problems. + */ + if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { + dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); + ret = -ENOTSUPP; + } + + /* fall through */ +free_cmd: + kfree(cmd); + return ret; +} + +static int atmel_ecc_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv; + struct device *dev = &client->dev; + int ret; + u32 bus_clk_rate; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "I2C_FUNC_I2C not supported\n"); + return -ENODEV; + } + + ret = of_property_read_u32(client->adapter->dev.of_node, + "clock-frequency", &bus_clk_rate); + if (ret) { + dev_err(dev, "of: failed to read clock-frequency property\n"); + return ret; + } + + if (bus_clk_rate > 1000000L) { + dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", + bus_clk_rate); + return -EINVAL; + } + + i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); + if (!i2c_priv) + return -ENOMEM; + + i2c_priv->client = client; + mutex_init(&i2c_priv->lock); + + /* + * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate - + * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz + * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE. 
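A quick worked example for the wake token sizing above (a hypothetical standalone restatement of atmel_ecc_wake_token_sz(), using the TWLO_USEC value from atmel-ecc.h): the token is simply enough zero bits on the bus to span the 60 microsecond wake-low window at the given bus clock rate, rounded up to whole bytes.

#include <stdint.h>

#define TWLO_USEC		60
#define USEC_PER_SEC		1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long wake_token_bytes(uint32_t bus_clk_rate)
{
	unsigned long bits = DIV_ROUND_UP(TWLO_USEC * (unsigned long)bus_clk_rate,
					  USEC_PER_SEC);

	return DIV_ROUND_UP(bits, 8);
}

/*
 * wake_token_bytes(100000)  == 1: 6 bits  -> one zero byte at 100 kHz.
 * wake_token_bytes(1000000) == 8: 60 bits -> eight zero bytes at 1 MHz,
 * i.e. WAKE_TOKEN_MAX_SIZE, which is why bus_clk_rate is capped at 1 MHz.
 */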
+ */ + i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate); + + memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); + + atomic_set(&i2c_priv->tfm_count, 0); + + i2c_set_clientdata(client, i2c_priv); + + ret = device_sanity_check(client); + if (ret) + return ret; + + spin_lock(&driver_data.i2c_list_lock); + list_add_tail(&i2c_priv->i2c_client_list_node, + &driver_data.i2c_client_list); + spin_unlock(&driver_data.i2c_list_lock); + + ret = crypto_register_kpp(&atmel_ecdh); + if (ret) { + spin_lock(&driver_data.i2c_list_lock); + list_del(&i2c_priv->i2c_client_list_node); + spin_unlock(&driver_data.i2c_list_lock); + + dev_err(dev, "%s alg registration failed\n", + atmel_ecdh.base.cra_driver_name); + } else { + dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n"); + } + + return ret; +} + +static int atmel_ecc_remove(struct i2c_client *client) +{ + struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + + /* Return EBUSY if i2c client already allocated. */ + if (atomic_read(&i2c_priv->tfm_count)) { + dev_err(&client->dev, "Device is busy\n"); + return -EBUSY; + } + + crypto_unregister_kpp(&atmel_ecdh); + + spin_lock(&driver_data.i2c_list_lock); + list_del(&i2c_priv->i2c_client_list_node); + spin_unlock(&driver_data.i2c_list_lock); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id atmel_ecc_dt_ids[] = { + { + .compatible = "atmel,atecc508a", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids); +#endif + +static const struct i2c_device_id atmel_ecc_id[] = { + { "atecc508a", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, atmel_ecc_id); + +static struct i2c_driver atmel_ecc_driver = { + .driver = { + .name = "atmel-ecc", + .of_match_table = of_match_ptr(atmel_ecc_dt_ids), + }, + .probe = atmel_ecc_probe, + .remove = atmel_ecc_remove, + .id_table = atmel_ecc_id, +}; + +static int __init atmel_ecc_init(void) +{ + spin_lock_init(&driver_data.i2c_list_lock); + INIT_LIST_HEAD(&driver_data.i2c_client_list); + return i2c_add_driver(&atmel_ecc_driver); +} + +static void __exit atmel_ecc_exit(void) +{ + flush_scheduled_work(); + i2c_del_driver(&atmel_ecc_driver); +} + +module_init(atmel_ecc_init); +module_exit(atmel_ecc_exit); + +MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>"); +MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h new file mode 100644 index 000000000000..25232c8abcc2 --- /dev/null +++ b/drivers/crypto/atmel-ecc.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2017, Microchip Technology Inc. + * Author: Tudor Ambarus <tudor.ambarus@microchip.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#ifndef __ATMEL_ECC_H__ +#define __ATMEL_ECC_H__ + +#define ATMEL_ECC_PRIORITY 300 + +#define COMMAND 0x03 /* packet function */ +#define SLEEP_TOKEN 0x01 +#define WAKE_TOKEN_MAX_SIZE 8 + +/* Definitions of Data and Command sizes */ +#define WORD_ADDR_SIZE 1 +#define COUNT_SIZE 1 +#define CRC_SIZE 2 +#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE) + +/* size in bytes of the n prime */ +#define ATMEL_ECC_NIST_P256_N_SIZE 32 +#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE) + +#define STATUS_RSP_SIZE 4 +#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) +#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \ + CMD_OVERHEAD_SIZE) +#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE) +#define MAX_RSP_SIZE GENKEY_RSP_SIZE + +/** + * atmel_ecc_cmd - structure used for communicating with the device. + * @word_addr: indicates the function of the packet sent to the device. This + * byte should have a value of COMMAND for normal operation. + * @count : number of bytes to be transferred to (or from) the device. + * @opcode : the command code. + * @param1 : the first parameter; always present. + * @param2 : the second parameter; always present. + * @data : optional remaining input data. Includes a 2-byte CRC. + * @rxsize : size of the data received from i2c client. + * @msecs : command execution time in milliseconds + */ +struct atmel_ecc_cmd { + u8 word_addr; + u8 count; + u8 opcode; + u8 param1; + u16 param2; + u8 data[MAX_RSP_SIZE]; + u8 msecs; + u16 rxsize; +} __packed; + +/* Status/Error codes */ +#define STATUS_SIZE 0x04 +#define STATUS_NOERR 0x00 +#define STATUS_WAKE_SUCCESSFUL 0x11 + +static const struct { + u8 value; + const char *error_text; +} error_list[] = { + { 0x01, "CheckMac or Verify miscompare" }, + { 0x03, "Parse Error" }, + { 0x05, "ECC Fault" }, + { 0x0F, "Execution Error" }, + { 0xEE, "Watchdog about to expire" }, + { 0xFF, "CRC or other communication error" }, +}; + +/* Definitions for eeprom organization */ +#define CONFIG_ZONE 0 + +/* Definitions for Indexes common to all commands */ +#define RSP_DATA_IDX 1 /* buffer index of data in response */ +#define DATA_SLOT_2 2 /* used for ECDH private key */ + +/* Definitions for the device lock state */ +#define DEVICE_LOCK_ADDR 0x15 +#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2) +#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3) + +/* + * Wake High delay to data communication (microseconds). SDA should be stable + * high for this entire duration. 
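To make the command packet format above concrete, here is a sketch (hypothetical, merely mirroring struct atmel_ecc_cmd and the READ command values defined in this header) of the packet that atmel_ecc_init_read_cmd() prepares; i2c_master_send() then transmits count + WORD_ADDR_SIZE bytes of it.

#include <stdint.h>

/* Field-by-field mirror of the 8-byte READ packet. */
struct ecc_read_packet {
	uint8_t  word_addr;	/* COMMAND          = 0x03 */
	uint8_t  count;		/* READ_COUNT       = 7, counts count..crc  */
	uint8_t  opcode;	/* OPCODE_READ      = 0x02 */
	uint8_t  param1;	/* CONFIG_ZONE      = 0    */
	uint16_t param2;	/* DEVICE_LOCK_ADDR = 0x15 */
	uint8_t  crc[2];	/* CRC-16 over count..param2, little-endian */
} __attribute__((packed));

/* Bytes written on the bus: READ_COUNT + WORD_ADDR_SIZE = 7 + 1 = 8. */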
+ */ +#define TWHI_MIN 1500 +#define TWHI_MAX 1550 + +/* Wake Low duration */ +#define TWLO_USEC 60 + +/* Command execution time (milliseconds) */ +#define MAX_EXEC_TIME_ECDH 58 +#define MAX_EXEC_TIME_GENKEY 115 +#define MAX_EXEC_TIME_READ 1 + +/* Command opcode */ +#define OPCODE_ECDH 0x43 +#define OPCODE_GENKEY 0x40 +#define OPCODE_READ 0x02 + +/* Definitions for the READ Command */ +#define READ_COUNT 7 + +/* Definitions for the GenKey Command */ +#define GENKEY_COUNT 7 +#define GENKEY_MODE_PRIVATE 0x04 + +/* Definitions for the ECDH Command */ +#define ECDH_COUNT 71 +#define ECDH_PREFIX_MODE 0x00 + +#endif /* __ATMEL_ECC_H__ */ diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index dad4e5bad827..3e2f41b3eaf3 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2883,7 +2883,7 @@ sha_dd_err: static int atmel_sha_remove(struct platform_device *pdev) { - static struct atmel_sha_dev *sha_dd; + struct atmel_sha_dev *sha_dd; sha_dd = platform_get_drvdata(pdev); if (!sha_dd) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index b25f1b3c981f..f4b335dda568 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1487,7 +1487,7 @@ tdes_dd_err: static int atmel_tdes_remove(struct platform_device *pdev) { - static struct atmel_tdes_dev *tdes_dd; + struct atmel_tdes_dev *tdes_dd; tdes_dd = platform_get_drvdata(pdev); if (!tdes_dd) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 9cfd36c1bcb6..8685c7e4debd 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -90,8 +90,6 @@ static int aead_pri = 150; module_param(aead_pri, int, 0644); MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos"); -#define MAX_SPUS 16 - /* A type 3 BCM header, expected to precede the SPU header for SPU-M. * Bits 3 and 4 in the first byte encode the channel number (the dma ringset). 
* 0x60 - ring 0 @@ -120,7 +118,7 @@ static u8 select_channel(void) { u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan); - return chan_idx % iproc_priv.spu.num_spu; + return chan_idx % iproc_priv.spu.num_chan; } /** @@ -4528,8 +4526,13 @@ static void spu_functions_register(struct device *dev, */ static int spu_mb_init(struct device *dev) { - struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu]; - int err; + struct mbox_client *mcl = &iproc_priv.mcl; + int err, i; + + iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan, + sizeof(struct mbox_chan *), GFP_KERNEL); + if (!iproc_priv.mbox) + return -ENOMEM; mcl->dev = dev; mcl->tx_block = false; @@ -4538,25 +4541,33 @@ static int spu_mb_init(struct device *dev) mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; - iproc_priv.mbox[iproc_priv.spu.num_spu] = - mbox_request_channel(mcl, 0); - if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) { - err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]); - dev_err(dev, - "Mbox channel %d request failed with err %d", - iproc_priv.spu.num_spu, err); - iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL; - return err; + for (i = 0; i < iproc_priv.spu.num_chan; i++) { + iproc_priv.mbox[i] = mbox_request_channel(mcl, i); + if (IS_ERR(iproc_priv.mbox[i])) { + err = (int)PTR_ERR(iproc_priv.mbox[i]); + dev_err(dev, + "Mbox channel %d request failed with err %d", + i, err); + iproc_priv.mbox[i] = NULL; + goto free_channels; + } } return 0; +free_channels: + for (i = 0; i < iproc_priv.spu.num_chan; i++) { + if (iproc_priv.mbox[i]) + mbox_free_channel(iproc_priv.mbox[i]); + } + + return err; } static void spu_mb_release(struct platform_device *pdev) { int i; - for (i = 0; i < iproc_priv.spu.num_spu; i++) + for (i = 0; i < iproc_priv.spu.num_chan; i++) mbox_free_channel(iproc_priv.mbox[i]); } @@ -4567,7 +4578,7 @@ static void spu_counters_init(void) atomic_set(&iproc_priv.session_count, 0); atomic_set(&iproc_priv.stream_count, 0); - atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu); + atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan); atomic64_set(&iproc_priv.bytes_in, 0); atomic64_set(&iproc_priv.bytes_out, 0); for (i = 0; i < SPU_OP_NUM; i++) { @@ -4809,47 +4820,38 @@ static int spu_dt_read(struct platform_device *pdev) struct resource *spu_ctrl_regs; const struct of_device_id *match; const struct spu_type_subtype *matched_spu_type; - void __iomem *spu_reg_vbase[MAX_SPUS]; - int err; + struct device_node *dn = pdev->dev.of_node; + int err, i; - match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); - matched_spu_type = match->data; + /* Count number of mailbox channels */ + spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); - if (iproc_priv.spu.num_spu > 1) { - /* If this is 2nd or later SPU, make sure it's same type */ - if ((spu->spu_type != matched_spu_type->type) || - (spu->spu_subtype != matched_spu_type->subtype)) { - err = -EINVAL; - dev_err(&pdev->dev, "Multiple SPU types not allowed"); - return err; - } - } else { - /* Record type of first SPU */ - spu->spu_type = matched_spu_type->type; - spu->spu_subtype = matched_spu_type->subtype; + match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); + if (!match) { + dev_err(&pdev->dev, "Failed to match device\n"); + return -ENODEV; } - /* Get and map SPU registers */ - spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!spu_ctrl_regs) { - err = -EINVAL; - dev_err(&pdev->dev, "Invalid/missing registers for SPU\n"); - return err; - } + matched_spu_type = 
match->data; - spu_reg_vbase[iproc_priv.spu.num_spu] = - devm_ioremap_resource(dev, spu_ctrl_regs); - if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) { - err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]); - dev_err(&pdev->dev, "Failed to map registers: %d\n", - err); - spu_reg_vbase[iproc_priv.spu.num_spu] = NULL; - return err; - } + spu->spu_type = matched_spu_type->type; + spu->spu_subtype = matched_spu_type->subtype; - dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu); + i = 0; + for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = + platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { - spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase; + spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); + if (IS_ERR(spu->reg_vbase[i])) { + err = PTR_ERR(spu->reg_vbase[i]); + dev_err(&pdev->dev, "Failed to map registers: %d\n", + err); + spu->reg_vbase[i] = NULL; + return err; + } + } + spu->num_spu = i; + dev_dbg(dev, "Device has %d SPUs", spu->num_spu); return 0; } @@ -4860,8 +4862,8 @@ int bcm_spu_probe(struct platform_device *pdev) struct spu_hw *spu = &iproc_priv.spu; int err = 0; - iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev; - platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu], + iproc_priv.pdev = pdev; + platform_set_drvdata(iproc_priv.pdev, &iproc_priv); err = spu_dt_read(pdev); @@ -4872,12 +4874,6 @@ int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto failure; - iproc_priv.spu.num_spu++; - - /* If already initialized, we've just added another SPU and are done */ - if (iproc_priv.inited) - return 0; - if (spu->spu_type == SPU_TYPE_SPUM) iproc_priv.bcm_hdr_len = 8; else if (spu->spu_type == SPU_TYPE_SPU2) @@ -4893,8 +4889,6 @@ int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto fail_reg; - iproc_priv.inited = true; - return 0; fail_reg: diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 51dca529ce8f..57a55eb2a255 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -427,10 +427,13 @@ struct spu_hw { /* The number of SPUs on this platform */ u32 num_spu; + + /* The number of SPU channels on this platform */ + u32 num_chan; }; struct device_private { - struct platform_device *pdev[MAX_SPUS]; + struct platform_device *pdev; struct spu_hw spu; @@ -470,12 +473,10 @@ struct device_private { /* Number of ICV check failures for AEAD messages */ atomic_t bad_icv; - struct mbox_client mcl[MAX_SPUS]; - /* Array of mailbox channel pointers, one for each channel */ - struct mbox_chan *mbox[MAX_SPUS]; + struct mbox_client mcl; - /* Driver initialized */ - bool inited; + /* Array of mailbox channel pointers, one for each channel */ + struct mbox_chan **mbox; }; extern struct device_private iproc_priv; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 0488b7f81dcf..54f3b375a453 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -81,40 +81,6 @@ #define debug(format, arg...) 
#endif -#ifdef DEBUG -#include <linux/highmem.h> - -static void dbg_dump_sg(const char *level, const char *prefix_str, - int prefix_type, int rowsize, int groupsize, - struct scatterlist *sg, size_t tlen, bool ascii) -{ - struct scatterlist *it; - void *it_page; - size_t len; - void *buf; - - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) { - /* - * make sure the scatterlist's page - * has a valid virtual memory mapping - */ - it_page = kmap_atomic(sg_page(it)); - if (unlikely(!it_page)) { - printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); - return; - } - - buf = it_page + it->offset; - len = min_t(size_t, tlen, it->length); - print_hex_dump(level, prefix_str, prefix_type, rowsize, - groupsize, buf, len, ascii); - tlen -= len; - - kunmap_atomic(it_page); - } -} -#endif - static struct list_head alg_list; struct caam_alg_entry { @@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); @@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); @@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, ivsize, 1); pr_err("asked=%d, nbytes%d\n", (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes); - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - edesc->src_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents > 1 ? 100 : req->nbytes, 1); len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); @@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - edesc->src_nents > 1 ? 100 : req->nbytes, 1); #endif + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents > 1 ? 
100 : req->nbytes, 1); len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); @@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req) u32 *desc; int ret = 0; -#ifdef DEBUG - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->src, - req->assoclen + req->cryptlen, 1); -#endif + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + req->assoclen + req->cryptlen, 1); /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 6f9c7ec0e339..530c14ee32de 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -599,7 +599,7 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD | JUMP_COND_SELF); + JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); @@ -688,8 +688,7 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | - JUMP_TEST_ALL | JUMP_COND_SHRD | - JUMP_COND_SELF); + JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 78c4c0485c58..2eefc4a26bc2 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -12,7 +12,6 @@ #include "intern.h" #include "desc_constr.h" #include "error.h" -#include "sg_sw_sec4.h" #include "sg_sw_qm.h" #include "key_gen.h" #include "qi.h" @@ -399,6 +398,7 @@ badkey: * @iv_dma: dma address of iv for checking continuity and link table * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table + * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure * @sgt: the h/w link table @@ -409,8 +409,12 @@ struct aead_edesc { dma_addr_t iv_dma; int qm_sg_bytes; dma_addr_t qm_sg_dma; + unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; +#define CAAM_QI_MAX_AEAD_SG \ + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ + sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -431,6 +435,9 @@ struct ablkcipher_edesc { int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; +#define CAAM_QI_MAX_ABLKCIPHER_SG \ + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ + sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -660,6 +667,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_AEAD_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, op_type, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } sg_table = &edesc->sgt[0]; qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); @@ -670,7 +685,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->drv_req.cbk = aead_done; edesc->drv_req.drv_ctx = drv_ctx; - edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4, + edesc->assoclen = cpu_to_caam32(req->assoclen); + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(qidev, edesc->assoclen_dma)) { dev_err(qidev, "unable to map assoclen\n"); @@ -776,9 +792,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *qidev = caam_ctx->qidev; -#ifdef DEBUG int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); #endif @@ -791,14 +807,21 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif ablkcipher_unmap(qidev, edesc, req); qi_cache_free(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. This is used e.g. by the CTS mode. + */ + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, + ivsize, 0); + ablkcipher_request_complete(req, status); } @@ -880,6 +903,15 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } dst_sg_idx = qm_sg_ents; + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, op_type, 0, 0); + return ERR_PTR(-ENOMEM); + } + /* allocate space for base edesc and link tables */ edesc = qi_cache_alloc(GFP_DMA | flags); if (unlikely(!edesc)) { @@ -892,7 +924,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; sg_table = &edesc->sgt[0]; edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); edesc->drv_req.app_ctx = req; @@ -1026,6 +1057,14 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( qm_sg_ents += 1 + mapped_dst_nents; } + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, GIVENCRYPT, 0, 0); + return ERR_PTR(-ENOMEM); + } + /* allocate space for base edesc and link tables */ edesc = qi_cache_alloc(GFP_DMA | flags); if (!edesc) { @@ -1968,7 +2007,7 @@ static struct caam_aead_alg driver_aeads[] = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" - "hmac-sha256-cbc-desi-" + "hmac-sha256-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 910ec61cae09..698580b60b2f 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -791,8 +791,8 @@ static int ahash_update_ctx(struct ahash_request *req) to_hash - *buflen, *next_buflen, 0); } else { - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= - cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - + 1); } desc = edesc->hw_desc; @@ -882,8 +882,7 @@ static int ahash_final_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= - cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 41398da3edf4..fde07d4ff019 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -285,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) if (err) return err; - err = caam_init_buf(ctx, 1); - if (err) - return err; - - return 0; + return caam_init_buf(ctx, 1); } static struct hwrng caam_rng = { diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index dd353e342c12..dacb53fb690e 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -17,6 +17,8 @@ bool caam_little_end; EXPORT_SYMBOL(caam_little_end); +bool caam_dpaa2; +EXPORT_SYMBOL(caam_dpaa2); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -319,8 +321,11 @@ static int caam_remove(struct platform_device *pdev) caam_qi_shutdown(ctrlpriv->qidev); #endif - /* De-initialize RNG state handles initialized by this driver. */ - if (ctrlpriv->rng4_sh_init) + /* + * De-initialize RNG state handles initialized by this driver. + * In case of DPAA 2.x, RNG is managed by MC firmware. + */ + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init) deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); /* Shut down debug views */ @@ -444,7 +449,6 @@ static int caam_probe(struct platform_device *pdev) dev = &pdev->dev; dev_set_drvdata(dev, ctrlpriv); - ctrlpriv->pdev = pdev; nprop = pdev->dev.of_node; /* Enable clocking */ @@ -553,12 +557,17 @@ static int caam_probe(struct platform_device *pdev) /* * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, - * long pointers in master configuration register + * long pointers in master configuration register. + * In case of DPAA 2.x, Management Complex firmware performs + * the configuration. 
*/ - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | - MCFGR_WDENABLE | MCFGR_LARGE_BURST | - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); + if (!caam_dpaa2) + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | + MCFGR_WDENABLE | MCFGR_LARGE_BURST | + (sizeof(dma_addr_t) == sizeof(u64) ? + MCFGR_LONG_PTR : 0)); /* * Read the Compile Time paramters and SCFGR to determine @@ -587,7 +596,9 @@ static int caam_probe(struct platform_device *pdev) JRSTART_JR3_START); if (sizeof(dma_addr_t) == sizeof(u64)) { - if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) + if (caam_dpaa2) + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); else ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); @@ -630,11 +641,9 @@ static int caam_probe(struct platform_device *pdev) ring++; } - /* Check to see if QI present. If so, enable */ - ctrlpriv->qi_present = - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & - CTPR_MS_QI_MASK); - if (ctrlpriv->qi_present) { + /* Check to see if (DPAA 1.x) QI present. If so, enable */ + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); + if (ctrlpriv->qi_present && !caam_dpaa2) { ctrlpriv->qi = (struct caam_queue_if __iomem __force *) ((__force uint8_t *)ctrl + BLOCK_OFFSET * QI_BLOCK_NUMBER @@ -662,8 +671,10 @@ static int caam_probe(struct platform_device *pdev) /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation + * In case of DPAA 2.x, RNG is managed by MC firmware. */ - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { + if (!caam_dpaa2 && + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* @@ -731,63 +742,43 @@ static int caam_probe(struct platform_device *pdev) /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, caam_get_era()); - dev_info(dev, "job rings = %d, qi = %d\n", - ctrlpriv->total_jobrs, ctrlpriv->qi_present); + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present, + caam_dpaa2 ? 
"yes" : "no"); #ifdef CONFIG_DEBUG_FS - - ctrlpriv->ctl_rq_dequeued = - debugfs_create_file("rq_dequeued", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->req_dequeued, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_enc_req = - debugfs_create_file("ob_rq_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_req, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_dec_req = - debugfs_create_file("ib_rq_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_req, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_enc_bytes = - debugfs_create_file("ob_bytes_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ob_prot_bytes = - debugfs_create_file("ob_bytes_protected", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_prot_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_dec_bytes = - debugfs_create_file("ib_bytes_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_bytes, - &caam_fops_u64_ro); - ctrlpriv->ctl_ib_valid_bytes = - debugfs_create_file("ib_bytes_validated", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_valid_bytes, - &caam_fops_u64_ro); + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->req_dequeued, + &caam_fops_u64_ro); + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_req, + &caam_fops_u64_ro); + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_req, + &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_prot_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_bytes, + &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_valid_bytes, + &caam_fops_u64_ro); /* Controller level - global status values */ - ctrlpriv->ctl_faultaddr = - debugfs_create_file("fault_addr", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultaddr, - &caam_fops_u32_ro); - ctrlpriv->ctl_faultdetail = - debugfs_create_file("fault_detail", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultdetail, - &caam_fops_u32_ro); - ctrlpriv->ctl_faultstatus = - debugfs_create_file("fault_status", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->status, - &caam_fops_u32_ro); + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultaddr, + &caam_fops_u32_ro); + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultdetail, + &caam_fops_u32_ro); + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->status, + &caam_fops_u32_ro); /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h index cac5402a46eb..7e7bf68c9ef5 100644 --- a/drivers/crypto/caam/ctrl.h +++ b/drivers/crypto/caam/ctrl.h @@ -10,4 +10,6 @@ /* Prototypes for backend-level services exposed to APIs */ int caam_get_era(void); +extern bool caam_dpaa2; + #endif /* CTRL_H */ diff --git a/drivers/crypto/caam/error.c 
b/drivers/crypto/caam/error.c index 6f44ccb55c63..3d639f3b45aa 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -9,6 +9,46 @@ #include "desc.h" #include "error.h" +#ifdef DEBUG +#include <linux/highmem.h> + +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{ + struct scatterlist *it; + void *it_page; + size_t len; + void *buf; + + for (it = sg; it && tlen > 0 ; it = sg_next(sg)) { + /* + * make sure the scatterlist's page + * has a valid virtual memory mapping + */ + it_page = kmap_atomic(sg_page(it)); + if (unlikely(!it_page)) { + pr_err("caam_dump_sg: kmap failed\n"); + return; + } + + buf = it_page + it->offset; + len = min_t(size_t, tlen, it->length); + print_hex_dump(level, prefix_str, prefix_type, rowsize, + groupsize, buf, len, ascii); + tlen -= len; + + kunmap_atomic(it_page); + } +} +#else +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{} +#endif /* DEBUG */ +EXPORT_SYMBOL(caam_dump_sg); + static const struct { u8 value; const char *error_text; diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index b6350b0d9153..250e1a21c473 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h @@ -8,4 +8,8 @@ #define CAAM_ERROR_H #define CAAM_ERROR_STR_MAX 302 void caam_jr_strstatus(struct device *jrdev, u32 status); + +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii); #endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 85b6c5835b8f..a52361258d3a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -64,12 +64,9 @@ struct caam_drv_private_jr { * Driver-private storage for a single CAAM block instance */ struct caam_drv_private { - - struct device *dev; #ifdef CONFIG_CAAM_QI struct device *qidev; #endif - struct platform_device *pdev; /* Physical-presence section */ struct caam_ctrl __iomem *ctrl; /* controller region */ @@ -105,16 +102,8 @@ struct caam_drv_private { #ifdef CONFIG_DEBUG_FS struct dentry *dfs_root; struct dentry *ctl; /* controller dir */ - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req; - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes; - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes; - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus; - struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; -#ifdef CONFIG_CAAM_QI - struct dentry *qi_congested; -#endif #endif }; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1ccfb317d468..d258953ff488 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -9,6 +9,7 @@ #include <linux/of_address.h> #include "compat.h" +#include "ctrl.h" #include "regs.h" #include "jr.h" #include "desc.h" @@ -499,7 +500,11 @@ static int caam_jr_probe(struct platform_device *pdev) jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; if (sizeof(dma_addr_t) == sizeof(u64)) { - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) + if (caam_dpaa2) + error = dma_set_mask_and_coherent(jrdev, + DMA_BIT_MASK(49)); + else if (of_device_is_compatible(nprop, + "fsl,sec-v5.0-job-ring")) error = dma_set_mask_and_coherent(jrdev, 
DMA_BIT_MASK(40)); else diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 1990ed460c46..e4cf00014233 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -24,9 +24,6 @@ */ #define MAX_RSP_FQ_BACKLOG_PER_CPU 256 -/* Length of a single buffer in the QI driver memory cache */ -#define CAAM_QI_MEMCACHE_SIZE 512 - #define CAAM_QI_ENQUEUE_RETRIES 10000 #define CAAM_NAPI_WEIGHT 63 @@ -55,6 +52,7 @@ struct caam_qi_pcpu_priv { } ____cacheline_aligned; static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv); +static DEFINE_PER_CPU(int, last_cpu); /* * caam_qi_priv - CAAM QI backend private params @@ -203,8 +201,8 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev, goto init_req_fq_fail; } - dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, - smp_processor_id()); + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, + smp_processor_id()); return req_fq; init_req_fq_fail: @@ -277,6 +275,7 @@ empty_fq: dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); qman_destroy_fq(fq); + kfree(fq); return ret; } @@ -342,8 +341,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) - dev_warn(qidev, "New CAAM FQ: %u kill failed\n", - new_fq->fqid); + dev_warn(qidev, "New CAAM FQ kill failed\n"); return ret; } @@ -373,10 +371,9 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) - dev_warn(qidev, "New CAAM FQ: %u kill failed\n", - new_fq->fqid); + dev_warn(qidev, "New CAAM FQ kill failed\n"); } else if (kill_fq(qidev, old_fq)) { - dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid); + dev_warn(qidev, "Old CAAM FQ kill failed\n"); } return 0; @@ -392,7 +389,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, dma_addr_t hwdesc; struct caam_drv_ctx *drv_ctx; const cpumask_t *cpus = qman_affine_cpus(); - static DEFINE_PER_CPU(int, last_cpu); num_words = desc_len(sh_desc); if (num_words > MAX_SDLEN) { @@ -511,7 +507,6 @@ int caam_qi_shutdown(struct device *qidev) if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); - kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); } /* @@ -646,7 +641,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq; - dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); return 0; } @@ -679,7 +674,7 @@ static int init_cgr(struct device *qidev) return ret; } - dev_info(qidev, "Congestion threshold set to %llu\n", val); + dev_dbg(qidev, "Congestion threshold set to %llu\n", val); return 0; } @@ -737,6 +732,7 @@ int caam_qi_init(struct platform_device *caam_pdev) qi_pdev = platform_device_register_full(&qi_pdev_info); if (IS_ERR(qi_pdev)) return PTR_ERR(qi_pdev); + set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); ctrlpriv = dev_get_drvdata(ctrldev); qidev = &qi_pdev->dev; @@ -795,10 +791,8 @@ int caam_qi_init(struct platform_device *caam_pdev) /* Done with the CGRs; restore the cpus allowed mask */ set_cpus_allowed_ptr(current, &old_cpumask); #ifdef CONFIG_DEBUG_FS - ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444, - ctrlpriv->ctl, - ×_congested, - &caam_fops_u64_ro); + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, + ×_congested, &caam_fops_u64_ro); #endif dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); return 
0; diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index 33b0433f5f22..ecb21f207637 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h @@ -39,6 +39,9 @@ */ #define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) +/* Length of a single buffer in the QI driver memory cache */ +#define CAAM_QI_MEMCACHE_SIZE 768 + extern bool caam_congested __read_mostly; /* diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 84d2f838a063..2b5efff9ec3c 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -293,6 +293,7 @@ struct caam_perfmon { u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ #define CTPR_MS_QI_SHIFT 25 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_DPAA2 BIT(13) #define CTPR_MS_VIRT_EN_INCL 0x00000001 #define CTPR_MS_VIRT_EN_POR 0x00000002 #define CTPR_MS_PG_SZ_MASK 0x10 diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h new file mode 100644 index 000000000000..31b440757146 --- /dev/null +++ b/drivers/crypto/caam/sg_sw_qm2.h @@ -0,0 +1,81 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the names of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _SG_SW_QM2_H_ +#define _SG_SW_QM2_H_ + +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" + +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + dpaa2_sg_set_addr(qm_sg_ptr, dma); + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single); + dpaa2_sg_set_final(qm_sg_ptr, false); + dpaa2_sg_set_len(qm_sg_ptr, len); + dpaa2_sg_set_bpid(qm_sg_ptr, 0); + dpaa2_sg_set_offset(qm_sg_ptr, offset); +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct dpaa2_sg_entry * +sg_to_qm_sg(struct scatterlist *sg, int sg_count, + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) +{ + while (sg_count && sg) { + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), + sg_dma_len(sg), offset); + qm_sg_ptr++; + sg = sg_next(sg); + sg_count--; + } + return qm_sg_ptr - 1; +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, + struct dpaa2_sg_entry *qm_sg_ptr, + u16 offset) +{ + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); + dpaa2_sg_set_final(qm_sg_ptr, true); +} + +#endif /* _SG_SW_QM2_H_ */ diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index c6adad09c972..936b1b630058 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -5,7 +5,13 @@ * */ +#ifndef _SG_SW_SEC4_H_ +#define _SG_SW_SEC4_H_ + +#include "ctrl.h" #include "regs.h" +#include "sg_sw_qm2.h" +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" struct sec4_sg_entry { u64 ptr; @@ -19,9 +25,15 @@ struct sec4_sg_entry { static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, dma_addr_t dma, u32 len, u16 offset) { - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); - sec4_sg_ptr->len = cpu_to_caam32(len); - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); + if (caam_dpaa2) { + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len, + offset); + } else { + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); + sec4_sg_ptr->len = cpu_to_caam32(len); + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & + SEC4_SG_OFFSET_MASK); + } #ifdef DEBUG print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, @@ -47,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count, return sec4_sg_ptr - 1; } +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) +{ + if (caam_dpaa2) + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true); + else + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); +} + /* * convert scatterlist to h/w link table format * scatterlist must have been previously dma mapped @@ -56,20 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, u16 offset) { sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); + sg_to_sec4_set_last(sec4_sg_ptr); } -static inline struct sec4_sg_entry *sg_to_sec4_sg_len( - struct scatterlist *sg, unsigned int total, - struct sec4_sg_entry *sec4_sg_ptr) -{ - do { - unsigned int len = min(sg_dma_len(sg), total); - - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); - sec4_sg_ptr++; - sg = sg_next(sg); - total -= len; - } while (total); - return sec4_sg_ptr - 1; -} +#endif /* _SG_SW_SEC4_H_ */ diff --git a/drivers/crypto/ccp/Kconfig 
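The sg_sw_sec4.h changes above make the existing helpers format-agnostic: dma_to_sec4_sg_one() and sg_to_sec4_sg_last() now emit either SEC4 link-table entries or DPAA2 frame-list entries, selected at run time by caam_dpaa2, with only the final-entry marking differing. A minimal usage sketch; build_hw_sg_table(), dev, table and the DMA direction are illustrative assumptions, and, as the header comment says, the scatterlist must be DMA-mapped before conversion.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "sg_sw_sec4.h"

/*
 * Build a hardware S/G table describing 'len' bytes of 'sg'. 'table' must
 * point at enough sec4_sg_entry slots in DMA-able memory; 'dev' is the
 * device performing the DMA (e.g. the job ring device).
 */
static int build_hw_sg_table(struct device *dev, struct scatterlist *sg,
			     unsigned int len, struct sec4_sg_entry *table)
{
	int nents = sg_nents_for_len(sg, len);
	int mapped;

	if (nents < 0)
		return nents;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/*
	 * Fill one entry per mapped segment; the last entry is flagged with
	 * SEC4_SG_LEN_FIN or the DPAA2 final bit, depending on caam_dpaa2.
	 */
	sg_to_sec4_sg_last(sg, mapped, table, 0);

	return mapped;
}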
b/drivers/crypto/ccp/Kconfig index 2238f77aa248..6d626606b9c5 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -1,25 +1,33 @@ config CRYPTO_DEV_CCP_DD - tristate "Cryptographic Coprocessor device driver" - depends on CRYPTO_DEV_CCP + tristate "Secure Processor device driver" default m + help + Provides AMD Secure Processor device driver. + If you choose 'M' here, this module will be called ccp. + +config CRYPTO_DEV_SP_CCP + bool "Cryptographic Coprocessor device" + default y + depends on CRYPTO_DEV_CCP_DD select HW_RANDOM select DMA_ENGINE select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help - Provides the interface to use the AMD Cryptographic Coprocessor - which can be used to offload encryption operations such as SHA, - AES and more. If you choose 'M' here, this module will be called - ccp. + Provides the support for AMD Cryptographic Coprocessor (CCP) device + which can be used to offload encryption operations such as SHA, AES + and more. config CRYPTO_DEV_CCP_CRYPTO tristate "Encryption and hashing offload support" - depends on CRYPTO_DEV_CCP_DD default m + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_CCP select CRYPTO_HASH select CRYPTO_BLKCIPHER select CRYPTO_AUTHENC + select CRYPTO_RSA help Support for using the cryptographic API with the AMD Cryptographic Coprocessor. This module supports offload of SHA and AES algorithms. diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 59493fd3a751..57f8debfcfb3 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -1,12 +1,12 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o -ccp-objs := ccp-dev.o \ +ccp-objs := sp-dev.o sp-platform.o +ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-platform.o \ ccp-dmaengine.o \ ccp-debugfs.o -ccp-$(CONFIG_PCI) += ccp-pci.o +ccp-$(CONFIG_PCI) += sp-pci.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ @@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes-xts.o \ ccp-crypto-aes-galois.o \ ccp-crypto-des3.o \ + ccp-crypto-rsa.o \ ccp-crypto-sha.o diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index 38ee6f348ea9..52313524a4dd 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <gary.hook@amd.com> * diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 58a4244b4752..94b5bcf5b628 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -1,8 +1,9 @@ /* * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 
* + * Author: Gary R Hook <gary.hook@amd.com> * Author: Tom Lendacky <thomas.lendacky@amd.com> * * This program is free software; you can redistribute it and/or modify @@ -15,6 +16,7 @@ #include <linux/delay.h> #include <linux/scatterlist.h> #include <crypto/aes.h> +#include <crypto/xts.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> @@ -37,46 +39,26 @@ struct ccp_unit_size_map { u32 value; }; -static struct ccp_unit_size_map unit_size_map[] = { +static struct ccp_unit_size_map xts_unit_sizes[] = { { - .size = 4096, - .value = CCP_XTS_AES_UNIT_SIZE_4096, - }, - { - .size = 2048, - .value = CCP_XTS_AES_UNIT_SIZE_2048, - }, - { - .size = 1024, - .value = CCP_XTS_AES_UNIT_SIZE_1024, + .size = 16, + .value = CCP_XTS_AES_UNIT_SIZE_16, }, { - .size = 512, + .size = 512, .value = CCP_XTS_AES_UNIT_SIZE_512, }, { - .size = 256, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 128, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 64, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, - }, - { - .size = 32, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, + .size = 1024, + .value = CCP_XTS_AES_UNIT_SIZE_1024, }, { - .size = 16, - .value = CCP_XTS_AES_UNIT_SIZE_16, + .size = 2048, + .value = CCP_XTS_AES_UNIT_SIZE_2048, }, { - .size = 1, - .value = CCP_XTS_AES_UNIT_SIZE__LAST, + .size = 4096, + .value = CCP_XTS_AES_UNIT_SIZE_4096, }, }; @@ -96,15 +78,26 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int key_len) { - struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); + struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm); + struct ccp_ctx *ctx = crypto_tfm_ctx(xfm); + unsigned int ccpversion = ccp_version(); + int ret; - /* Only support 128-bit AES key with a 128-bit Tweak key, - * otherwise use the fallback + ret = xts_check_key(xfm, key, key_len); + if (ret) + return ret; + + /* Version 3 devices support 128-bit keys; version 5 devices can + * accommodate 128- and 256-bit keys. */ switch (key_len) { case AES_KEYSIZE_128 * 2: memcpy(ctx->u.aes.key, key, key_len); break; + case AES_KEYSIZE_256 * 2: + if (ccpversion > CCP_VERSION(3, 0)) + memcpy(ctx->u.aes.key, key, key_len); + break; } ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); @@ -117,6 +110,8 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, { struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); + unsigned int ccpversion = ccp_version(); + unsigned int fallback = 0; unsigned int unit; u32 unit_size; int ret; @@ -130,18 +125,32 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; + /* Check conditions under which the CCP can fulfill a request. The + * device can handle input plaintext of a length that is a multiple + * of the unit_size, bug the crypto implementation only supports + * the unit_size being equal to the input length. This limits the + * number of scenarios we can handle. 
+ */ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; - if (req->nbytes <= unit_size_map[0].size) { - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { - if (!(req->nbytes & (unit_size_map[unit].size - 1))) { - unit_size = unit_size_map[unit].value; - break; - } + for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) { + if (req->nbytes == xts_unit_sizes[unit].size) { + unit_size = unit; + break; } } - - if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || - (ctx->u.aes.key_len != AES_KEYSIZE_128)) { + /* The CCP has restrictions on block sizes. Also, a version 3 device + * only supports AES-128 operations; version 5 CCPs support both + * AES-128 and -256 operations. + */ + if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) + fallback = 1; + if ((ccpversion < CCP_VERSION(5, 0)) && + (ctx->u.aes.key_len != AES_KEYSIZE_128)) + fallback = 1; + if ((ctx->u.aes.key_len != AES_KEYSIZE_128) && + (ctx->u.aes.key_len != AES_KEYSIZE_256)) + fallback = 1; + if (fallback) { SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher); /* Use the fallback to process the request for any @@ -164,6 +173,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; + rctx->cmd.u.xts.type = CCP_AES_TYPE_128; rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; rctx->cmd.u.xts.unit_size = unit_size; diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index 5af7347ae03c..ae87b741f9d5 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <ghook@amd.com> * diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 8dccbddabef1..35a9de7fd475 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -17,6 +17,7 @@ #include <linux/ccp.h> #include <linux/scatterlist.h> #include <crypto/internal/hash.h> +#include <crypto/internal/akcipher.h> #include "ccp-crypto.h" @@ -37,10 +38,15 @@ static unsigned int des3_disable; module_param(des3_disable, uint, 0444); MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value"); +static unsigned int rsa_disable; +module_param(rsa_disable, uint, 0444); +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(cipher_algs); static LIST_HEAD(aead_algs); +static LIST_HEAD(akcipher_algs); /* For any tfm, requests for that tfm must be returned on the order * received. 
With multiple queues available, the CCP can process more @@ -358,6 +364,12 @@ static int ccp_register_algs(void) return ret; } + if (!rsa_disable) { + ret = ccp_register_rsa_algs(&akcipher_algs); + if (ret) + return ret; + } + return 0; } @@ -366,6 +378,7 @@ static void ccp_unregister_algs(void) struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; struct ccp_crypto_aead *aead_alg, *aead_tmp; + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp; list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { crypto_unregister_ahash(&ahash_alg->alg); @@ -384,6 +397,12 @@ static void ccp_unregister_algs(void) list_del(&aead_alg->entry); kfree(aead_alg); } + + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) { + crypto_unregister_akcipher(&akc_alg->alg); + list_del(&akc_alg->entry); + kfree(akc_alg); + } } static int ccp_crypto_init(void) diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c new file mode 100644 index 000000000000..e6db8672d89c --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -0,0 +1,299 @@ +/* + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Gary R Hook <gary.hook@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/akcipher.h> +#include <crypto/akcipher.h> +#include <crypto/scatterwalk.h> + +#include "ccp-crypto.h" + +static inline struct akcipher_request *akcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct akcipher_request, base); +} + +static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen, + const u8 *buf, size_t sz) +{ + int nskip; + + for (nskip = 0; nskip < sz; nskip++) + if (buf[nskip]) + break; + *kplen = sz - nskip; + *kpbuf = kzalloc(*kplen, GFP_KERNEL); + if (!*kpbuf) + return -ENOMEM; + memcpy(*kpbuf, buf + nskip, *kplen); + + return 0; +} + +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = akcipher_request_cast(async_req); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + req->dst_len = rctx->cmd.u.rsa.key_size >> 3; + + return 0; +} + +static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) +{ + if (ccp_version() > CCP_VERSION(3, 0)) + return CCP5_RSA_MAXMOD; + else + return CCP_RSA_MAXMOD; +} + +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); + int ret = 0; + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_RSA; + + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */ + if (encrypt) { + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg; + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len; + } else { + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg; + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len; + } + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg; + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len; + 
rctx->cmd.u.rsa.src = req->src; + rctx->cmd.u.rsa.src_len = req->src_len; + rctx->cmd.u.rsa.dst = req->dst; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_rsa_encrypt(struct akcipher_request *req) +{ + return ccp_rsa_crypt(req, true); +} + +static int ccp_rsa_decrypt(struct akcipher_request *req) +{ + return ccp_rsa_crypt(req, false); +} + +static int ccp_check_key_length(unsigned int len) +{ + /* In bits */ + if (len < 8 || len > 4096) + return -EINVAL; + return 0; +} + +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) +{ + /* Clean up old key data */ + kzfree(ctx->u.rsa.e_buf); + ctx->u.rsa.e_buf = NULL; + ctx->u.rsa.e_len = 0; + kzfree(ctx->u.rsa.n_buf); + ctx->u.rsa.n_buf = NULL; + ctx->u.rsa.n_len = 0; + kzfree(ctx->u.rsa.d_buf); + ctx->u.rsa.d_buf = NULL; + ctx->u.rsa.d_len = 0; +} + +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen, bool private) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key; + int ret; + + ccp_rsa_free_key_bufs(ctx); + memset(&raw_key, 0, sizeof(raw_key)); + + /* Code borrowed from crypto/rsa.c */ + if (private) + ret = rsa_parse_priv_key(&raw_key, key, keylen); + else + ret = rsa_parse_pub_key(&raw_key, key, keylen); + if (ret) + goto n_key; + + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len, + raw_key.n, raw_key.n_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len); + + ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */ + if (ccp_check_key_length(ctx->u.rsa.key_len)) { + ret = -EINVAL; + goto key_err; + } + + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len, + raw_key.e, raw_key.e_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len); + + if (private) { + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf, + &ctx->u.rsa.d_len, + raw_key.d, raw_key.d_sz); + if (ret) + goto key_err; + sg_init_one(&ctx->u.rsa.d_sg, + ctx->u.rsa.d_buf, ctx->u.rsa.d_len); + } + + return 0; + +key_err: + ccp_rsa_free_key_bufs(ctx); + +n_key: + return ret; +} + +static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + return ccp_rsa_setkey(tfm, key, keylen, true); +} + +static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + return ccp_rsa_setkey(tfm, key, keylen, false); +} + +static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx)); + ctx->complete = ccp_rsa_complete; + + return 0; +} + +static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + ccp_rsa_free_key_bufs(ctx); +} + +static struct akcipher_alg ccp_rsa_defaults = { + .encrypt = ccp_rsa_encrypt, + .decrypt = ccp_rsa_decrypt, + .sign = ccp_rsa_decrypt, + .verify = ccp_rsa_encrypt, + .set_pub_key = ccp_rsa_setpubkey, + .set_priv_key = ccp_rsa_setprivkey, + .max_size = ccp_rsa_maxsize, + .init = ccp_rsa_init_tfm, + .exit = ccp_rsa_exit_tfm, + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-ccp", + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = 2 * sizeof(struct ccp_ctx), + }, +}; + +struct ccp_rsa_def { + unsigned int version; + const char *name; + const char *driver_name; + unsigned int reqsize; + struct akcipher_alg *alg_defaults; +}; + +static 
struct ccp_rsa_def rsa_algs[] = { + { + .version = CCP_VERSION(3, 0), + .name = "rsa", + .driver_name = "rsa-ccp", + .reqsize = sizeof(struct ccp_rsa_req_ctx), + .alg_defaults = &ccp_rsa_defaults, + } +}; + +int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_rsa_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + /* Register the RSA algorithm in standard mode + * This works for CCP v3 and later + */ + for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) { + if (rsa_algs[i].version > ccpversion) + continue; + ret = ccp_register_rsa_alg(head, &rsa_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index ce97b3868f4a..8b9b16d433f7 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index dd5bf15f06e5..b9fd090c46c2 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 
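ccp_copy_and_save_keypart() above keeps only the significant bytes of each key component: the raw big-endian integers handed back by rsa_parse_pub_key()/rsa_parse_priv_key() may carry leading zero octets, and those are skipped before the copy. A small illustration with a made-up public exponent (values chosen purely for the example):

	/* e = 65537 as it might arrive from the parsed key, including a
	 * leading zero byte */
	static const u8 e_raw[] = { 0x00, 0x01, 0x00, 0x01 };
	u8 *e_buf;
	unsigned int e_len;

	if (ccp_copy_and_save_keypart(&e_buf, &e_len, e_raw, sizeof(e_raw)))
		return -ENOMEM;
	/* now e_len == 3 and e_buf holds { 0x01, 0x00, 0x01 } */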
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * @@ -24,6 +24,8 @@ #include <crypto/ctr.h> #include <crypto/hash.h> #include <crypto/sha.h> +#include <crypto/akcipher.h> +#include <crypto/internal/rsa.h> #define CCP_LOG_LEVEL KERN_INFO @@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg { struct ahash_alg alg; }; +struct ccp_crypto_akcipher_alg { + struct list_head entry; + + struct akcipher_alg alg; +}; + static inline struct ccp_crypto_ablkcipher_alg * ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm) { @@ -91,7 +99,7 @@ struct ccp_aes_ctx { struct scatterlist key_sg; unsigned int key_len; - u8 key[AES_MAX_KEY_SIZE]; + u8 key[AES_MAX_KEY_SIZE * 2]; u8 nonce[CTR_RFC3686_NONCE_SIZE]; @@ -227,12 +235,35 @@ struct ccp_sha_exp_ctx { u8 buf[MAX_SHA_BLOCK_SIZE]; }; +/***** RSA related defines *****/ + +struct ccp_rsa_ctx { + unsigned int key_len; /* in bits */ + struct scatterlist e_sg; + u8 *e_buf; + unsigned int e_len; + struct scatterlist n_sg; + u8 *n_buf; + unsigned int n_len; + struct scatterlist d_sg; + u8 *d_buf; + unsigned int d_len; +}; + +struct ccp_rsa_req_ctx { + struct ccp_cmd cmd; +}; + +#define CCP_RSA_MAXMOD (4 * 1024 / 8) +#define CCP5_RSA_MAXMOD (16 * 1024 / 8) + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); union { struct ccp_aes_ctx aes; + struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; } u; @@ -249,5 +280,6 @@ int ccp_register_aes_xts_algs(struct list_head *head); int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); +int ccp_register_rsa_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 3cd6c83754e0..59d4ca4e72d8 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); if (!ccp->debugfs_instance) - return; + goto err; debugfs_info = debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp, &ccp_debugfs_info_ops); if (!debugfs_info) - return; + goto err; debugfs_stats = debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp, &ccp_debugfs_stats_ops); if (!debugfs_stats) - return; + goto err; for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -327,15 +327,20 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) debugfs_q_instance = debugfs_create_dir(name, ccp->debugfs_instance); if (!debugfs_q_instance) - return; + goto err; debugfs_q_stats = debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q, &ccp_debugfs_queue_ops); if (!debugfs_q_stats) - return; + goto err; } + + return; + +err: + debugfs_remove_recursive(ccp->debugfs_instance); } void ccp5_debugfs_destroy(void) diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 367c2e30656f..240bebbcb8ac 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 
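The ccp5_debugfs_setup() error path above and the CAAM debugfs hunks earlier both lean on the same debugfs property: individual file dentries never need to be kept, because a single debugfs_remove_recursive() on the enclosing directory tears the whole tree down. A stripped-down sketch of the pattern (simplified; the CAAM counters are device registers, which is why that driver uses its custom caam_fops_u64_ro files rather than plain debugfs_create_u64()):

#include <linux/debugfs.h>

struct perf_counters {
	u64 req_dequeued;
	u64 ob_enc_req;
};

static struct dentry *dfs_root;

static void perf_debugfs_init(struct perf_counters *perfmon)
{
	struct dentry *ctl;

	dfs_root = debugfs_create_dir("example", NULL);
	ctl = debugfs_create_dir("ctl", dfs_root);

	/* The per-file dentries are deliberately not stored anywhere. */
	debugfs_create_u64("rq_dequeued", 0444, ctl, &perfmon->req_dequeued);
	debugfs_create_u64("ob_rq_encrypted", 0444, ctl, &perfmon->ob_enc_req);
}

static void perf_debugfs_exit(void)
{
	/* One recursive remove cleans up the directory and every file in it. */
	debugfs_remove_recursive(dfs_root);
}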
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data) static irqreturn_t ccp_irq_handler(int irq, void *data) { - struct device *dev = data; - struct ccp_device *ccp = dev_get_drvdata(dev); + struct ccp_device *ccp = (struct ccp_device *)data; ccp_disable_queue_interrupts(ccp); if (ccp->use_tasklet) @@ -454,7 +453,7 @@ static int ccp_init(struct ccp_device *ccp) iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); /* Request an irq */ - ret = ccp->get_irq(ccp); + ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; @@ -511,7 +510,7 @@ e_kthread: if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) @@ -550,7 +549,7 @@ static void ccp_destroy(struct ccp_device *ccp) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); for (i = 0; i < ccp->cmd_q_count; i++) dma_pool_destroy(ccp->cmd_q[i].dma_pool); @@ -586,10 +585,17 @@ static const struct ccp_actions ccp3_actions = { .irqhandler = ccp_irq_handler, }; +const struct ccp_vdata ccpv3_platform = { + .version = CCP_VERSION(3, 0), + .setup = NULL, + .perform = &ccp3_actions, + .offset = 0, +}; + const struct ccp_vdata ccpv3 = { .version = CCP_VERSION(3, 0), .setup = NULL, .perform = &ccp3_actions, - .bar = 2, .offset = 0x20000, + .rsamax = CCP_RSA_MAX_WIDTH, }; diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index b10d2d2075cb..65604fc65e8f 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. 
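The v3 init/destroy paths above (and the v5 paths below) no longer go through per-device get_irq()/free_irq() callbacks: interrupt setup is delegated to the shared secure-processor layer, which also explains why the interrupt handler now receives the ccp_device pointer directly instead of fetching it from drvdata. A sketch of the prototypes as implied by the call sites; the actual declarations belong to the new sp-dev.h, which is not part of this hunk, so treat these as assumptions:

/* Inferred from sp_request_ccp_irq(ccp->sp, handler, ccp->name, ccp) and
 * sp_free_ccp_irq(ccp->sp, ccp) as used above. */
int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
		       const char *name, void *data);
void sp_free_ccp_irq(struct sp_device *sp, void *data);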
* * Author: Gary R Hook <gary.hook@amd.com> * @@ -145,6 +145,7 @@ union ccp_function { #define CCP_AES_MODE(p) ((p)->aes.mode) #define CCP_AES_TYPE(p) ((p)->aes.type) #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) +#define CCP_XTS_TYPE(p) ((p)->aes_xts.type) #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) #define CCP_DES3_SIZE(p) ((p)->des3.size) #define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt) @@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op) CCP5_CMD_PROT(&desc) = 0; function.raw = 0; + CCP_XTS_TYPE(&function) = op->u.xts.type; CCP_XTS_ENCRYPT(&function) = op->u.xts.action; CCP_XTS_SIZE(&function) = op->u.xts.unit_size; CCP5_CMD_FUNCTION(&desc) = function.raw; @@ -469,7 +471,7 @@ static int ccp5_perform_rsa(struct ccp_op *op) CCP5_CMD_PROT(&desc) = 0; function.raw = 0; - CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3; + CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; @@ -484,10 +486,10 @@ static int ccp5_perform_rsa(struct ccp_op *op) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - /* Exponent is in LSB memory */ - CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE; - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + /* Key (Exponent) is in external memory */ + CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); + CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; return ccp5_do_cmd(&desc, op->cmd_q); } @@ -769,8 +771,7 @@ static void ccp5_irq_bh(unsigned long data) static irqreturn_t ccp5_irq_handler(int irq, void *data) { - struct device *dev = data; - struct ccp_device *ccp = dev_get_drvdata(dev); + struct ccp_device *ccp = (struct ccp_device *)data; ccp5_disable_queue_interrupts(ccp); ccp->total_interrupts++; @@ -881,7 +882,7 @@ static int ccp5_init(struct ccp_device *ccp) dev_dbg(dev, "Requesting an IRQ...\n"); /* Request an irq */ - ret = ccp->get_irq(ccp); + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; @@ -987,7 +988,7 @@ e_kthread: kthread_stop(ccp->cmd_q[i].kthread); e_irq: - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) @@ -1037,7 +1038,7 @@ static void ccp5_destroy(struct ccp_device *ccp) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); - ccp->free_irq(ccp); + sp_free_ccp_irq(ccp->sp, ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -1106,15 +1107,14 @@ static const struct ccp_actions ccp5_actions = { .init = ccp5_init, .destroy = ccp5_destroy, .get_free_slots = ccp5_get_free_slots, - .irqhandler = ccp5_irq_handler, }; const struct ccp_vdata ccpv5a = { .version = CCP_VERSION(5, 0), .setup = ccp5_config, .perform = &ccp5_actions, - .bar = 2, .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv5b = { @@ -1122,6 +1122,6 @@ const struct ccp_vdata ccpv5b = { .dma_chan_attr = DMA_PRIVATE, .setup = ccp5other_config, .perform = &ccp5_actions, - .bar = 2, .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, }; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 2506b5025700..4e029b176641 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
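Two v5 RSA details above deserve a note: the byte count handed to the engine is now rounded up rather than truncated, and the exponent is fetched from system memory over DMA instead of being staged in an LSB slot. The rounding, with illustrative numbers:

	/* CCP_RSA_SIZE() takes the modulus length in bytes, rounded up. */
	unsigned int mod_size = 2048;			/* key size in bits */
	unsigned int mod_bytes = (mod_size + 7) >> 3;	/* 256 */

	/* The old 'mod_size >> 3' truncated instead: a hypothetical 1023-bit
	 * modulus would have yielded 127 bytes rather than the required 128. */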
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched.h> @@ -30,12 +29,6 @@ #include "ccp-dev.h" -MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); -MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.1.0"); -MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); - struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; @@ -111,13 +104,6 @@ static LIST_HEAD(ccp_units); static DEFINE_SPINLOCK(ccp_rr_lock); static struct ccp_device *ccp_rr; -/* Ever-increasing value to produce unique unit numbers */ -static atomic_t ccp_unit_ordinal; -static unsigned int ccp_increment_unit_ordinal(void) -{ - return atomic_inc_return(&ccp_unit_ordinal); -} - /** * ccp_add_device - add a CCP device to the list * @@ -415,6 +401,7 @@ static void ccp_do_cmd_complete(unsigned long data) struct ccp_cmd *cmd = tdata->cmd; cmd->callback(cmd->data, cmd->ret); + complete(&tdata->completion); } @@ -464,14 +451,17 @@ int ccp_cmd_queue_thread(void *data) * * @dev: device struct of the CCP */ -struct ccp_device *ccp_alloc_struct(struct device *dev) +struct ccp_device *ccp_alloc_struct(struct sp_device *sp) { + struct device *dev = sp->dev; struct ccp_device *ccp; ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); if (!ccp) return NULL; ccp->dev = dev; + ccp->sp = sp; + ccp->axcache = sp->axcache; INIT_LIST_HEAD(&ccp->cmd); INIT_LIST_HEAD(&ccp->backlog); @@ -486,9 +476,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); - ccp->ord = ccp_increment_unit_ordinal(); - snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); - snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); + snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord); + snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord); return ccp; } @@ -538,55 +527,100 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -#endif -static int __init ccp_mod_init(void) +int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) { -#ifdef CONFIG_X86 - int ret; + struct ccp_device *ccp = sp->ccp_data; + unsigned long flags; + unsigned int i; - ret = ccp_pci_init(); - if (ret) - return ret; + spin_lock_irqsave(&ccp->cmd_lock, flags); - /* Don't leave the driver loaded if init failed */ - if (ccp_present() != 0) { - ccp_pci_exit(); - return -ENODEV; + ccp->suspending = 1; + + /* Wake all the queue kthreads to prepare for suspend */ + for (i = 0; i < ccp->cmd_q_count; i++) + wake_up_process(ccp->cmd_q[i].kthread); + + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + + /* Wait for all queue kthreads to say they're done */ + while (!ccp_queues_suspended(ccp)) + wait_event_interruptible(ccp->suspend_queue, + ccp_queues_suspended(ccp)); + + return 0; +} + +int ccp_dev_resume(struct sp_device *sp) +{ + struct ccp_device *ccp = sp->ccp_data; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&ccp->cmd_lock, flags); + + ccp->suspending = 0; + + /* Wake up all the kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].suspended = 0; + wake_up_process(ccp->cmd_q[i].kthread); } + spin_unlock_irqrestore(&ccp->cmd_lock, flags); + return 0; +} #endif -#ifdef CONFIG_ARM64 +int 
ccp_dev_init(struct sp_device *sp) +{ + struct device *dev = sp->dev; + struct ccp_device *ccp; int ret; - ret = ccp_platform_init(); + ret = -ENOMEM; + ccp = ccp_alloc_struct(sp); + if (!ccp) + goto e_err; + sp->ccp_data = ccp; + + ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; + if (!ccp->vdata || !ccp->vdata->version) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ccp->use_tasklet = sp->use_tasklet; + + ccp->io_regs = sp->io_map + ccp->vdata->offset; + if (ccp->vdata->setup) + ccp->vdata->setup(ccp); + + ret = ccp->vdata->perform->init(ccp); if (ret) - return ret; + goto e_err; - /* Don't leave the driver loaded if init failed */ - if (ccp_present() != 0) { - ccp_platform_exit(); - return -ENODEV; - } + dev_notice(dev, "ccp enabled\n"); return 0; -#endif - return -ENODEV; +e_err: + sp->ccp_data = NULL; + + dev_notice(dev, "ccp initialization failed\n"); + + return ret; } -static void __exit ccp_mod_exit(void) +void ccp_dev_destroy(struct sp_device *sp) { -#ifdef CONFIG_X86 - ccp_pci_exit(); -#endif + struct ccp_device *ccp = sp->ccp_data; -#ifdef CONFIG_ARM64 - ccp_platform_exit(); -#endif -} + if (!ccp) + return; -module_init(ccp_mod_init); -module_exit(ccp_mod_exit); + ccp->vdata->perform->destroy(ccp); +} diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index a70154ac7405..6810b65c1939 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -27,6 +27,8 @@ #include <linux/irqreturn.h> #include <linux/dmaengine.h> +#include "sp-dev.h" + #define MAX_CCP_NAME_LEN 16 #define MAX_DMAPOOL_NAME_LEN 32 @@ -192,6 +194,7 @@ #define CCP_AES_CTX_SB_COUNT 1 #define CCP_XTS_AES_KEY_SB_COUNT 1 +#define CCP5_XTS_AES_KEY_SB_COUNT 2 #define CCP_XTS_AES_CTX_SB_COUNT 1 #define CCP_DES3_KEY_SB_COUNT 1 @@ -200,6 +203,7 @@ #define CCP_SHA_SB_COUNT 1 #define CCP_RSA_MAX_WIDTH 4096 +#define CCP5_RSA_MAX_WIDTH 16384 #define CCP_PASSTHRU_BLOCKSIZE 256 #define CCP_PASSTHRU_MASKSIZE 32 @@ -344,12 +348,11 @@ struct ccp_device { char rngname[MAX_CCP_NAME_LEN]; struct device *dev; + struct sp_device *sp; /* Bus specific device information */ void *dev_specific; - int (*get_irq)(struct ccp_device *ccp); - void (*free_irq)(struct ccp_device *ccp); unsigned int qim; unsigned int irq; bool use_tasklet; @@ -362,7 +365,6 @@ struct ccp_device { * them. */ struct mutex req_mutex ____cacheline_aligned; - void __iomem *io_map; void __iomem *io_regs; /* Master lists that all cmds are queued on. 
Because there can be @@ -497,6 +499,7 @@ struct ccp_aes_op { }; struct ccp_xts_aes_op { + enum ccp_aes_type type; enum ccp_aes_action action; enum ccp_xts_aes_unit_size unit_size; }; @@ -626,18 +629,12 @@ struct ccp5_desc { struct dword7 dw7; }; -int ccp_pci_init(void); -void ccp_pci_exit(void); - -int ccp_platform_init(void); -void ccp_platform_exit(void); - void ccp_add_device(struct ccp_device *ccp); void ccp_del_device(struct ccp_device *ccp); extern void ccp_log_error(struct ccp_device *, int); -struct ccp_device *ccp_alloc_struct(struct device *dev); +struct ccp_device *ccp_alloc_struct(struct sp_device *sp); bool ccp_queues_suspended(struct ccp_device *ccp); int ccp_cmd_queue_thread(void *data); int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); @@ -669,16 +666,7 @@ struct ccp_actions { irqreturn_t (*irqhandler)(int, void *); }; -/* Structure to hold CCP version-specific values */ -struct ccp_vdata { - const unsigned int version; - const unsigned int dma_chan_attr; - void (*setup)(struct ccp_device *); - const struct ccp_actions *perform; - const unsigned int bar; - const unsigned int offset; -}; - +extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index e00be01fbf5a..901343dd513e 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <gary.hook@amd.com> * diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c0dfdacbdff5..406b95329b3d 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -168,7 +168,7 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); - if (!wa->dma.address) + if (dma_mapping_error(wa->dev, wa->dma.address)) return -ENOMEM; wa->dma.length = len; @@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_op op; unsigned int unit_size, dm_offset; bool in_place = false; + unsigned int sb_count; + enum ccp_aes_type aestype; int ret; switch (xts->unit_size) { @@ -1061,7 +1063,11 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, return -EINVAL; } - if (xts->key_len != AES_KEYSIZE_128) + if (xts->key_len == AES_KEYSIZE_128) + aestype = CCP_AES_TYPE_128; + else if (xts->key_len == AES_KEYSIZE_256) + aestype = CCP_AES_TYPE_256; + else return -EINVAL; if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) @@ -1083,23 +1089,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; + op.u.xts.type = aestype; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; - /* All supported key sizes fit in a single (32-byte) SB entry - * and must be in little endian format. Use the 256-bit byte - * swap passthru option to convert from big endian to little - * endian. 
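struct ccp_vdata disappears from ccp-dev.h above; given the new '#include "sp-dev.h"' and the ccpv3/ccpv5a/ccpv5b initializers elsewhere in this patch, its definition evidently moves there, gaining an rsamax limit and losing the PCI bar. A reconstruction of the likely shape, inferred from those initializers rather than taken from the (unshown) header:

struct ccp_vdata {
	const unsigned int version;
	const unsigned int dma_chan_attr;
	void (*setup)(struct ccp_device *);
	const struct ccp_actions *perform;
	const unsigned int offset;	/* MMIO offset from the sp_device mapping */
	const unsigned int rsamax;	/* largest supported RSA modulus, in bits */
};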
+ /* A version 3 device only supports 128-bit keys, which fits into a + * single SB entry. A version 5 device uses a 512-bit vector, so two + * SB entries. */ + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) + sb_count = CCP_XTS_AES_KEY_SB_COUNT; + else + sb_count = CCP5_XTS_AES_KEY_SB_COUNT; ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES, + sb_count * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; - dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; - ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); - ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { + /* All supported key sizes must be in little endian format. + * Use the 256-bit byte swap passthru option to convert from + * big endian to little endian. + */ + dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; + ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); + ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); + } else { + /* Version 5 CCPs use a 512-bit space for the key: each portion + * occupies 256 bits, or one entire slot, and is zero-padded. + */ + unsigned int pad; + + dm_offset = CCP_SB_BYTES; + pad = dm_offset - xts->key_len; + ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); + ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, + xts->key_len); + } ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { @@ -1731,42 +1758,53 @@ e_ctx: static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; - struct ccp_dm_workarea exp, src; - struct ccp_data dst; + struct ccp_dm_workarea exp, src, dst; struct ccp_op op; unsigned int sb_count, i_len, o_len; int ret; - if (rsa->key_size > CCP_RSA_MAX_WIDTH) + /* Check against the maximum allowable size, in bits */ + if (rsa->key_size > cmd_q->ccp->vdata->rsamax) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + /* The RSA modulus must precede the message being acted upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which - * must be a multiple of 256-bits). + * must be a multiple of 256-bits). Compute o_len, i_len in bytes. + * Buffer sizes must be a multiple of 32 bytes; rounding up may be + * required. */ - o_len = ((rsa->key_size + 255) / 256) * 32; + o_len = 32 * ((rsa->key_size + 255) / 256); i_len = o_len * 2; - sb_count = o_len / CCP_SB_BYTES; - - memset(&op, 0, sizeof(op)); - op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); - - if (!op.sb_key) - return -EIO; + sb_count = 0; + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { + /* sb_count is the number of storage block slots required + * for the modulus. + */ + sb_count = o_len / CCP_SB_BYTES; + op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, + sb_count); + if (!op.sb_key) + return -EIO; + } else { + /* A version 5 device allows a modulus size that will not fit + * in the LSB, so the command will transfer it from memory. + * Set the sb key to the default, even though it's not used. + */ + op.sb_key = cmd_q->sb_key; + } - /* The RSA exponent may span multiple (32-byte) SB entries and must - * be in little endian format. 
Reverse copy each 32-byte chunk - * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) - * and each byte within that chunk and do not perform any byte swap - * operations on the passthru operation. + /* The RSA exponent must be in little endian format. Reverse its + * byte order. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) @@ -1775,11 +1813,22 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); if (ret) goto e_exp; - ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, - CCP_PASSTHRU_BYTESWAP_NOOP); - if (ret) { - cmd->engine_error = cmd_q->cmd_error; - goto e_exp; + + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { + /* Copy the exponent to the local storage block, using + * as many 32-byte blocks as were allocated above. It's + * already little endian, so no further change is required. + */ + ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_exp; + } + } else { + /* The exponent can be retrieved from memory via DMA. */ + op.exp.u.dma.address = exp.dma.address; + op.exp.u.dma.offset = 0; } /* Concatenate the modulus and the message. Both the modulus and @@ -1798,8 +1847,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; /* Prepare the output area for the operation */ - ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len, - o_len, DMA_FROM_DEVICE); + ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; @@ -1807,7 +1855,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; - op.dst.u.dma.address = dst.dm_wa.dma.address; + op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; @@ -1820,10 +1868,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len); + ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); e_dst: - ccp_free_data(&dst, cmd_q); + ccp_dm_free(&dst); e_src: ccp_dm_free(&src); @@ -1832,7 +1880,8 @@ e_exp: ccp_dm_free(&exp); e_sb: - cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); + if (sb_count) + cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; } @@ -1992,7 +2041,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c deleted file mode 100644 index e880d4cf4ada..000000000000 --- a/drivers/crypto/ccp/ccp-pci.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * AMD Cryptographic Coprocessor (CCP) driver - * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. - * - * Author: Tom Lendacky <thomas.lendacky@amd.com> - * Author: Gary R Hook <gary.hook@amd.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
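Concretely, the buffer sizing in ccp_run_rsa_cmd() above works out as follows: a 2048-bit key gives o_len = 32 * ((2048 + 255) / 256) = 256 bytes and i_len = 512 bytes (modulus followed by the message), while a key size just past a 256-bit boundary rounds up, e.g. 2049 bits would give o_len = 288 bytes.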
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/pci_ids.h> -#include <linux/dma-mapping.h> -#include <linux/kthread.h> -#include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/delay.h> -#include <linux/ccp.h> - -#include "ccp-dev.h" - -#define MSIX_VECTORS 2 - -struct ccp_msix { - u32 vector; - char name[16]; -}; - -struct ccp_pci { - int msix_count; - struct ccp_msix msix[MSIX_VECTORS]; -}; - -static int ccp_get_msix_irqs(struct ccp_device *ccp) -{ - struct ccp_pci *ccp_pci = ccp->dev_specific; - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - struct msix_entry msix_entry[MSIX_VECTORS]; - unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; - int v, ret; - - for (v = 0; v < ARRAY_SIZE(msix_entry); v++) - msix_entry[v].entry = v; - - ret = pci_enable_msix_range(pdev, msix_entry, 1, v); - if (ret < 0) - return ret; - - ccp_pci->msix_count = ret; - for (v = 0; v < ccp_pci->msix_count; v++) { - /* Set the interrupt names and request the irqs */ - snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", - ccp->name, v); - ccp_pci->msix[v].vector = msix_entry[v].vector; - ret = request_irq(ccp_pci->msix[v].vector, - ccp->vdata->perform->irqhandler, - 0, ccp_pci->msix[v].name, dev); - if (ret) { - dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", - ret); - goto e_irq; - } - } - ccp->use_tasklet = true; - - return 0; - -e_irq: - while (v--) - free_irq(ccp_pci->msix[v].vector, dev); - - pci_disable_msix(pdev); - - ccp_pci->msix_count = 0; - - return ret; -} - -static int ccp_get_msi_irq(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - - ret = pci_enable_msi(pdev); - if (ret) - return ret; - - ccp->irq = pdev->irq; - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, - ccp->name, dev); - if (ret) { - dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); - goto e_msi; - } - ccp->use_tasklet = true; - - return 0; - -e_msi: - pci_disable_msi(pdev); - - return ret; -} - -static int ccp_get_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - int ret; - - ret = ccp_get_msix_irqs(ccp); - if (!ret) - return 0; - - /* Couldn't get MSI-X vectors, try MSI */ - dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); - ret = ccp_get_msi_irq(ccp); - if (!ret) - return 0; - - /* Couldn't get MSI interrupt */ - dev_notice(dev, "could not enable MSI (%d)\n", ret); - - return ret; -} - -static void ccp_free_irqs(struct ccp_device *ccp) -{ - struct ccp_pci *ccp_pci = ccp->dev_specific; - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - - if (ccp_pci->msix_count) { - while (ccp_pci->msix_count--) - free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, - dev); - pci_disable_msix(pdev); - } else if (ccp->irq) { - free_irq(ccp->irq, dev); - pci_disable_msi(pdev); - } - ccp->irq = 0; -} - -static int ccp_find_mmio_area(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct pci_dev *pdev = to_pci_dev(dev); - resource_size_t io_len; - unsigned long io_flags; - - io_flags = pci_resource_flags(pdev, ccp->vdata->bar); - io_len = pci_resource_len(pdev, ccp->vdata->bar); - if ((io_flags & IORESOURCE_MEM) && - (io_len >= (ccp->vdata->offset + 0x800))) - return ccp->vdata->bar; - - return -EIO; -} - -static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct ccp_device *ccp; - struct ccp_pci 
*ccp_pci; - struct device *dev = &pdev->dev; - unsigned int bar; - int ret; - - ret = -ENOMEM; - ccp = ccp_alloc_struct(dev); - if (!ccp) - goto e_err; - - ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL); - if (!ccp_pci) - goto e_err; - - ccp->dev_specific = ccp_pci; - ccp->vdata = (struct ccp_vdata *)id->driver_data; - if (!ccp->vdata || !ccp->vdata->version) { - ret = -ENODEV; - dev_err(dev, "missing driver data\n"); - goto e_err; - } - ccp->get_irq = ccp_get_irqs; - ccp->free_irq = ccp_free_irqs; - - ret = pci_request_regions(pdev, "ccp"); - if (ret) { - dev_err(dev, "pci_request_regions failed (%d)\n", ret); - goto e_err; - } - - ret = pci_enable_device(pdev); - if (ret) { - dev_err(dev, "pci_enable_device failed (%d)\n", ret); - goto e_regions; - } - - pci_set_master(pdev); - - ret = ccp_find_mmio_area(ccp); - if (ret < 0) - goto e_device; - bar = ret; - - ret = -EIO; - ccp->io_map = pci_iomap(pdev, bar, 0); - if (!ccp->io_map) { - dev_err(dev, "pci_iomap failed\n"); - goto e_device; - } - ccp->io_regs = ccp->io_map + ccp->vdata->offset; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); - if (ret) { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) { - dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", - ret); - goto e_iomap; - } - } - - dev_set_drvdata(dev, ccp); - - if (ccp->vdata->setup) - ccp->vdata->setup(ccp); - - ret = ccp->vdata->perform->init(ccp); - if (ret) - goto e_iomap; - - dev_notice(dev, "enabled\n"); - - return 0; - -e_iomap: - pci_iounmap(pdev, ccp->io_map); - -e_device: - pci_disable_device(pdev); - -e_regions: - pci_release_regions(pdev); - -e_err: - dev_notice(dev, "initialization failed\n"); - return ret; -} - -static void ccp_pci_remove(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - - if (!ccp) - return; - - ccp->vdata->perform->destroy(ccp); - - pci_iounmap(pdev, ccp->io_map); - - pci_disable_device(pdev); - - pci_release_regions(pdev); - - dev_notice(dev, "disabled\n"); -} - -#ifdef CONFIG_PM -static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 1; - - /* Wake all the queue kthreads to prepare for suspend */ - for (i = 0; i < ccp->cmd_q_count; i++) - wake_up_process(ccp->cmd_q[i].kthread); - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - /* Wait for all queue kthreads to say they're done */ - while (!ccp_queues_suspended(ccp)) - wait_event_interruptible(ccp->suspend_queue, - ccp_queues_suspended(ccp)); - - return 0; -} - -static int ccp_pci_resume(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 0; - - /* Wake up all the kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) { - ccp->cmd_q[i].suspended = 0; - wake_up_process(ccp->cmd_q[i].kthread); - } - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; -} -#endif - -static const struct pci_device_id ccp_pci_table[] = { - { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, - { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a }, - { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b }, - /* Last entry must be zero */ - { 0, } -}; -MODULE_DEVICE_TABLE(pci, ccp_pci_table); - -static struct pci_driver ccp_pci_driver 
= { - .name = "ccp", - .id_table = ccp_pci_table, - .probe = ccp_pci_probe, - .remove = ccp_pci_remove, -#ifdef CONFIG_PM - .suspend = ccp_pci_suspend, - .resume = ccp_pci_resume, -#endif -}; - -int ccp_pci_init(void) -{ - return pci_register_driver(&ccp_pci_driver); -} - -void ccp_pci_exit(void) -{ - pci_unregister_driver(&ccp_pci_driver); -} diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c deleted file mode 100644 index e26969e601ad..000000000000 --- a/drivers/crypto/ccp/ccp-platform.c +++ /dev/null @@ -1,293 +0,0 @@ -/* - * AMD Cryptographic Coprocessor (CCP) driver - * - * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. - * - * Author: Tom Lendacky <thomas.lendacky@amd.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/ioport.h> -#include <linux/dma-mapping.h> -#include <linux/kthread.h> -#include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/delay.h> -#include <linux/ccp.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/acpi.h> - -#include "ccp-dev.h" - -struct ccp_platform { - int coherent; -}; - -static const struct acpi_device_id ccp_acpi_match[]; -static const struct of_device_id ccp_of_match[]; - -static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev) -{ -#ifdef CONFIG_OF - const struct of_device_id *match; - - match = of_match_node(ccp_of_match, pdev->dev.of_node); - if (match && match->data) - return (struct ccp_vdata *)match->data; -#endif - return NULL; -} - -static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) -{ -#ifdef CONFIG_ACPI - const struct acpi_device_id *match; - - match = acpi_match_device(ccp_acpi_match, &pdev->dev); - if (match && match->driver_data) - return (struct ccp_vdata *)match->driver_data; -#endif - return NULL; -} - -static int ccp_get_irq(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct platform_device *pdev = to_platform_device(dev); - int ret; - - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - - ccp->irq = ret; - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, - ccp->name, dev); - if (ret) { - dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); - return ret; - } - - return 0; -} - -static int ccp_get_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - int ret; - - ret = ccp_get_irq(ccp); - if (!ret) - return 0; - - /* Couldn't get an interrupt */ - dev_notice(dev, "could not enable interrupts (%d)\n", ret); - - return ret; -} - -static void ccp_free_irqs(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - - free_irq(ccp->irq, dev); -} - -static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) -{ - struct device *dev = ccp->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *ior; - - ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (ior && (resource_size(ior) >= 0x800)) - return ior; - - return NULL; -} - -static int ccp_platform_probe(struct platform_device *pdev) -{ - struct ccp_device *ccp; - struct ccp_platform *ccp_platform; - struct device *dev = &pdev->dev; - enum dev_dma_attr attr; - struct resource *ior; - int ret; - - ret = -ENOMEM; - ccp = ccp_alloc_struct(dev); - if (!ccp) - 
goto e_err; - - ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL); - if (!ccp_platform) - goto e_err; - - ccp->dev_specific = ccp_platform; - ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev) - : ccp_get_acpi_version(pdev); - if (!ccp->vdata || !ccp->vdata->version) { - ret = -ENODEV; - dev_err(dev, "missing driver data\n"); - goto e_err; - } - ccp->get_irq = ccp_get_irqs; - ccp->free_irq = ccp_free_irqs; - - ior = ccp_find_mmio_area(ccp); - ccp->io_map = devm_ioremap_resource(dev, ior); - if (IS_ERR(ccp->io_map)) { - ret = PTR_ERR(ccp->io_map); - goto e_err; - } - ccp->io_regs = ccp->io_map; - - attr = device_get_dma_attr(dev); - if (attr == DEV_DMA_NOT_SUPPORTED) { - dev_err(dev, "DMA is not supported"); - goto e_err; - } - - ccp_platform->coherent = (attr == DEV_DMA_COHERENT); - if (ccp_platform->coherent) - ccp->axcache = CACHE_WB_NO_ALLOC; - else - ccp->axcache = CACHE_NONE; - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); - if (ret) { - dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); - goto e_err; - } - - dev_set_drvdata(dev, ccp); - - ret = ccp->vdata->perform->init(ccp); - if (ret) - goto e_err; - - dev_notice(dev, "enabled\n"); - - return 0; - -e_err: - dev_notice(dev, "initialization failed\n"); - return ret; -} - -static int ccp_platform_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - - ccp->vdata->perform->destroy(ccp); - - dev_notice(dev, "disabled\n"); - - return 0; -} - -#ifdef CONFIG_PM -static int ccp_platform_suspend(struct platform_device *pdev, - pm_message_t state) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 1; - - /* Wake all the queue kthreads to prepare for suspend */ - for (i = 0; i < ccp->cmd_q_count; i++) - wake_up_process(ccp->cmd_q[i].kthread); - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - /* Wait for all queue kthreads to say they're done */ - while (!ccp_queues_suspended(ccp)) - wait_event_interruptible(ccp->suspend_queue, - ccp_queues_suspended(ccp)); - - return 0; -} - -static int ccp_platform_resume(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct ccp_device *ccp = dev_get_drvdata(dev); - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&ccp->cmd_lock, flags); - - ccp->suspending = 0; - - /* Wake up all the kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) { - ccp->cmd_q[i].suspended = 0; - wake_up_process(ccp->cmd_q[i].kthread); - } - - spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; -} -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id ccp_acpi_match[] = { - { "AMDI0C00", (kernel_ulong_t)&ccpv3 }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); -#endif - -#ifdef CONFIG_OF -static const struct of_device_id ccp_of_match[] = { - { .compatible = "amd,ccp-seattle-v1a", - .data = (const void *)&ccpv3 }, - { }, -}; -MODULE_DEVICE_TABLE(of, ccp_of_match); -#endif - -static struct platform_driver ccp_platform_driver = { - .driver = { - .name = "ccp", -#ifdef CONFIG_ACPI - .acpi_match_table = ccp_acpi_match, -#endif -#ifdef CONFIG_OF - .of_match_table = ccp_of_match, -#endif - }, - .probe = ccp_platform_probe, - .remove = ccp_platform_remove, -#ifdef CONFIG_PM - .suspend = ccp_platform_suspend, - .resume = ccp_platform_resume, -#endif -}; - -int ccp_platform_init(void) -{ - return 
platform_driver_register(&ccp_platform_driver); -} - -void ccp_platform_exit(void) -{ - platform_driver_unregister(&ccp_platform_driver); -} diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c new file mode 100644 index 000000000000..bef387c8abfd --- /dev/null +++ b/drivers/crypto/ccp/sp-dev.c @@ -0,0 +1,277 @@ +/* + * AMD Secure Processor driver + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * Author: Gary R Hook <gary.hook@amd.com> + * Author: Brijesh Singh <brijesh.singh@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/ccp.h> + +#include "ccp-dev.h" +#include "sp-dev.h" + +MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); +MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.1.0"); +MODULE_DESCRIPTION("AMD Secure Processor driver"); + +/* List of SPs, SP count, read-write access lock, and access functions + * + * Lock structure: get sp_unit_lock for reading whenever we need to + * examine the SP list. + */ +static DEFINE_RWLOCK(sp_unit_lock); +static LIST_HEAD(sp_units); + +/* Ever-increasing value to produce unique unit numbers */ +static atomic_t sp_ordinal; + +static void sp_add_device(struct sp_device *sp) +{ + unsigned long flags; + + write_lock_irqsave(&sp_unit_lock, flags); + + list_add_tail(&sp->entry, &sp_units); + + write_unlock_irqrestore(&sp_unit_lock, flags); +} + +static void sp_del_device(struct sp_device *sp) +{ + unsigned long flags; + + write_lock_irqsave(&sp_unit_lock, flags); + + list_del(&sp->entry); + + write_unlock_irqrestore(&sp_unit_lock, flags); +} + +static irqreturn_t sp_irq_handler(int irq, void *data) +{ + struct sp_device *sp = data; + + if (sp->ccp_irq_handler) + sp->ccp_irq_handler(irq, sp->ccp_irq_data); + + if (sp->psp_irq_handler) + sp->psp_irq_handler(irq, sp->psp_irq_data); + + return IRQ_HANDLED; +} + +int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + int ret; + + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { + /* Need a common routine to manage all interrupts */ + sp->ccp_irq_data = data; + sp->ccp_irq_handler = handler; + + if (!sp->irq_registered) { + ret = request_irq(sp->ccp_irq, sp_irq_handler, 0, + sp->name, sp); + if (ret) + return ret; + + sp->irq_registered = true; + } + } else { + /* Each sub-device can manage it's own interrupt */ + ret = request_irq(sp->ccp_irq, handler, 0, name, data); + if (ret) + return ret; + } + + return 0; +} + +int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + int ret; + + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { + /* Need a common routine to manage all interrupts */ + sp->psp_irq_data = data; + sp->psp_irq_handler = handler; + + if (!sp->irq_registered) { + ret = request_irq(sp->psp_irq, sp_irq_handler, 0, + sp->name, sp); + if (ret) + return ret; + + sp->irq_registered = true; + } + } else { + /* Each sub-device can manage it's own interrupt */ + ret = request_irq(sp->psp_irq, handler, 0, name, data); + if (ret) + return ret; + } + + 
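/*
 * Mirrors sp_request_ccp_irq() above: when the PSP and the CCP share a
 * single interrupt line, sp_irq_handler() is registered once and dispatches
 * to whichever sub-device handlers are installed; otherwise each sub-device
 * requests its own vector.
 */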
return 0; +} + +void sp_free_ccp_irq(struct sp_device *sp, void *data) +{ + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { + /* Using common routine to manage all interrupts */ + if (!sp->psp_irq_handler) { + /* Nothing else using it, so free it */ + free_irq(sp->ccp_irq, sp); + + sp->irq_registered = false; + } + + sp->ccp_irq_handler = NULL; + sp->ccp_irq_data = NULL; + } else { + /* Each sub-device can manage it's own interrupt */ + free_irq(sp->ccp_irq, data); + } +} + +void sp_free_psp_irq(struct sp_device *sp, void *data) +{ + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { + /* Using common routine to manage all interrupts */ + if (!sp->ccp_irq_handler) { + /* Nothing else using it, so free it */ + free_irq(sp->psp_irq, sp); + + sp->irq_registered = false; + } + + sp->psp_irq_handler = NULL; + sp->psp_irq_data = NULL; + } else { + /* Each sub-device can manage it's own interrupt */ + free_irq(sp->psp_irq, data); + } +} + +/** + * sp_alloc_struct - allocate and initialize the sp_device struct + * + * @dev: device struct of the SP + */ +struct sp_device *sp_alloc_struct(struct device *dev) +{ + struct sp_device *sp; + + sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL); + if (!sp) + return NULL; + + sp->dev = dev; + sp->ord = atomic_inc_return(&sp_ordinal); + snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord); + + return sp; +} + +int sp_init(struct sp_device *sp) +{ + sp_add_device(sp); + + if (sp->dev_vdata->ccp_vdata) + ccp_dev_init(sp); + + return 0; +} + +void sp_destroy(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_destroy(sp); + + sp_del_device(sp); +} + +#ifdef CONFIG_PM +int sp_suspend(struct sp_device *sp, pm_message_t state) +{ + int ret; + + if (sp->dev_vdata->ccp_vdata) { + ret = ccp_dev_suspend(sp, state); + if (ret) + return ret; + } + + return 0; +} + +int sp_resume(struct sp_device *sp) +{ + int ret; + + if (sp->dev_vdata->ccp_vdata) { + ret = ccp_dev_resume(sp); + if (ret) + return ret; + } + + return 0; +} +#endif + +static int __init sp_mod_init(void) +{ +#ifdef CONFIG_X86 + int ret; + + ret = sp_pci_init(); + if (ret) + return ret; + + return 0; +#endif + +#ifdef CONFIG_ARM64 + int ret; + + ret = sp_platform_init(); + if (ret) + return ret; + + return 0; +#endif + + return -ENODEV; +} + +static void __exit sp_mod_exit(void) +{ +#ifdef CONFIG_X86 + sp_pci_exit(); +#endif + +#ifdef CONFIG_ARM64 + sp_platform_exit(); +#endif +} + +module_init(sp_mod_init); +module_exit(sp_mod_exit); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h new file mode 100644 index 000000000000..5ab486ade1ad --- /dev/null +++ b/drivers/crypto/ccp/sp-dev.h @@ -0,0 +1,133 @@ +/* + * AMD Secure Processor driver + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * Author: Gary R Hook <gary.hook@amd.com> + * Author: Brijesh Singh <brijesh.singh@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __SP_DEV_H__ +#define __SP_DEV_H__ + +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/dmapool.h> +#include <linux/hw_random.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/irqreturn.h> + +#define SP_MAX_NAME_LEN 32 + +#define CACHE_NONE 0x00 +#define CACHE_WB_NO_ALLOC 0xb7 + +/* Structure to hold CCP device data */ +struct ccp_device; +struct ccp_vdata { + const unsigned int version; + const unsigned int dma_chan_attr; + void (*setup)(struct ccp_device *); + const struct ccp_actions *perform; + const unsigned int offset; + const unsigned int rsamax; +}; +/* Structure to hold SP device data */ +struct sp_dev_vdata { + const unsigned int bar; + + const struct ccp_vdata *ccp_vdata; + void *psp_vdata; +}; + +struct sp_device { + struct list_head entry; + + struct device *dev; + + struct sp_dev_vdata *dev_vdata; + unsigned int ord; + char name[SP_MAX_NAME_LEN]; + + /* Bus specific device information */ + void *dev_specific; + + /* I/O area used for device communication. */ + void __iomem *io_map; + + /* DMA caching attribute support */ + unsigned int axcache; + + bool irq_registered; + bool use_tasklet; + + unsigned int ccp_irq; + irq_handler_t ccp_irq_handler; + void *ccp_irq_data; + + unsigned int psp_irq; + irq_handler_t psp_irq_handler; + void *psp_irq_data; + + void *ccp_data; + void *psp_data; +}; + +int sp_pci_init(void); +void sp_pci_exit(void); + +int sp_platform_init(void); +void sp_platform_exit(void); + +struct sp_device *sp_alloc_struct(struct device *dev); + +int sp_init(struct sp_device *sp); +void sp_destroy(struct sp_device *sp); +struct sp_device *sp_get_master(void); + +int sp_suspend(struct sp_device *sp, pm_message_t state); +int sp_resume(struct sp_device *sp); +int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); +void sp_free_ccp_irq(struct sp_device *sp, void *data); +int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); +void sp_free_psp_irq(struct sp_device *sp, void *data); + +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + +int ccp_dev_init(struct sp_device *sp); +void ccp_dev_destroy(struct sp_device *sp); + +int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); +int ccp_dev_resume(struct sp_device *sp); + +#else /* !CONFIG_CRYPTO_DEV_SP_CCP */ + +static inline int ccp_dev_init(struct sp_device *sp) +{ + return 0; +} +static inline void ccp_dev_destroy(struct sp_device *sp) { } + +static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) +{ + return 0; +} +static inline int ccp_dev_resume(struct sp_device *sp) +{ + return 0; +} +#endif /* CONFIG_CRYPTO_DEV_SP_CCP */ + +#endif diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c new file mode 100644 index 000000000000..9859aa683a28 --- /dev/null +++ b/drivers/crypto/ccp/sp-pci.c @@ -0,0 +1,276 @@ +/* + * AMD Secure Processor device driver + * + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * Author: Gary R Hook <gary.hook@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/dma-mapping.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/ccp.h> + +#include "ccp-dev.h" + +#define MSIX_VECTORS 2 + +struct sp_pci { + int msix_count; + struct msix_entry msix_entry[MSIX_VECTORS]; +}; + +static int sp_get_msix_irqs(struct sp_device *sp) +{ + struct sp_pci *sp_pci = sp->dev_specific; + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + int v, ret; + + for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++) + sp_pci->msix_entry[v].entry = v; + + ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v); + if (ret < 0) + return ret; + + sp_pci->msix_count = ret; + sp->use_tasklet = true; + + sp->psp_irq = sp_pci->msix_entry[0].vector; + sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector + : sp_pci->msix_entry[0].vector; + return 0; +} + +static int sp_get_msi_irq(struct sp_device *sp) +{ + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = pci_enable_msi(pdev); + if (ret) + return ret; + + sp->ccp_irq = pdev->irq; + sp->psp_irq = pdev->irq; + + return 0; +} + +static int sp_get_irqs(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret; + + ret = sp_get_msix_irqs(sp); + if (!ret) + return 0; + + /* Couldn't get MSI-X vectors, try MSI */ + dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); + ret = sp_get_msi_irq(sp); + if (!ret) + return 0; + + /* Couldn't get MSI interrupt */ + dev_notice(dev, "could not enable MSI (%d)\n", ret); + + return ret; +} + +static void sp_free_irqs(struct sp_device *sp) +{ + struct sp_pci *sp_pci = sp->dev_specific; + struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (sp_pci->msix_count) + pci_disable_msix(pdev); + else if (sp->psp_irq) + pci_disable_msi(pdev); + + sp->ccp_irq = 0; + sp->psp_irq = 0; +} + +static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct sp_device *sp; + struct sp_pci *sp_pci; + struct device *dev = &pdev->dev; + void __iomem * const *iomap_table; + int bar_mask; + int ret; + + ret = -ENOMEM; + sp = sp_alloc_struct(dev); + if (!sp) + goto e_err; + + sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL); + if (!sp_pci) + goto e_err; + + sp->dev_specific = sp_pci; + sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data; + if (!sp->dev_vdata) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "pcim_enable_device failed (%d)\n", ret); + goto e_err; + } + + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + ret = pcim_iomap_regions(pdev, bar_mask, "ccp"); + if (ret) { + dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret); + goto e_err; + } + + iomap_table = pcim_iomap_table(pdev); + if (!iomap_table) { + dev_err(dev, "pcim_iomap_table failed\n"); + ret = -ENOMEM; + goto e_err; + } + + sp->io_map = iomap_table[sp->dev_vdata->bar]; + if (!sp->io_map) { + dev_err(dev, "ioremap failed\n"); + ret = -ENOMEM; + goto e_err; + } + + ret = sp_get_irqs(sp); + if (ret) + goto e_err; + + pci_set_master(pdev); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", + 
ret); + goto e_err; + } + } + + dev_set_drvdata(dev, sp); + + ret = sp_init(sp); + if (ret) + goto e_err; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static void sp_pci_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + if (!sp) + return; + + sp_destroy(sp); + + sp_free_irqs(sp); + + dev_notice(dev, "disabled\n"); +} + +#ifdef CONFIG_PM +static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_suspend(sp, state); +} + +static int sp_pci_resume(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_resume(sp); +} +#endif + +static const struct sp_dev_vdata dev_vdata[] = { + { + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv3, +#endif + }, + { + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif + }, + { + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b, +#endif + }, +}; +static const struct pci_device_id sp_pci_table[] = { + { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, + { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, + { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, + /* Last entry must be zero */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, sp_pci_table); + +static struct pci_driver sp_pci_driver = { + .name = "ccp", + .id_table = sp_pci_table, + .probe = sp_pci_probe, + .remove = sp_pci_remove, +#ifdef CONFIG_PM + .suspend = sp_pci_suspend, + .resume = sp_pci_resume, +#endif +}; + +int sp_pci_init(void) +{ + return pci_register_driver(&sp_pci_driver); +} + +void sp_pci_exit(void) +{ + pci_unregister_driver(&sp_pci_driver); +} diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c new file mode 100644 index 000000000000..71734f254fd1 --- /dev/null +++ b/drivers/crypto/ccp/sp-platform.c @@ -0,0 +1,256 @@ +/* + * AMD Secure Processor device driver + * + * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky <thomas.lendacky@amd.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/ccp.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/acpi.h> + +#include "ccp-dev.h" + +struct sp_platform { + int coherent; + unsigned int irq_count; +}; + +static const struct acpi_device_id sp_acpi_match[]; +static const struct of_device_id sp_of_match[]; + +static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) +{ +#ifdef CONFIG_OF + const struct of_device_id *match; + + match = of_match_node(sp_of_match, pdev->dev.of_node); + if (match && match->data) + return (struct sp_dev_vdata *)match->data; +#endif + return NULL; +} + +static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) +{ +#ifdef CONFIG_ACPI + const struct acpi_device_id *match; + + match = acpi_match_device(sp_acpi_match, &pdev->dev); + if (match && match->driver_data) + return (struct sp_dev_vdata *)match->driver_data; +#endif + return NULL; +} + +static int sp_get_irqs(struct sp_device *sp) +{ + struct sp_platform *sp_platform = sp->dev_specific; + struct device *dev = sp->dev; + struct platform_device *pdev = to_platform_device(dev); + unsigned int i, count; + int ret; + + for (i = 0, count = 0; i < pdev->num_resources; i++) { + struct resource *res = &pdev->resource[i]; + + if (resource_type(res) == IORESOURCE_IRQ) + count++; + } + + sp_platform->irq_count = count; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_notice(dev, "unable to get IRQ (%d)\n", ret); + return ret; + } + + sp->psp_irq = ret; + if (count == 1) { + sp->ccp_irq = ret; + } else { + ret = platform_get_irq(pdev, 1); + if (ret < 0) { + dev_notice(dev, "unable to get IRQ (%d)\n", ret); + return ret; + } + + sp->ccp_irq = ret; + } + + return 0; +} + +static int sp_platform_probe(struct platform_device *pdev) +{ + struct sp_device *sp; + struct sp_platform *sp_platform; + struct device *dev = &pdev->dev; + enum dev_dma_attr attr; + struct resource *ior; + int ret; + + ret = -ENOMEM; + sp = sp_alloc_struct(dev); + if (!sp) + goto e_err; + + sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL); + if (!sp_platform) + goto e_err; + + sp->dev_specific = sp_platform; + sp->dev_vdata = pdev->dev.of_node ? 
sp_get_of_version(pdev) + : sp_get_acpi_version(pdev); + if (!sp->dev_vdata) { + ret = -ENODEV; + dev_err(dev, "missing driver data\n"); + goto e_err; + } + + ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sp->io_map = devm_ioremap_resource(dev, ior); + if (IS_ERR(sp->io_map)) { + ret = PTR_ERR(sp->io_map); + goto e_err; + } + + attr = device_get_dma_attr(dev); + if (attr == DEV_DMA_NOT_SUPPORTED) { + dev_err(dev, "DMA is not supported"); + goto e_err; + } + + sp_platform->coherent = (attr == DEV_DMA_COHERENT); + if (sp_platform->coherent) + sp->axcache = CACHE_WB_NO_ALLOC; + else + sp->axcache = CACHE_NONE; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); + goto e_err; + } + + ret = sp_get_irqs(sp); + if (ret) + goto e_err; + + dev_set_drvdata(dev, sp); + + ret = sp_init(sp); + if (ret) + goto e_err; + + dev_notice(dev, "enabled\n"); + + return 0; + +e_err: + dev_notice(dev, "initialization failed\n"); + return ret; +} + +static int sp_platform_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + sp_destroy(sp); + + dev_notice(dev, "disabled\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int sp_platform_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_suspend(sp, state); +} + +static int sp_platform_resume(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_device *sp = dev_get_drvdata(dev); + + return sp_resume(sp); +} +#endif + +static const struct sp_dev_vdata dev_vdata[] = { + { + .bar = 0, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv3_platform, +#endif + }, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id sp_acpi_match[] = { + { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sp_acpi_match); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id sp_of_match[] = { + { .compatible = "amd,ccp-seattle-v1a", + .data = (const void *)&dev_vdata[0] }, + { }, +}; +MODULE_DEVICE_TABLE(of, sp_of_match); +#endif + +static struct platform_driver sp_platform_driver = { + .driver = { + .name = "ccp", +#ifdef CONFIG_ACPI + .acpi_match_table = sp_acpi_match, +#endif +#ifdef CONFIG_OF + .of_match_table = sp_of_match, +#endif + }, + .probe = sp_platform_probe, + .remove = sp_platform_remove, +#ifdef CONFIG_PM + .suspend = sp_platform_suspend, + .resume = sp_platform_resume, +#endif +}; + +int sp_platform_init(void) +{ + return platform_driver_register(&sp_platform_driver); +} + +void sp_platform_exit(void) +{ + platform_driver_unregister(&sp_platform_driver); +} diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index fe538e5287a5..eb2a0a73cbed 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -1,10 +1,10 @@ /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ #include <linux/module.h> #include <linux/kernel.h> @@ -30,6 +30,7 @@ static inline void _writefield(u32 offset, void *value) { int i; + for (i = 0; i < 4; i++) iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); } @@ -39,6 +40,7 @@ static inline void _readfield(u32 offset, void *value) { int i; + for (i = 0; i < 4; i++) ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); } @@ -515,6 +517,7 @@ static void geode_aes_remove(struct pci_dev *dev) static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret; + ret = pci_enable_device(dev); if (ret) return ret; @@ -570,7 +573,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) } static struct pci_device_id geode_aes_tbl[] = { - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } , + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), }, { 0, } }; diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 0c6a917a9ab8..b87000a0a01c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -1054,7 +1054,7 @@ res_err: static int img_hash_remove(struct platform_device *pdev) { - static struct img_hash_dev *hdev; + struct img_hash_dev *hdev; hdev = platform_get_drvdata(pdev); spin_lock(&img_hash.lock); diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 000b6500a22d..b182e941b0cd 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -500,7 +500,7 @@ static int mtk_crypto_probe(struct platform_device *pdev) cryp->irq[i] = platform_get_irq(pdev, i); if (cryp->irq[i] < 0) { dev_err(cryp->dev, "no IRQ:%d resource info\n", i); - return -ENXIO; + return cryp->irq[i]; } } diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index ee4be1b0d30b..e01c46387df8 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c @@ -708,8 +708,8 @@ static int mxc_scc_probe(struct platform_device *pdev) for (i = 0; i < 2; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { - dev_err(dev, "failed to get irq resource\n"); - ret = -EINVAL; + dev_err(dev, "failed to get irq resource: %d\n", irq); + ret = irq; goto err_out; } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 625ee50fd78b..764be3e6933c 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -908,12 +908,16 @@ static int mxs_dcp_probe(struct platform_device *pdev) iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); dcp_vmi_irq = platform_get_irq(pdev, 0); - if (dcp_vmi_irq < 0) + if (dcp_vmi_irq < 0) { + dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); return dcp_vmi_irq; + } dcp_irq = platform_get_irq(pdev, 1); - if (dcp_irq < 0) + if (dcp_irq < 0) { + dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq); return dcp_irq; + } sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); if (!sdcp) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 269451375b63..a9fd8b9e86cd 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1730,8 +1730,8 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, continue; id = mdesc_get_property(mdesc, tgt, "id", NULL); if (table[*id] != NULL) { - dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n", + dev->dev.of_node); return -EINVAL; } cpumask_set_cpu(*id, &p->sharing); @@ -1751,8 +1751,8 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, p = 
kzalloc(sizeof(struct spu_queue), GFP_KERNEL); if (!p) { - dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", - dev->dev.of_node->full_name); + dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n", + dev->dev.of_node); return -ENOMEM; } @@ -1981,41 +1981,39 @@ static void n2_spu_driver_version(void) static int n2_crypto_probe(struct platform_device *dev) { struct mdesc_handle *mdesc; - const char *full_name; struct n2_crypto *np; int err; n2_spu_driver_version(); - full_name = dev->dev.of_node->full_name; - pr_info("Found N2CP at %s\n", full_name); + pr_info("Found N2CP at %pOF\n", dev->dev.of_node); np = alloc_n2cp(); if (!np) { - dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n", + dev->dev.of_node); return -ENOMEM; } err = grab_global_resources(); if (err) { - dev_err(&dev->dev, "%s: Unable to grab " - "global resources.\n", full_name); + dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", + dev->dev.of_node); goto out_free_n2cp; } mdesc = mdesc_grab(); if (!mdesc) { - dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", + dev->dev.of_node); err = -ENODEV; goto out_free_global; } err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); if (err) { - dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", + dev->dev.of_node); mdesc_release(mdesc); goto out_free_global; } @@ -2026,15 +2024,15 @@ static int n2_crypto_probe(struct platform_device *dev) mdesc_release(mdesc); if (err) { - dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", - full_name); + dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n", + dev->dev.of_node); goto out_free_global; } err = n2_register_algs(); if (err) { - dev_err(&dev->dev, "%s: Unable to register algorithms.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n", + dev->dev.of_node); goto out_free_spu_list; } @@ -2092,42 +2090,40 @@ static void free_ncp(struct n2_mau *mp) static int n2_mau_probe(struct platform_device *dev) { struct mdesc_handle *mdesc; - const char *full_name; struct n2_mau *mp; int err; n2_spu_driver_version(); - full_name = dev->dev.of_node->full_name; - pr_info("Found NCP at %s\n", full_name); + pr_info("Found NCP at %pOF\n", dev->dev.of_node); mp = alloc_ncp(); if (!mp) { - dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n", + dev->dev.of_node); return -ENOMEM; } err = grab_global_resources(); if (err) { - dev_err(&dev->dev, "%s: Unable to grab " - "global resources.\n", full_name); + dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", + dev->dev.of_node); goto out_free_ncp; } mdesc = mdesc_grab(); if (!mdesc) { - dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", + dev->dev.of_node); err = -ENODEV; goto out_free_global; } err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); if (err) { - dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", - full_name); + dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", + dev->dev.of_node); mdesc_release(mdesc); goto out_free_global; } @@ -2138,8 +2134,8 @@ static int n2_mau_probe(struct platform_device *dev) mdesc_release(mdesc); if (err) { - dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", - full_name); + dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n", + dev->dev.of_node); 
goto out_free_global; } diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 5120a17731d0..c376a3ee7c2c 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1095,6 +1095,7 @@ static int omap_aes_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "can't get IRQ resource\n"); + err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 0bcab00e0ff5..d37c9506c36c 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1023,7 +1023,8 @@ static int omap_des_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "can't get IRQ resource\n"); + dev_err(dev, "can't get IRQ resource: %d\n", irq); + err = irq; goto err_irq; } diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 9ad9d399daf1..c40ac30ec002 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2133,7 +2133,7 @@ data_err: static int omap_sham_remove(struct platform_device *pdev) { - static struct omap_sham_dev *dd; + struct omap_sham_dev *dd; int i, j; dd = platform_get_drvdata(pdev); diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index d3e25c37dc33..da8a2d3b5e9a 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -208,7 +208,7 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) static void adf_resume(struct pci_dev *pdev) { dev_info(&pdev->dev, "Acceleration driver reset completed\n"); - dev_info(&pdev->dev, "Device is up and runnig\n"); + dev_info(&pdev->dev, "Device is up and running\n"); } static const struct pci_error_handlers adf_err_handler = { diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index d0f80c6241f9..57c37831bd42 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -169,52 +169,46 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; - int err = 0; spin_lock(&dev->lock); interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); - err = -EFAULT; - } else if (interrupt_status & 0x05) { - err = dev->update(dev); + dev->err = -EFAULT; } - if (err) - dev->complete(dev, err); + tasklet_schedule(&dev->done_task); + spin_unlock(&dev->lock); return IRQ_HANDLED; } -static void rk_crypto_tasklet_cb(unsigned long data) +static void rk_crypto_queue_task_cb(unsigned long data) { struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - struct crypto_async_request *async_req, *backlog; - unsigned long flags; int err = 0; - spin_lock_irqsave(&dev->lock, flags); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); - spin_unlock_irqrestore(&dev->lock, flags); - if (!async_req) { - dev_err(dev->dev, "async_req is NULL !!\n"); - return; - } - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } - - if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) - dev->ablk_req = ablkcipher_request_cast(async_req); - else - dev->ahash_req = ahash_request_cast(async_req); + dev->err = 0; err = dev->start(dev); if (err) dev->complete(dev, err); } +static void rk_crypto_done_task_cb(unsigned long data) +{ + struct 
rk_crypto_info *dev = (struct rk_crypto_info *)data; + + if (dev->err) { + dev->complete(dev, dev->err); + return; + } + + dev->err = dev->update(dev); + if (dev->err) + dev->complete(dev, dev->err); +} + static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ecb_aes_alg, &rk_cbc_aes_alg, @@ -361,8 +355,10 @@ static int rk_crypto_probe(struct platform_device *pdev) crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); - tasklet_init(&crypto_info->crypto_tasklet, - rk_crypto_tasklet_cb, (unsigned long)crypto_info); + tasklet_init(&crypto_info->queue_task, + rk_crypto_queue_task_cb, (unsigned long)crypto_info); + tasklet_init(&crypto_info->done_task, + rk_crypto_done_task_cb, (unsigned long)crypto_info); crypto_init_queue(&crypto_info->queue, 50); crypto_info->enable_clk = rk_crypto_enable_clk; @@ -380,7 +376,8 @@ static int rk_crypto_probe(struct platform_device *pdev) return 0; err_register_alg: - tasklet_kill(&crypto_info->crypto_tasklet); + tasklet_kill(&crypto_info->queue_task); + tasklet_kill(&crypto_info->done_task); err_crypto: return err; } @@ -390,7 +387,8 @@ static int rk_crypto_remove(struct platform_device *pdev) struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->crypto_tasklet); + tasklet_kill(&crypto_tmp->done_task); + tasklet_kill(&crypto_tmp->queue_task); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index d7b71fea320b..65ad1c261949 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -190,9 +190,11 @@ struct rk_crypto_info { void __iomem *reg; int irq; struct crypto_queue queue; - struct tasklet_struct crypto_tasklet; + struct tasklet_struct queue_task; + struct tasklet_struct done_task; struct ablkcipher_request *ablk_req; struct ahash_request *ahash_req; + int err; /* device lock */ spinlock_t lock; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index b5a3afe222e4..dbe78def7b65 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -25,6 +25,7 @@ static int rk_handle_req(struct rk_crypto_info *dev, struct ablkcipher_request *req) { unsigned long flags; + struct crypto_async_request *async_req, *backlog; int err; if (!IS_ALIGNED(req->nbytes, dev->align_size)) @@ -41,8 +42,22 @@ static int rk_handle_req(struct rk_crypto_info *dev, spin_lock_irqsave(&dev->lock, flags); err = ablkcipher_enqueue_request(&dev->queue, req); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->crypto_tasklet); + + if (!async_req) { + dev_err(dev->dev, "async_req is NULL !!\n"); + return err; + } + if (backlog) { + backlog->complete(backlog, -EINPROGRESS); + backlog = NULL; + } + + dev->ablk_req = ablkcipher_request_cast(async_req); + + tasklet_schedule(&dev->queue_task); return err; } diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 718588219f75..ebc46e007804 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -166,6 +166,7 @@ static int rk_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_async_request *async_req, 
*backlog; struct rk_crypto_info *dev = NULL; unsigned long flags; int ret; @@ -202,9 +203,22 @@ static int rk_ahash_digest(struct ahash_request *req) spin_lock_irqsave(&dev->lock, flags); ret = crypto_enqueue_request(&dev->queue, &req->base); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->crypto_tasklet); + if (!async_req) { + dev_err(dev->dev, "async_req is NULL !!\n"); + return ret; + } + if (backlog) { + backlog->complete(backlog, -EINPROGRESS); + backlog = NULL; + } + + dev->ahash_req = ahash_request_cast(async_req); + + tasklet_schedule(&dev->queue_task); /* * it will take some time to process date after last dma transmission. diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 1d9ecd368b5b..03a8abad7e76 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -202,7 +202,6 @@ struct sahara_dev { struct completion dma_completion; struct sahara_ctx *ctx; - spinlock_t lock; struct crypto_queue queue; unsigned long flags; @@ -543,10 +542,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) unmap_out: dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, - DMA_TO_DEVICE); + DMA_FROM_DEVICE); unmap_in: dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return -EINVAL; } @@ -594,9 +593,9 @@ static int sahara_aes_process(struct ablkcipher_request *req) } dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, - DMA_TO_DEVICE); - dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_FROM_DEVICE); + dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, + DMA_TO_DEVICE); return 0; } @@ -1382,7 +1381,7 @@ static struct platform_device_id sahara_platform_ids[] = { }; MODULE_DEVICE_TABLE(platform, sahara_platform_ids); -static struct of_device_id sahara_dt_ids[] = { +static const struct of_device_id sahara_dt_ids[] = { { .compatible = "fsl,imx53-sahara" }, { .compatible = "fsl,imx27-sahara" }, { /* sentinel */ } @@ -1487,7 +1486,6 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - spin_lock_init(&dev->lock); mutex_init(&dev->queue_mutex); dev_ptr = dev; diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 09b4ec87c212..602332e02729 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -1,7 +1,20 @@ -config CRYPTO_DEV_STM32 - tristate "Support for STM32 crypto accelerators" +config CRC_DEV_STM32 + tristate "Support for STM32 crc accelerators" depends on ARCH_STM32 select CRYPTO_HASH help This enables support for the CRC32 hw accelerator which can be found - on STMicroelectronis STM32 SOC. + on STMicroelectronics STM32 SOC. + +config HASH_DEV_STM32 + tristate "Support for STM32 hash accelerators" + depends on ARCH_STM32 + depends on HAS_DMA + select CRYPTO_HASH + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_ENGINE + help + This enables support for the HASH hw accelerator which can be found + on STMicroelectronics STM32 SOC. diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile index 73b4c6e47f5f..73cd56cad0cc 100644 --- a/drivers/crypto/stm32/Makefile +++ b/drivers/crypto/stm32/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o -stm32_cryp-objs := stm32_crc32.o +obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o +obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c new file mode 100644 index 000000000000..b585ce54a802 --- /dev/null +++ b/drivers/crypto/stm32/stm32-hash.c @@ -0,0 +1,1575 @@ +/* + * This file is part of STM32 Crypto driver for Linux. + * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics. + * + * License terms: GPL V2.0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <crypto/engine.h> +#include <crypto/hash.h> +#include <crypto/md5.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha.h> +#include <crypto/internal/hash.h> + +#define HASH_CR 0x00 +#define HASH_DIN 0x04 +#define HASH_STR 0x08 +#define HASH_IMR 0x20 +#define HASH_SR 0x24 +#define HASH_CSR(x) (0x0F8 + ((x) * 0x04)) +#define HASH_HREG(x) (0x310 + ((x) * 0x04)) +#define HASH_HWCFGR 0x3F0 +#define HASH_VER 0x3F4 +#define HASH_ID 0x3F8 + +/* Control Register */ +#define HASH_CR_INIT BIT(2) +#define HASH_CR_DMAE BIT(3) +#define HASH_CR_DATATYPE_POS 4 +#define HASH_CR_MODE BIT(6) +#define HASH_CR_MDMAT BIT(13) +#define HASH_CR_DMAA BIT(14) +#define HASH_CR_LKEY BIT(16) + +#define HASH_CR_ALGO_SHA1 0x0 +#define HASH_CR_ALGO_MD5 0x80 +#define HASH_CR_ALGO_SHA224 0x40000 +#define HASH_CR_ALGO_SHA256 0x40080 + +/* Interrupt */ +#define HASH_DINIE BIT(0) +#define HASH_DCIE BIT(1) + +/* Interrupt Mask */ +#define HASH_MASK_CALC_COMPLETION BIT(0) +#define HASH_MASK_DATA_INPUT BIT(1) + +/* Context swap register */ +#define HASH_CSR_REGISTER_NUMBER 53 + +/* Status Flags */ +#define HASH_SR_DATA_INPUT_READY BIT(0) +#define HASH_SR_OUTPUT_READY BIT(1) +#define HASH_SR_DMA_ACTIVE BIT(2) +#define HASH_SR_BUSY BIT(3) + +/* STR Register */ +#define HASH_STR_NBLW_MASK GENMASK(4, 0) +#define HASH_STR_DCAL BIT(8) + +#define HASH_FLAGS_INIT BIT(0) +#define HASH_FLAGS_OUTPUT_READY BIT(1) +#define HASH_FLAGS_CPU BIT(2) +#define HASH_FLAGS_DMA_READY BIT(3) +#define HASH_FLAGS_DMA_ACTIVE BIT(4) +#define HASH_FLAGS_HMAC_INIT BIT(5) +#define HASH_FLAGS_HMAC_FINAL BIT(6) +#define HASH_FLAGS_HMAC_KEY BIT(7) + +#define HASH_FLAGS_FINAL BIT(15) +#define HASH_FLAGS_FINUP BIT(16) +#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18) +#define HASH_FLAGS_MD5 BIT(18) +#define HASH_FLAGS_SHA1 BIT(19) +#define HASH_FLAGS_SHA224 BIT(20) +#define HASH_FLAGS_SHA256 BIT(21) +#define HASH_FLAGS_ERRORS BIT(22) +#define HASH_FLAGS_HMAC BIT(23) + +#define HASH_OP_UPDATE 1 +#define HASH_OP_FINAL 2 + +enum stm32_hash_data_format { + HASH_DATA_32_BITS = 0x0, + HASH_DATA_16_BITS = 0x1, + HASH_DATA_8_BITS = 0x2, + HASH_DATA_1_BIT = 0x3 +}; + 
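/*
 * The data_type value above is programmed into the HASH_CR DATATYPE field by
 * stm32_hash_write_ctrl() and selects how each 32-bit word written to
 * HASH_DIN is reordered: no swap for 32-bit data, half-word swap for 16-bit,
 * byte swap for 8-bit and bit reversal for single-bit bitstrings.
 */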
+#define HASH_BUFLEN 256 +#define HASH_LONG_KEY 64 +#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8) +#define HASH_QUEUE_LENGTH 16 +#define HASH_DMA_THRESHOLD 50 + +struct stm32_hash_ctx { + struct stm32_hash_dev *hdev; + unsigned long flags; + + u8 key[HASH_MAX_KEY_SIZE]; + int keylen; +}; + +struct stm32_hash_request_ctx { + struct stm32_hash_dev *hdev; + unsigned long flags; + unsigned long op; + + u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + size_t digcnt; + size_t bufcnt; + size_t buflen; + + /* DMA */ + struct scatterlist *sg; + unsigned int offset; + unsigned int total; + struct scatterlist sg_key; + + dma_addr_t dma_addr; + size_t dma_ct; + int nents; + + u8 data_type; + + u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32)); + + /* Export Context */ + u32 *hw_context; +}; + +struct stm32_hash_algs_info { + struct ahash_alg *algs_list; + size_t size; +}; + +struct stm32_hash_pdata { + struct stm32_hash_algs_info *algs_info; + size_t algs_info_size; +}; + +struct stm32_hash_dev { + struct list_head list; + struct device *dev; + struct clk *clk; + struct reset_control *rst; + void __iomem *io_base; + phys_addr_t phys_base; + u32 dma_mode; + u32 dma_maxburst; + + spinlock_t lock; /* lock to protect queue */ + + struct ahash_request *req; + struct crypto_engine *engine; + + int err; + unsigned long flags; + + struct dma_chan *dma_lch; + struct completion dma_completion; + + const struct stm32_hash_pdata *pdata; +}; + +struct stm32_hash_drv { + struct list_head dev_list; + spinlock_t lock; /* List protection access */ +}; + +static struct stm32_hash_drv stm32_hash = { + .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list), + .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock), +}; + +static void stm32_hash_dma_callback(void *param); + +static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset) +{ + return readl_relaxed(hdev->io_base + offset); +} + +static inline void stm32_hash_write(struct stm32_hash_dev *hdev, + u32 offset, u32 value) +{ + writel_relaxed(value, hdev->io_base + offset); +} + +static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) +{ + u32 status; + + return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, + !(status & HASH_SR_BUSY), 10, 10000); +} + +static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length) +{ + u32 reg; + + reg = stm32_hash_read(hdev, HASH_STR); + reg &= ~(HASH_STR_NBLW_MASK); + reg |= (8U * ((length) % 4U)); + stm32_hash_write(hdev, HASH_STR, reg); +} + +static int stm32_hash_write_key(struct stm32_hash_dev *hdev) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + u32 reg; + int keylen = ctx->keylen; + void *key = ctx->key; + + if (keylen) { + stm32_hash_set_nblw(hdev, keylen); + + while (keylen > 0) { + stm32_hash_write(hdev, HASH_DIN, *(u32 *)key); + keylen -= 4; + key += 4; + } + + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + + return -EINPROGRESS; + } + + return 0; +} + +static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + u32 reg = HASH_CR_INIT; + + if (!(hdev->flags & HASH_FLAGS_INIT)) { + switch (rctx->flags & HASH_FLAGS_ALGO_MASK) { + case HASH_FLAGS_MD5: + reg |= HASH_CR_ALGO_MD5; + break; + case HASH_FLAGS_SHA1: + reg |= HASH_CR_ALGO_SHA1; + break; + case 
HASH_FLAGS_SHA224: + reg |= HASH_CR_ALGO_SHA224; + break; + case HASH_FLAGS_SHA256: + reg |= HASH_CR_ALGO_SHA256; + break; + default: + reg |= HASH_CR_ALGO_MD5; + } + + reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); + + if (rctx->flags & HASH_FLAGS_HMAC) { + hdev->flags |= HASH_FLAGS_HMAC; + reg |= HASH_CR_MODE; + if (ctx->keylen > HASH_LONG_KEY) + reg |= HASH_CR_LKEY; + } + + stm32_hash_write(hdev, HASH_IMR, HASH_DCIE); + + stm32_hash_write(hdev, HASH_CR, reg); + + hdev->flags |= HASH_FLAGS_INIT; + + dev_dbg(hdev->dev, "Write Control %x\n", reg); + } +} + +static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx) +{ + size_t count; + + while ((rctx->bufcnt < rctx->buflen) && rctx->total) { + count = min(rctx->sg->length - rctx->offset, rctx->total); + count = min(count, rctx->buflen - rctx->bufcnt); + + if (count <= 0) { + if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { + rctx->sg = sg_next(rctx->sg); + continue; + } else { + break; + } + } + + scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg, + rctx->offset, count, 0); + + rctx->bufcnt += count; + rctx->offset += count; + rctx->total -= count; + + if (rctx->offset == rctx->sg->length) { + rctx->sg = sg_next(rctx->sg); + if (rctx->sg) + rctx->offset = 0; + else + rctx->total = 0; + } + } +} + +static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, + const u8 *buf, size_t length, int final) +{ + unsigned int count, len32; + const u32 *buffer = (const u32 *)buf; + u32 reg; + + if (final) + hdev->flags |= HASH_FLAGS_FINAL; + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + dev_dbg(hdev->dev, "%s: length: %d, final: %x len32 %i\n", + __func__, length, final, len32); + + hdev->flags |= HASH_FLAGS_CPU; + + stm32_hash_write_ctrl(hdev); + + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + + if ((hdev->flags & HASH_FLAGS_HMAC) && + (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { + hdev->flags |= HASH_FLAGS_HMAC_KEY; + stm32_hash_write_key(hdev); + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + } + + for (count = 0; count < len32; count++) + stm32_hash_write(hdev, HASH_DIN, buffer[count]); + + if (final) { + stm32_hash_set_nblw(hdev, length); + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + stm32_hash_write_key(hdev); + } + return -EINPROGRESS; + } + + return 0; +} + +static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + int bufcnt, err = 0, final; + + dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags); + + final = (rctx->flags & HASH_FLAGS_FINUP); + + while ((rctx->total >= rctx->buflen) || + (rctx->bufcnt + rctx->total >= rctx->buflen)) { + stm32_hash_append_sg(rctx); + bufcnt = rctx->bufcnt; + rctx->bufcnt = 0; + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0); + } + + stm32_hash_append_sg(rctx); + + if (final) { + bufcnt = rctx->bufcnt; + rctx->bufcnt = 0; + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, + (rctx->flags & HASH_FLAGS_FINUP)); + } + + return err; +} + +static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, + struct scatterlist *sg, int length, int mdma) +{ + struct dma_async_tx_descriptor *in_desc; + dma_cookie_t cookie; + u32 reg; + int err; + + in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!in_desc) { + dev_err(hdev->dev, "dmaengine_prep_slave 
error\n"); + return -ENOMEM; + } + + reinit_completion(&hdev->dma_completion); + in_desc->callback = stm32_hash_dma_callback; + in_desc->callback_param = hdev; + + hdev->flags |= HASH_FLAGS_FINAL; + hdev->flags |= HASH_FLAGS_DMA_ACTIVE; + + reg = stm32_hash_read(hdev, HASH_CR); + + if (mdma) + reg |= HASH_CR_MDMAT; + else + reg &= ~HASH_CR_MDMAT; + + reg |= HASH_CR_DMAE; + + stm32_hash_write(hdev, HASH_CR, reg); + + stm32_hash_set_nblw(hdev, length); + + cookie = dmaengine_submit(in_desc); + err = dma_submit_error(cookie); + if (err) + return -ENOMEM; + + dma_async_issue_pending(hdev->dma_lch); + + if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion, + msecs_to_jiffies(100))) + err = -ETIMEDOUT; + + if (dma_async_is_tx_complete(hdev->dma_lch, cookie, + NULL, NULL) != DMA_COMPLETE) + err = -ETIMEDOUT; + + if (err) { + dev_err(hdev->dev, "DMA Error %i\n", err); + dmaengine_terminate_all(hdev->dma_lch); + return err; + } + + return -EINPROGRESS; +} + +static void stm32_hash_dma_callback(void *param) +{ + struct stm32_hash_dev *hdev = param; + + complete(&hdev->dma_completion); + + hdev->flags |= HASH_FLAGS_DMA_READY; +} + +static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + int err; + + if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) { + err = stm32_hash_write_key(hdev); + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + } else { + if (!(hdev->flags & HASH_FLAGS_HMAC_KEY)) + sg_init_one(&rctx->sg_key, ctx->key, + ALIGN(ctx->keylen, sizeof(u32))); + + rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, + DMA_TO_DEVICE); + if (rctx->dma_ct == 0) { + dev_err(hdev->dev, "dma_map_sg error\n"); + return -ENOMEM; + } + + err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0); + + dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); + } + + return err; +} + +static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) +{ + struct dma_slave_config dma_conf; + int err; + + memset(&dma_conf, 0, sizeof(dma_conf)); + + dma_conf.direction = DMA_MEM_TO_DEV; + dma_conf.dst_addr = hdev->phys_base + HASH_DIN; + dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_conf.src_maxburst = hdev->dma_maxburst; + dma_conf.dst_maxburst = hdev->dma_maxburst; + dma_conf.device_fc = false; + + hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in"); + if (!hdev->dma_lch) { + dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); + return -EBUSY; + } + + err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); + if (err) { + dma_release_channel(hdev->dma_lch); + hdev->dma_lch = NULL; + dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); + return err; + } + + init_completion(&hdev->dma_completion); + + return 0; +} + +static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); + struct scatterlist sg[1], *tsg; + int err = 0, len = 0, reg, ncp; + unsigned int i; + const u32 *buffer = (const u32 *)rctx->buffer; + + rctx->sg = hdev->req->src; + rctx->total = hdev->req->nbytes; + + rctx->nents = sg_nents(rctx->sg); + + if (rctx->nents < 0) + return -EINVAL; + + stm32_hash_write_ctrl(hdev); + + if (hdev->flags & HASH_FLAGS_HMAC) { + err = stm32_hash_hmac_dma_send(hdev); + if (err != -EINPROGRESS) + return err; + } + + for_each_sg(rctx->sg, tsg, rctx->nents, i) { + len = sg->length; + + 
sg[0] = *tsg; + if (sg_is_last(sg)) { + if (hdev->dma_mode == 1) { + len = (ALIGN(sg->length, 16) - 16); + + ncp = sg_pcopy_to_buffer( + rctx->sg, rctx->nents, + rctx->buffer, sg->length - len, + rctx->total - sg->length + len); + + sg->length = len; + } else { + if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { + len = sg->length; + sg->length = ALIGN(sg->length, + sizeof(u32)); + } + } + } + + rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, + DMA_TO_DEVICE); + if (rctx->dma_ct == 0) { + dev_err(hdev->dev, "dma_map_sg error\n"); + return -ENOMEM; + } + + err = stm32_hash_xmit_dma(hdev, sg, len, + !sg_is_last(sg)); + + dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); + + if (err == -ENOMEM) + return err; + } + + if (hdev->dma_mode == 1) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + reg = stm32_hash_read(hdev, HASH_CR); + reg &= ~HASH_CR_DMAE; + reg |= HASH_CR_DMAA; + stm32_hash_write(hdev, HASH_CR, reg); + + for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) + stm32_hash_write(hdev, HASH_DIN, buffer[i]); + + stm32_hash_set_nblw(hdev, ncp); + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + err = -EINPROGRESS; + } + + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + err = stm32_hash_hmac_dma_send(hdev); + } + + return err; +} + +static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) +{ + struct stm32_hash_dev *hdev = NULL, *tmp; + + spin_lock_bh(&stm32_hash.lock); + if (!ctx->hdev) { + list_for_each_entry(tmp, &stm32_hash.dev_list, list) { + hdev = tmp; + break; + } + ctx->hdev = hdev; + } else { + hdev = ctx->hdev; + } + + spin_unlock_bh(&stm32_hash.lock); + + return hdev; +} + +static bool stm32_hash_dma_aligned_data(struct ahash_request *req) +{ + struct scatterlist *sg; + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + int i; + + if (req->nbytes <= HASH_DMA_THRESHOLD) + return false; + + if (sg_nents(req->src) > 1) { + if (hdev->dma_mode == 1) + return false; + for_each_sg(req->src, sg, sg_nents(req->src), i) { + if ((!IS_ALIGNED(sg->length, sizeof(u32))) && + (!sg_is_last(sg))) + return false; + } + } + + if (req->src->offset % 4) + return false; + + return true; +} + +static int stm32_hash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + + rctx->hdev = hdev; + + rctx->flags = HASH_FLAGS_CPU; + + rctx->digcnt = crypto_ahash_digestsize(tfm); + switch (rctx->digcnt) { + case MD5_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_MD5; + break; + case SHA1_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA1; + break; + case SHA224_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA224; + break; + case SHA256_DIGEST_SIZE: + rctx->flags |= HASH_FLAGS_SHA256; + break; + default: + return -EINVAL; + } + + rctx->bufcnt = 0; + rctx->buflen = HASH_BUFLEN; + rctx->total = 0; + rctx->offset = 0; + rctx->data_type = HASH_DATA_8_BITS; + + memset(rctx->buffer, 0, HASH_BUFLEN); + + if (ctx->flags & HASH_FLAGS_HMAC) + rctx->flags |= HASH_FLAGS_HMAC; + + dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags); + + return 0; +} + +static int stm32_hash_update_req(struct stm32_hash_dev *hdev) +{ + return stm32_hash_update_cpu(hdev); +} + +static int stm32_hash_final_req(struct stm32_hash_dev *hdev) 
+{ + struct ahash_request *req = hdev->req; + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + int err; + + if (!(rctx->flags & HASH_FLAGS_CPU)) + err = stm32_hash_dma_send(hdev); + else + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1); + + rctx->bufcnt = 0; + + return err; +} + +static void stm32_hash_copy_hash(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + u32 *hash = (u32 *)rctx->digest; + unsigned int i, hashsize; + + switch (rctx->flags & HASH_FLAGS_ALGO_MASK) { + case HASH_FLAGS_MD5: + hashsize = MD5_DIGEST_SIZE; + break; + case HASH_FLAGS_SHA1: + hashsize = SHA1_DIGEST_SIZE; + break; + case HASH_FLAGS_SHA224: + hashsize = SHA224_DIGEST_SIZE; + break; + case HASH_FLAGS_SHA256: + hashsize = SHA256_DIGEST_SIZE; + break; + default: + return; + } + + for (i = 0; i < hashsize / sizeof(u32); i++) + hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev, + HASH_HREG(i))); +} + +static int stm32_hash_finish(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + + if (!req->result) + return -EINVAL; + + memcpy(req->result, rctx->digest, rctx->digcnt); + + return 0; +} + +static void stm32_hash_finish_req(struct ahash_request *req, int err) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = rctx->hdev; + + if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { + stm32_hash_copy_hash(req); + err = stm32_hash_finish(req); + hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU | + HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY | + HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC | + HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL | + HASH_FLAGS_HMAC_KEY); + } else { + rctx->flags |= HASH_FLAGS_ERRORS; + } + + crypto_finalize_hash_request(hdev->engine, req, err); +} + +static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, + struct stm32_hash_request_ctx *rctx) +{ + if (!(HASH_FLAGS_INIT & hdev->flags)) { + stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); + stm32_hash_write(hdev, HASH_STR, 0); + stm32_hash_write(hdev, HASH_DIN, 0); + stm32_hash_write(hdev, HASH_IMR, 0); + hdev->err = 0; + } + + return 0; +} + +static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev, + struct ahash_request *req) +{ + return crypto_transfer_hash_request_to_engine(hdev->engine, req); +} + +static int stm32_hash_prepare_req(struct crypto_engine *engine, + struct ahash_request *req) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_request_ctx *rctx; + + if (!hdev) + return -ENODEV; + + hdev->req = req; + + rctx = ahash_request_ctx(req); + + dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n", + rctx->op, req->nbytes); + + return stm32_hash_hw_init(hdev, rctx); +} + +static int stm32_hash_one_request(struct crypto_engine *engine, + struct ahash_request *req) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_request_ctx *rctx; + int err = 0; + + if (!hdev) + return -ENODEV; + + hdev->req = req; + + rctx = ahash_request_ctx(req); + + if (rctx->op == HASH_OP_UPDATE) + err = stm32_hash_update_req(hdev); + else if (rctx->op == HASH_OP_FINAL) + err = stm32_hash_final_req(hdev); + + if (err != -EINPROGRESS) + /* done task will not finish it, so do it here */ + stm32_hash_finish_req(req, err); + + return 0; +} + +static int stm32_hash_enqueue(struct ahash_request 
*req, unsigned int op) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct stm32_hash_dev *hdev = ctx->hdev; + + rctx->op = op; + + return stm32_hash_handle_queue(hdev, req); +} + +static int stm32_hash_update(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + int ret; + + if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU)) + return 0; + + rctx->total = req->nbytes; + rctx->sg = req->src; + rctx->offset = 0; + + if ((rctx->bufcnt + rctx->total < rctx->buflen)) { + stm32_hash_append_sg(rctx); + return 0; + } + + ret = stm32_hash_enqueue(req, HASH_OP_UPDATE); + + if (rctx->flags & HASH_FLAGS_FINUP) + return ret; + + return 0; +} + +static int stm32_hash_final(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + + rctx->flags |= HASH_FLAGS_FINUP; + + return stm32_hash_enqueue(req, HASH_OP_FINAL); +} + +static int stm32_hash_finup(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + int err1, err2; + + rctx->flags |= HASH_FLAGS_FINUP; + + if (hdev->dma_lch && stm32_hash_dma_aligned_data(req)) + rctx->flags &= ~HASH_FLAGS_CPU; + + err1 = stm32_hash_update(req); + + if (err1 == -EINPROGRESS || err1 == -EBUSY) + return err1; + + /* + * final() has to be always called to cleanup resources + * even if update() failed, except EINPROGRESS + */ + err2 = stm32_hash_final(req); + + return err1 ?: err2; +} + +static int stm32_hash_digest(struct ahash_request *req) +{ + return stm32_hash_init(req) ?: stm32_hash_finup(req); +} + +static int stm32_hash_export(struct ahash_request *req, void *out) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + u32 *preg; + unsigned int i; + + while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY)) + cpu_relax(); + + rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER), + GFP_KERNEL); + + preg = rctx->hw_context; + + *preg++ = stm32_hash_read(hdev, HASH_IMR); + *preg++ = stm32_hash_read(hdev, HASH_STR); + *preg++ = stm32_hash_read(hdev, HASH_CR); + for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int stm32_hash_import(struct ahash_request *req, const void *in) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + const u32 *preg = in; + u32 reg; + unsigned int i; + + memcpy(rctx, in, sizeof(*rctx)); + + preg = rctx->hw_context; + + stm32_hash_write(hdev, HASH_IMR, *preg++); + stm32_hash_write(hdev, HASH_STR, *preg++); + stm32_hash_write(hdev, HASH_CR, *preg); + reg = *preg++ | HASH_CR_INIT; + stm32_hash_write(hdev, HASH_CR, reg); + + for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + stm32_hash_write(hdev, HASH_CSR(i), *preg++); + + kfree(rctx->hw_context); + + return 0; +} + +static int stm32_hash_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + if (keylen <= HASH_MAX_KEY_SIZE) { + memcpy(ctx->key, key, keylen); + 
ctx->keylen = keylen; + } else { + return -ENOMEM; + } + + return 0; +} + +static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, + const char *algs_hmac_name) +{ + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct stm32_hash_request_ctx)); + + ctx->keylen = 0; + + if (algs_hmac_name) + ctx->flags |= HASH_FLAGS_HMAC; + + return 0; +} + +static int stm32_hash_cra_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, NULL); +} + +static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "md5"); +} + +static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha1"); +} + +static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha224"); +} + +static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm) +{ + return stm32_hash_cra_init_algs(tfm, "sha256"); +} + +static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) +{ + struct stm32_hash_dev *hdev = dev_id; + int err; + + if (HASH_FLAGS_CPU & hdev->flags) { + if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; + goto finish; + } + } else if (HASH_FLAGS_DMA_READY & hdev->flags) { + if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; + goto finish; + } + } + + return IRQ_HANDLED; + +finish: + /*Finish current request */ + stm32_hash_finish_req(hdev->req, err); + + return IRQ_HANDLED; +} + +static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id) +{ + struct stm32_hash_dev *hdev = dev_id; + u32 reg; + + reg = stm32_hash_read(hdev, HASH_SR); + if (reg & HASH_SR_OUTPUT_READY) { + reg &= ~HASH_SR_OUTPUT_READY; + stm32_hash_write(hdev, HASH_SR, reg); + hdev->flags |= HASH_FLAGS_OUTPUT_READY; + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static struct ahash_alg algs_md5_sha1[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "md5", + .cra_driver_name = "stm32-md5", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "stm32-hmac-md5", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_md5_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = 
stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha1", + .cra_driver_name = "stm32-sha1", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "stm32-hmac-sha1", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha1_init, + .cra_module = THIS_MODULE, + } + } + }, +}; + +static struct ahash_alg algs_sha224_sha256[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha224", + .cra_driver_name = "stm32-sha224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .setkey = stm32_hash_setkey, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "stm32-hmac-sha224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha224_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "sha256", + .cra_driver_name = "stm32-sha256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = 
stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_request_ctx), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "stm32-hmac-sha256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha256_init, + .cra_module = THIS_MODULE, + } + } + }, +}; + +static int stm32_hash_register_algs(struct stm32_hash_dev *hdev) +{ + unsigned int i, j; + int err; + + for (i = 0; i < hdev->pdata->algs_info_size; i++) { + for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { + err = crypto_register_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + if (err) + goto err_algs; + } + } + + return 0; +err_algs: + dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); + for (; i--; ) { + for (; j--;) + crypto_unregister_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + } + + return err; +} + +static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev) +{ + unsigned int i, j; + + for (i = 0; i < hdev->pdata->algs_info_size; i++) { + for (j = 0; j < hdev->pdata->algs_info[i].size; j++) + crypto_unregister_ahash( + &hdev->pdata->algs_info[i].algs_list[j]); + } + + return 0; +} + +static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = { + { + .algs_list = algs_md5_sha1, + .size = ARRAY_SIZE(algs_md5_sha1), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = { + .algs_info = stm32_hash_algs_info_stm32f4, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4), +}; + +static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = { + { + .algs_list = algs_md5_sha1, + .size = ARRAY_SIZE(algs_md5_sha1), + }, + { + .algs_list = algs_sha224_sha256, + .size = ARRAY_SIZE(algs_sha224_sha256), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = { + .algs_info = stm32_hash_algs_info_stm32f7, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7), +}; + +static const struct of_device_id stm32_hash_of_match[] = { + { + .compatible = "st,stm32f456-hash", + .data = &stm32_hash_pdata_stm32f4, + }, + { + .compatible = "st,stm32f756-hash", + .data = &stm32_hash_pdata_stm32f7, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, stm32_hash_of_match); + +static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, + struct device *dev) +{ + const struct of_device_id *match; + int err; + + match = of_match_device(stm32_hash_of_match, dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + return -EINVAL; + } + + err = of_property_read_u32(dev->of_node, "dma-maxburst", + &hdev->dma_maxburst); + + hdev->pdata = match->data; + + return err; +} + +static int stm32_hash_probe(struct platform_device *pdev) +{ + struct stm32_hash_dev *hdev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, irq; + + hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdev->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(hdev->io_base)) + return PTR_ERR(hdev->io_base); + + hdev->phys_base = res->start; + + ret = stm32_hash_get_of_match(hdev, dev); + if (ret) + return ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot get IRQ resource\n"); + return irq; + } + + ret = 
devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, + stm32_hash_irq_thread, IRQF_ONESHOT, + dev_name(dev), hdev); + if (ret) { + dev_err(dev, "Cannot grab IRQ\n"); + return ret; + } + + hdev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hdev->clk)) { + dev_err(dev, "failed to get clock for hash (%lu)\n", + PTR_ERR(hdev->clk)); + return PTR_ERR(hdev->clk); + } + + ret = clk_prepare_enable(hdev->clk); + if (ret) { + dev_err(dev, "failed to enable hash clock (%d)\n", ret); + return ret; + } + + hdev->rst = devm_reset_control_get(&pdev->dev, NULL); + if (!IS_ERR(hdev->rst)) { + reset_control_assert(hdev->rst); + udelay(2); + reset_control_deassert(hdev->rst); + } + + hdev->dev = dev; + + platform_set_drvdata(pdev, hdev); + + ret = stm32_hash_dma_init(hdev); + if (ret) + dev_dbg(dev, "DMA mode not available\n"); + + spin_lock(&stm32_hash.lock); + list_add_tail(&hdev->list, &stm32_hash.dev_list); + spin_unlock(&stm32_hash.lock); + + /* Initialize crypto engine */ + hdev->engine = crypto_engine_alloc_init(dev, 1); + if (!hdev->engine) { + ret = -ENOMEM; + goto err_engine; + } + + hdev->engine->prepare_hash_request = stm32_hash_prepare_req; + hdev->engine->hash_one_request = stm32_hash_one_request; + + ret = crypto_engine_start(hdev->engine); + if (ret) + goto err_engine_start; + + hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR); + + /* Register algos */ + ret = stm32_hash_register_algs(hdev); + if (ret) + goto err_algs; + + dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n", + stm32_hash_read(hdev, HASH_VER), hdev->dma_mode); + + return 0; + +err_algs: +err_engine_start: + crypto_engine_exit(hdev->engine); +err_engine: + spin_lock(&stm32_hash.lock); + list_del(&hdev->list); + spin_unlock(&stm32_hash.lock); + + if (hdev->dma_lch) + dma_release_channel(hdev->dma_lch); + + clk_disable_unprepare(hdev->clk); + + return ret; +} + +static int stm32_hash_remove(struct platform_device *pdev) +{ + static struct stm32_hash_dev *hdev; + + hdev = platform_get_drvdata(pdev); + if (!hdev) + return -ENODEV; + + stm32_hash_unregister_algs(hdev); + + crypto_engine_exit(hdev->engine); + + spin_lock(&stm32_hash.lock); + list_del(&hdev->list); + spin_unlock(&stm32_hash.lock); + + if (hdev->dma_lch) + dma_release_channel(hdev->dma_lch); + + clk_disable_unprepare(hdev->clk); + + return 0; +} + +static struct platform_driver stm32_hash_driver = { + .probe = stm32_hash_probe, + .remove = stm32_hash_remove, + .driver = { + .name = "stm32-hash", + .of_match_table = stm32_hash_of_match, + } +}; + +module_platform_driver(stm32_hash_driver); + +MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver"); +MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index ec83b1e6bfe8..090582baecfe 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -107,12 +107,12 @@ static int stm32_crc_init(struct shash_desc *desc) spin_unlock_bh(&crc_list.lock); /* Reset, set key, poly and configure in bit reverse mode */ - writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = 
readl(ctx->crc->regs + CRC_DR); + ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); ctx->crc->nb_pending_bytes = 0; return 0; @@ -135,7 +135,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, if (crc->nb_pending_bytes == sizeof(u32)) { /* Process completed pending data */ - writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR); + writel_relaxed(*(u32 *)crc->pending_data, + crc->regs + CRC_DR); crc->nb_pending_bytes = 0; } } @@ -143,10 +144,10 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, d32 = (u32 *)d8; for (i = 0; i < length >> 2; i++) /* Process 32 bits data */ - writel(*(d32++), crc->regs + CRC_DR); + writel_relaxed(*(d32++), crc->regs + CRC_DR); /* Store partial result */ - ctx->partial = readl(crc->regs + CRC_DR); + ctx->partial = readl_relaxed(crc->regs + CRC_DR); /* Check for pending data (non 32 bits) */ length &= 3; @@ -295,7 +296,7 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shash(algs); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); clk_disable_unprepare(crc->clk); diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile index 8f4c7a273141..ccb893219079 100644 --- a/drivers/crypto/sunxi-ss/Makefile +++ b/drivers/crypto/sunxi-ss/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o +sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 02ad8256e900..1547cbe13dc2 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -213,6 +213,23 @@ static struct sun4i_ss_alg_template ss_algs[] = { } } }, +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun4i_ss_rng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + }, + .generate = sun4i_ss_prng_generate, + .seed = sun4i_ss_prng_seed, + .seedsize = SS_SEED_LEN / BITS_PER_BYTE, + } +}, +#endif }; static int sun4i_ss_probe(struct platform_device *pdev) @@ -355,6 +372,13 @@ static int sun4i_ss_probe(struct platform_device *pdev) goto error_alg; } break; + case CRYPTO_ALG_TYPE_RNG: + err = crypto_register_rng(&ss_algs[i].alg.rng); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.rng.base.cra_name); + } + break; } } platform_set_drvdata(pdev, ss); @@ -369,6 +393,9 @@ error_alg: case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; } } if (ss->reset) @@ -393,6 +420,9 @@ static int sun4i_ss_remove(struct platform_device *pdev) case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; } } diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c new file mode 100644 index 000000000000..0d01d1624252 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -0,0 +1,56 @@ +#include "sun4i-ss.h" + +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + 
memcpy(algt->ss->seed, seed, slen); + + return 0; +} + +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + int i; + u32 v; + u32 *data = (u32 *)dst; + const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED; + size_t len; + struct sun4i_ss_ctx *ss; + unsigned int todo = (dlen / 4) * 4; + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + ss = algt->ss; + + spin_lock(&ss->slock); + + writel(mode, ss->base + SS_CTL); + + while (todo > 0) { + /* write the seed */ + for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) + writel(ss->seed[i], ss->base + SS_KEY0 + i * 4); + + /* Read the random data */ + len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo); + readsl(ss->base + SS_TXFIFO, data, len / 4); + data += len / 4; + todo -= len; + + /* Update the seed */ + for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) { + v = readl(ss->base + SS_KEY0 + i * 4); + ss->seed[i] = v; + } + } + + writel(0, ss->base + SS_CTL); + spin_unlock(&ss->slock); + return dlen; +} diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index a0e1efc1cb2a..f3ac90692ac6 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -32,6 +32,7 @@ #include <crypto/aes.h> #include <crypto/des.h> #include <crypto/internal/rng.h> +#include <crypto/rng.h> #define SS_CTL 0x00 #define SS_KEY0 0x04 @@ -127,6 +128,9 @@ #define SS_RXFIFO_EMP_INT_ENABLE (1 << 2) #define SS_TXFIFO_AVA_INT_ENABLE (1 << 0) +#define SS_SEED_LEN 192 +#define SS_DATA_LEN 160 + struct sun4i_ss_ctx { void __iomem *base; int irq; @@ -136,6 +140,9 @@ struct sun4i_ss_ctx { struct device *dev; struct resource *res; spinlock_t slock; /* control the use of the device */ +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG + u32 seed[SS_SEED_LEN / BITS_PER_LONG]; +#endif }; struct sun4i_ss_alg_template { @@ -144,6 +151,7 @@ struct sun4i_ss_alg_template { union { struct skcipher_alg crypto; struct ahash_alg hash; + struct rng_alg rng; } alg; struct sun4i_ss_ctx *ss; }; @@ -201,3 +209,6 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen); +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 49defda4e03d..5035b0dc1e40 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -27,12 +27,68 @@ #include <uapi/linux/virtio_crypto.h> #include "virtio_crypto_common.h" + +struct virtio_crypto_ablkcipher_ctx { + struct virtio_crypto *vcrypto; + struct crypto_tfm *tfm; + + struct virtio_crypto_sym_session_info enc_sess_info; + struct virtio_crypto_sym_session_info dec_sess_info; +}; + +struct virtio_crypto_sym_request { + struct virtio_crypto_request base; + + /* Cipher or aead */ + uint32_t type; + struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; + struct ablkcipher_request *ablkcipher_req; + uint8_t *iv; + /* Encryption? */ + bool encrypt; +}; + /* * The algs_lock protects the below global virtio_crypto_active_devs * and crypto algorithms registion. 
*/ static DEFINE_MUTEX(algs_lock); static unsigned int virtio_crypto_active_devs; +static void virtio_crypto_ablkcipher_finalize_req( + struct virtio_crypto_sym_request *vc_sym_req, + struct ablkcipher_request *req, + int err); + +static void virtio_crypto_dataq_sym_callback + (struct virtio_crypto_request *vc_req, int len) +{ + struct virtio_crypto_sym_request *vc_sym_req = + container_of(vc_req, struct virtio_crypto_sym_request, base); + struct ablkcipher_request *ablk_req; + int error; + + /* Finish the encrypt or decrypt process */ + if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { + switch (vc_req->status) { + case VIRTIO_CRYPTO_OK: + error = 0; + break; + case VIRTIO_CRYPTO_INVSESS: + case VIRTIO_CRYPTO_ERR: + error = -EINVAL; + break; + case VIRTIO_CRYPTO_BADMSG: + error = -EBADMSG; + break; + default: + error = -EIO; + break; + } + ablk_req = vc_sym_req->ablkcipher_req; + virtio_crypto_ablkcipher_finalize_req(vc_sym_req, + ablk_req, error); + } +} static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg) { @@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, } static int -__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, +__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, struct ablkcipher_request *req, struct data_queue *data_vq) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx; + struct virtio_crypto_request *vc_req = &vc_sym_req->base; unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); - struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx; struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data; int src_nents, dst_nents; @@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, } vc_req->req_data = req_data; - vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; + vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER; /* Head of operation */ - if (vc_req->encrypt) { + if (vc_sym_req->encrypt) { req_data->header.session_id = cpu_to_le64(ctx->enc_sess_info.session_id); req_data->header.opcode = @@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req, memcpy(iv, req->info, ivsize); sg_init_one(&iv_sg, iv, ivsize); sgs[num_out++] = &iv_sg; - vc_req->iv = iv; + vc_sym_req->iv = iv; /* Source data */ for (i = 0; i < src_nents; i++) @@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; - vc_req->ablkcipher_ctx = ctx; - vc_req->ablkcipher_req = req; - vc_req->encrypt = true; vc_req->dataq = data_vq; + vc_req->alg_cb = virtio_crypto_dataq_sym_callback; + vc_sym_req->ablkcipher_ctx = ctx; + vc_sym_req->ablkcipher_req = req; + vc_sym_req->encrypt = true; return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); } @@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *atfm = 
crypto_ablkcipher_reqtfm(req); struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; - vc_req->ablkcipher_ctx = ctx; - vc_req->ablkcipher_req = req; - - vc_req->encrypt = false; vc_req->dataq = data_vq; + vc_req->alg_cb = virtio_crypto_dataq_sym_callback; + vc_sym_req->ablkcipher_ctx = ctx; + vc_sym_req->ablkcipher_req = req; + vc_sym_req->encrypt = false; return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); } @@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm) { struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); - tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request); + tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request); ctx->tfm = tfm; return 0; @@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req( struct crypto_engine *engine, struct ablkcipher_request *req) { - struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req); + struct virtio_crypto_sym_request *vc_sym_req = + ablkcipher_request_ctx(req); + struct virtio_crypto_request *vc_req = &vc_sym_req->base; struct data_queue *data_vq = vc_req->dataq; int ret; - ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq); + ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq); if (ret < 0) return ret; @@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req( return 0; } -void virtio_crypto_ablkcipher_finalize_req( - struct virtio_crypto_request *vc_req, +static void virtio_crypto_ablkcipher_finalize_req( + struct virtio_crypto_sym_request *vc_sym_req, struct ablkcipher_request *req, int err) { - crypto_finalize_cipher_request(vc_req->dataq->engine, req, err); - - virtcrypto_clear_request(vc_req); + crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, + req, err); + kzfree(vc_sym_req->iv); + virtcrypto_clear_request(&vc_sym_req->base); } static struct crypto_alg virtio_crypto_algs[] = { { diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h index da6d8c0ea407..e976539a05d9 100644 --- a/drivers/crypto/virtio/virtio_crypto_common.h +++ b/drivers/crypto/virtio/virtio_crypto_common.h @@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info { __u64 session_id; }; -struct virtio_crypto_ablkcipher_ctx { - struct virtio_crypto *vcrypto; - struct crypto_tfm *tfm; - - struct virtio_crypto_sym_session_info enc_sess_info; - struct virtio_crypto_sym_session_info dec_sess_info; -}; +struct virtio_crypto_request; +typedef void (*virtio_crypto_data_callback) + (struct virtio_crypto_request *vc_req, int len); struct virtio_crypto_request { - /* Cipher or aead */ - uint32_t type; uint8_t status; - struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; - struct ablkcipher_request *ablkcipher_req; struct virtio_crypto_op_data_req *req_data; struct scatterlist **sgs; - uint8_t *iv; - /* Encryption? 
*/ - bool encrypt; struct data_queue *dataq; + virtio_crypto_data_callback alg_cb; }; int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev); @@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto); int virtio_crypto_ablkcipher_crypt_req( struct crypto_engine *engine, struct ablkcipher_request *req); -void virtio_crypto_ablkcipher_finalize_req( - struct virtio_crypto_request *vc_req, - struct ablkcipher_request *req, - int err); void virtcrypto_clear_request(struct virtio_crypto_request *vc_req); diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index a111cd72797b..ff1410a32c2b 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -29,7 +29,6 @@ void virtcrypto_clear_request(struct virtio_crypto_request *vc_req) { if (vc_req) { - kzfree(vc_req->iv); kzfree(vc_req->req_data); kfree(vc_req->sgs); } @@ -41,40 +40,18 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq) struct virtio_crypto_request *vc_req; unsigned long flags; unsigned int len; - struct ablkcipher_request *ablk_req; - int error; unsigned int qid = vq->index; spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); do { virtqueue_disable_cb(vq); while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { - if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { - switch (vc_req->status) { - case VIRTIO_CRYPTO_OK: - error = 0; - break; - case VIRTIO_CRYPTO_INVSESS: - case VIRTIO_CRYPTO_ERR: - error = -EINVAL; - break; - case VIRTIO_CRYPTO_BADMSG: - error = -EBADMSG; - break; - default: - error = -EIO; - break; - } - ablk_req = vc_req->ablkcipher_req; - - spin_unlock_irqrestore( - &vcrypto->data_vq[qid].lock, flags); - /* Finish the encrypt or decrypt process */ - virtio_crypto_ablkcipher_finalize_req(vc_req, - ablk_req, error); - spin_lock_irqsave( - &vcrypto->data_vq[qid].lock, flags); - } + spin_unlock_irqrestore( + &vcrypto->data_vq[qid].lock, flags); + if (vc_req->alg_cb) + vc_req->alg_cb(vc_req, len); + spin_lock_irqsave( + &vcrypto->data_vq[qid].lock, flags); } } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); @@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto) return -EPERM; } - dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n"); + dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n"); } else { virtcrypto_dev_stop(vcrypto); dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n"); diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 9c26d9e8dbea..17d84217dd76 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -104,8 +104,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, pagefault_enable(); preempt_enable(); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } |