From 9739f5f93b7806a684713ba42e6ed2d1df7c8100 Mon Sep 17 00:00:00 2001
From: Christian Marangi
Date: Tue, 14 Jan 2025 13:36:36 +0100
Subject: crypto: eip93 - Add Inside Secure SafeXcel EIP-93 crypto engine support

Add support for the Inside Secure SafeXcel EIP-93 Crypto Engine used on
the MediaTek MT7621 SoC and newer Airoha SoCs.

The EIP-93 IP supports the AES/DES/3DES ciphers in ECB, CBC and CTR
modes, as well as authenc(HMAC(x), cipher(y)) using HMAC with MD5, SHA1,
SHA224 and SHA256.

The EIP-93 provides registers that signal which ciphers the hardware
supports, and the driver dynamically registers only the algorithms
supported by the chip.

Signed-off-by: Richard van Schagen
Co-developed-by: Christian Marangi
Signed-off-by: Christian Marangi
Signed-off-by: Herbert Xu
---
 drivers/crypto/Kconfig                            |   1 +
 drivers/crypto/Makefile                           |   1 +
 drivers/crypto/inside-secure/eip93/Kconfig        |  20 +
 drivers/crypto/inside-secure/eip93/Makefile       |   5 +
 drivers/crypto/inside-secure/eip93/eip93-aead.c   | 711 ++++++++++++++++++
 drivers/crypto/inside-secure/eip93/eip93-aead.h   |  38 +
 drivers/crypto/inside-secure/eip93/eip93-aes.h    |  16 +
 drivers/crypto/inside-secure/eip93/eip93-cipher.c | 413 +++++++++++
 drivers/crypto/inside-secure/eip93/eip93-cipher.h |  60 ++
 drivers/crypto/inside-secure/eip93/eip93-common.c | 809 ++++++++++++++++++++
 drivers/crypto/inside-secure/eip93/eip93-common.h |  24 +
 drivers/crypto/inside-secure/eip93/eip93-des.h    |  16 +
 drivers/crypto/inside-secure/eip93/eip93-hash.c   | 866 ++++++++++++++++++++++
 drivers/crypto/inside-secure/eip93/eip93-hash.h   |  82 ++
 drivers/crypto/inside-secure/eip93/eip93-main.c   | 501 +++++++++++++
 drivers/crypto/inside-secure/eip93/eip93-main.h   | 151 ++++
 drivers/crypto/inside-secure/eip93/eip93-regs.h   | 335 +++++++++
 17 files changed, 4049 insertions(+)
 create mode 100644 drivers/crypto/inside-secure/eip93/Kconfig
 create mode 100644 drivers/crypto/inside-secure/eip93/Makefile
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aead.c
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aead.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-aes.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-cipher.c
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-cipher.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-common.c
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-common.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-des.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-hash.c
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-hash.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-main.c
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-main.h
 create mode 100644 drivers/crypto/inside-secure/eip93/eip93-regs.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 19ab145f912e..47082782008a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -855,5 +855,6 @@ config CRYPTO_DEV_SA2UL
 source "drivers/crypto/aspeed/Kconfig"
 source "drivers/crypto/starfive/Kconfig"
+source "drivers/crypto/inside-secure/eip93/Kconfig"
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fef18ffdb128..62c7ce3c9d3e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -50,3 +50,4 @@ obj-y += hisilicon/
 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
 obj-y += intel/
 obj-y += starfive/
+obj-y += inside-secure/eip93/
diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig
new file mode 100644
index 000000000000..8353d3d7ec9b
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_EIP93
+	tristate "Support for EIP93 crypto HW accelerators"
+	depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST
+	select CRYPTO_LIB_AES
+	select CRYPTO_LIB_DES
+	select CRYPTO_SKCIPHER
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	help
+	  EIP93 has various crypto HW accelerators. Select this if
+	  you want to use the EIP93 modules for any of the crypto algorithms.
+
+	  If the IP supports it, this provides offload for AES in ECB, CBC and
+	  CTR modes. It also provides DES and 3DES in ECB and CBC modes.
+
+	  AEAD authenc(hmac(x), cipher(y)) is also provided for the supported algorithms.
diff --git a/drivers/crypto/inside-secure/eip93/Makefile b/drivers/crypto/inside-secure/eip93/Makefile
new file mode 100644
index 000000000000..a3d3d3677cdc
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o
+
+crypto-hw-eip93-y += eip93-main.o eip93-common.o
+crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o
+crypto-hw-eip93-y += eip93-hash.o
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c
new file mode 100644
index 000000000000..18dd8a9a5165
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen
+ * Christian Marangi
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+#include "eip93-aead.h"
+#include "eip93-cipher.h"
+#include "eip93-common.h"
+#include "eip93-regs.h"
+
+void eip93_aead_handle_result(struct crypto_async_request *async, int err)
+{
+	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+	struct eip93_device *eip93 = ctx->eip93;
+	struct aead_request *req = aead_request_cast(async);
+	struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+	eip93_unmap_dma(eip93, rctx, req->src, req->dst);
+	eip93_handle_result(eip93, rctx, req->iv);
+
+	aead_request_complete(req, err);
+}
+
+static int eip93_aead_send_req(struct crypto_async_request *async)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+	int err;
+
+	err = check_valid_request(rctx);
+	if (err) {
+		aead_request_complete(req, err);
+		return err;
+	}
+
+	return eip93_send_req(async, req->iv, rctx);
+}
+
+/* Crypto aead API functions */
+static int eip93_aead_cra_init(struct crypto_tfm *tfm)
+{
+	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+				struct eip93_alg_template, alg.aead.base);
+
+	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+				sizeof(struct eip93_cipher_reqctx));
+
+	ctx->eip93 = tmpl->eip93;
+	ctx->flags = tmpl->flags;
+	ctx->type = tmpl->type;
+	ctx->set_assoc = true;
+
+	ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
+	if (!ctx->sa_record)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void eip93_aead_cra_exit(struct crypto_tfm *tfm)
+{
+	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base,
+			 sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+	kfree(ctx->sa_record);
+}
+
+static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
+			     unsigned int
len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_authenc_keys keys; + struct crypto_aes_ctx aes; + struct sa_record *sa_record = ctx->sa_record; + u32 nonce = 0; + int ret; + + if (crypto_authenc_extractkeys(&keys, key, len)) + return -EINVAL; + + if (IS_RFC3686(ctx->flags)) { + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, keys.enckey + keys.enckeylen, + CTR_RFC3686_NONCE_SIZE); + } + + switch ((ctx->flags & EIP93_ALG_MASK)) { + case EIP93_ALG_DES: + ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_3DES: + if (keys.enckeylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + + ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_AES: + ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + } + + ctx->blksize = crypto_aead_blocksize(ctfm); + /* Encryption key */ + eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + /* authentication key */ + ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen, + ctx->authsize, sa_record->sa_i_digest, + sa_record->sa_o_digest, false); + + ctx->set_assoc = true; + + return ret; +} + +static int eip93_aead_setauthsize(struct crypto_aead *ctfm, + unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->authsize = authsize; + ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + return 0; +} + +static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx, + struct aead_request *req) +{ + struct sa_record *sa_record = ctx->sa_record; + + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET, + req->assoclen / sizeof(u32)); + + ctx->assoclen = req->assoclen; +} + +static int eip93_aead_crypt(struct aead_request *req) +{ + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + struct crypto_async_request *async = &req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ret; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->textsize = req->cryptlen; + rctx->blksize = ctx->blksize; + rctx->assoclen = req->assoclen; + rctx->authsize = ctx->authsize; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_aead_ivsize(aead); + rctx->desc_flags = EIP93_DESC_AEAD; + rctx->sa_record_base = ctx->sa_record_base; + + if (IS_DECRYPT(rctx->flags)) + rctx->textsize -= rctx->authsize; + + return eip93_aead_send_req(async); +} + +static int eip93_aead_encrypt(struct aead_request *req) 
+{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_ENCRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +static int eip93_aead_decrypt(struct aead_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD | + EIP93_SA_CMD_COPY_DIGEST); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_DECRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +/* Available authenc algorithms in this module */ +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = + "authenc(hmac(md5-eip93), cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + 
.cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + 
.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags 
= CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))", + 
.cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h new file mode 100644 index 000000000000..e2fa8fd39c50 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi + * Christian Marangi + * Christian Marangi +#include +#include +#include + +#include "eip93-aes.h" +#include "eip93-cipher.h" +#include "eip93-common.h" +#include "eip93-des.h" +#include "eip93-regs.h" + +void eip93_skcipher_handle_result(struct crypto_async_request *async, int err) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + + eip93_unmap_dma(eip93, rctx, req->src, req->dst); + eip93_handle_result(eip93, rctx, req->iv); + + skcipher_request_complete(req, err); +} + +static int eip93_skcipher_send_req(struct crypto_async_request *async) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + int err; + + err = check_valid_request(rctx); + + if (err) { + skcipher_request_complete(req, err); + return err; + } + + return eip93_send_req(async, req->iv, rctx); +} + +/* Crypto skcipher API functions */ +static int eip93_skcipher_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct eip93_cipher_reqctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ctx->eip93 = tmpl->eip93; + ctx->type = tmpl->type; + + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); + if (!ctx->sa_record) + return -ENOMEM; + + return 0; +} + +static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + kfree(ctx->sa_record); +} + +static int 
eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, + alg.skcipher.base); + struct sa_record *sa_record = ctx->sa_record; + unsigned int keylen = len; + u32 flags = tmpl->flags; + u32 nonce = 0; + int ret; + + if (!key || !keylen) + return -EINVAL; + + if (IS_RFC3686(flags)) { + if (len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keylen = len - CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE); + } + + if (flags & EIP93_ALG_DES) { + ctx->blksize = DES_BLOCK_SIZE; + ret = verify_skcipher_des_key(ctfm, key); + if (ret) + return ret; + } + if (flags & EIP93_ALG_3DES) { + ctx->blksize = DES3_EDE_BLOCK_SIZE; + ret = verify_skcipher_des3_key(ctfm, key); + if (ret) + return ret; + } + + if (flags & EIP93_ALG_AES) { + struct crypto_aes_ctx aes; + + ctx->blksize = AES_BLOCK_SIZE; + ret = aes_expandkey(&aes, key, keylen); + if (ret) + return ret; + } + + eip93_set_sa_record(sa_record, keylen, flags); + + memcpy(sa_record->sa_key, key, keylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + return 0; +} + +static int eip93_skcipher_crypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_async_request *async = &req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ret; + + if (!req->cryptlen) + return 0; + + /* + * ECB and CBC algorithms require message lengths to be + * multiples of block size. + */ + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) + if (!IS_ALIGNED(req->cryptlen, + crypto_skcipher_blocksize(skcipher))) + return -EINVAL; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->assoclen = 0; + rctx->textsize = req->cryptlen; + rctx->authsize = 0; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_skcipher_ivsize(skcipher); + rctx->blksize = ctx->blksize; + rctx->desc_flags = EIP93_DESC_SKCIPHER; + rctx->sa_record_base = ctx->sa_record_base; + + return eip93_skcipher_send_req(async); +} + +static int eip93_skcipher_encrypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_ENCRYPT; + + return eip93_skcipher_crypt(req); +} + +static int eip93_skcipher_decrypt(struct skcipher_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_DECRYPT; + + return eip93_skcipher_crypt(req); +} + +/* Available algorithms in this module */ +struct eip93_alg_template eip93_alg_ecb_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = 
eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ctr_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_rfc3686_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686(ctr(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ebc(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h new file mode 100644 index 000000000000..6e2545ebd879 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi + * Christian Marangi +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-common.h" +#include "eip93-main.h" +#include "eip93-regs.h" + +int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err) +{ + u32 ext_err; + + if (!err) + return 0; + + switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) { + case EIP93_PE_CTRL_PE_AUTH_ERR: + case EIP93_PE_CTRL_PE_PAD_ERR: + return -EBADMSG; + /* let software handle anti-replay errors */ + case EIP93_PE_CTRL_PE_SEQNUM_ERR: + return 0; + case 
EIP93_PE_CTRL_PE_EXT_ERR: + break; + default: + dev_err(eip93->dev, "Unhandled error 0x%08x\n", err); + return -EINVAL; + } + + /* Parse additional ext errors */ + ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err); + switch (ext_err) { + case EIP93_PE_CTRL_PE_EXT_ERR_BUS: + case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING: + return -EIO; + case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER: + return -EACCES; + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO: + case EIP93_PE_CTRL_PE_EXT_ERR_SPI: + return -EINVAL; + case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR: + return -EBADMSG; + default: + dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err); + return -EINVAL; + } +} + +static void *eip93_ring_next_wptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->write; + + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) + return ERR_PTR(-ENOMEM); + + if (ring->write == ring->base_end) + ring->write = ring->base; + else + ring->write += ring->offset; + + return ptr; +} + +static void *eip93_ring_next_rptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->read; + + if (ring->write == ring->read) + return ERR_PTR(-ENOENT); + + if (ring->read == ring->base_end) + ring->read = ring->base; + else + ring->read += ring->offset; + + return ptr; +} + +int eip93_put_descriptor(struct eip93_device *eip93, + struct eip93_descriptor *desc) +{ + struct eip93_descriptor *cdesc; + struct eip93_descriptor *rdesc; + + rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr); + if (IS_ERR(rdesc)) + return -ENOENT; + + cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return -ENOENT; + + memset(rdesc, 0, sizeof(struct eip93_descriptor)); + + memcpy(cdesc, desc, sizeof(struct eip93_descriptor)); + + return 0; +} + +void *eip93_get_descriptor(struct eip93_device *eip93) +{ + struct eip93_descriptor *cdesc; + void *ptr; + + cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return ERR_PTR(-ENOENT); + + memset(cdesc, 0, sizeof(struct eip93_descriptor)); + + ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr); + if (IS_ERR(ptr)) + return ERR_PTR(-ENOENT); + + return ptr; +} + +static void eip93_free_sg_copy(const int len, struct scatterlist **sg) +{ + if (!*sg || !len) + return; + + free_pages((unsigned long)sg_virt(*sg), get_order(len)); + kfree(*sg); + *sg = NULL; +} + +static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst, + const u32 len, const bool copy) +{ + void *pages; + + *dst = kmalloc(sizeof(**dst), GFP_KERNEL); + if (!*dst) + return -ENOMEM; + + pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, + get_order(len)); + if (!pages) { + kfree(*dst); + *dst = NULL; + return -ENOMEM; + } + + sg_init_table(*dst, 1); + sg_set_buf(*dst, pages, len); + + /* copy only as requested */ + if (copy) + sg_copy_to_buffer(src, sg_nents(src), pages, len); + + return 0; +} + +static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len, + const int blksize) +{ + int nents; + + for (nents = 0; sg; sg = sg_next(sg), ++nents) { + if (!IS_ALIGNED(sg->offset, 4)) + return false; + + if (len <= sg->length) { + if (!IS_ALIGNED(len, blksize)) + return false; + + return true; + } + + if (!IS_ALIGNED(sg->length, blksize)) + return false; + + len -= sg->length; + } + return false; +} + 
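+/*
+ * Validate a request before it is queued to the engine: ECB/CBC payloads
+ * must be block-size aligned, and the source/destination scatterlists must
+ * be usable as-is (32-bit aligned offsets, block-sized segments). If they
+ * are not, fall back to a linear bounce buffer via eip93_make_sg_copy().
+ */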
+int check_valid_request(struct eip93_cipher_reqctx *rctx) +{ + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + u32 src_nents, dst_nents; + u32 textsize = rctx->textsize; + u32 authsize = rctx->authsize; + u32 blksize = rctx->blksize; + u32 totlen_src = rctx->assoclen + rctx->textsize; + u32 totlen_dst = rctx->assoclen + rctx->textsize; + u32 copy_len; + bool src_align, dst_align; + int err = -EINVAL; + + if (!IS_CTR(rctx->flags)) { + if (!IS_ALIGNED(textsize, blksize)) + return err; + } + + if (authsize) { + if (IS_ENCRYPT(rctx->flags)) + totlen_dst += authsize; + else + totlen_src += authsize; + } + + src_nents = sg_nents_for_len(src, totlen_src); + dst_nents = sg_nents_for_len(dst, totlen_dst); + + if (src == dst) { + src_nents = max(src_nents, dst_nents); + dst_nents = src_nents; + if (unlikely((totlen_src || totlen_dst) && src_nents <= 0)) + return err; + + } else { + if (unlikely(totlen_src && src_nents <= 0)) + return err; + + if (unlikely(totlen_dst && dst_nents <= 0)) + return err; + } + + if (authsize) { + if (dst_nents == 1 && src_nents == 1) { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } else { + src_align = false; + dst_align = false; + } + } else { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } + + copy_len = max(totlen_src, totlen_dst); + if (!src_align) { + err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true); + if (err) + return err; + } + + if (!dst_align) { + err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false); + if (err) + return err; + } + + rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src); + rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst); + + return 0; +} + +/* + * Set sa_record function: + * Even sa_record is set to "0", keep " = 0" for readability. 
+ */ +void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, + const u32 flags) +{ + /* Reset cmd word */ + sa_record->sa_cmd0_word = 0; + sa_record->sa_cmd1_word = 0; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE; + if (!IS_ECB(flags)) + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC; + + switch ((flags & EIP93_ALG_MASK)) { + case EIP93_ALG_AES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, + keylen >> 3); + break; + case EIP93_ALG_3DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES; + break; + case EIP93_ALG_DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL; + } + + switch ((flags & EIP93_HASH_MASK)) { + case EIP93_HASH_SHA256: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256; + break; + case EIP93_HASH_SHA224: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224; + break; + case EIP93_HASH_SHA1: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1; + break; + case EIP93_HASH_MD5: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO; + + switch ((flags & EIP93_MODE_MASK)) { + case EIP93_MODE_CBC: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC; + break; + case EIP93_MODE_CTR: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR; + break; + case EIP93_MODE_ECB: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB; + break; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD; + if (IS_HASH(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST; + } + + if (IS_HMAC(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER; + } + + sa_record->sa_spi = 0x0; + sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF; + sa_record->sa_seqmum_mask[1] = 0x0; +} + +/* + * Poor mans Scatter/gather function: + * Create a Descriptor for every segment to avoid copying buffers. 
+ * For performance better to wait for hardware to perform multiple DMA + */ +static int eip93_scatter_combine(struct eip93_device *eip93, + struct eip93_cipher_reqctx *rctx, + u32 datalen, u32 split, int offsetin) +{ + struct eip93_descriptor *cdesc = rctx->cdesc; + struct scatterlist *sgsrc = rctx->sg_src; + struct scatterlist *sgdst = rctx->sg_dst; + unsigned int remainin = sg_dma_len(sgsrc); + unsigned int remainout = sg_dma_len(sgdst); + dma_addr_t saddr = sg_dma_address(sgsrc); + dma_addr_t daddr = sg_dma_address(sgdst); + dma_addr_t state_addr; + u32 src_addr, dst_addr, len, n; + bool nextin = false; + bool nextout = false; + int offsetout = 0; + int err; + + if (IS_ECB(rctx->flags)) + rctx->sa_state_base = 0; + + if (split < datalen) { + state_addr = rctx->sa_state_ctr_base; + n = split; + } else { + state_addr = rctx->sa_state_base; + n = datalen; + } + + do { + if (nextin) { + sgsrc = sg_next(sgsrc); + remainin = sg_dma_len(sgsrc); + if (remainin == 0) + continue; + + saddr = sg_dma_address(sgsrc); + offsetin = 0; + nextin = false; + } + + if (nextout) { + sgdst = sg_next(sgdst); + remainout = sg_dma_len(sgdst); + if (remainout == 0) + continue; + + daddr = sg_dma_address(sgdst); + offsetout = 0; + nextout = false; + } + src_addr = saddr + offsetin; + dst_addr = daddr + offsetout; + + if (remainin == remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + nextin = true; + nextout = true; + } + } else if (remainin < remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetout += len; + remainout -= len; + nextin = true; + } + } else { + len = remainout; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetin += len; + remainin -= len; + nextout = true; + } + } + n -= len; + + cdesc->src_addr = src_addr; + cdesc->dst_addr = dst_addr; + cdesc->state_addr = state_addr; + cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len); + + if (n == 0) { + n = datalen - split; + split = datalen; + state_addr = rctx->sa_state_base; + } + + if (n == 0) + cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, + EIP93_DESC_LAST); + + /* + * Loop - Delay - No need to rollback + * Maybe refine by slowing down at EIP93_RING_BUSY + */ +again: + scoped_guard(spinlock_irqsave, &eip93->ring->write_lock) + err = eip93_put_descriptor(eip93, cdesc); + if (err) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + } while (n); + + return -EINPROGRESS; +} + +int eip93_send_req(struct crypto_async_request *async, + const u8 *reqiv, struct eip93_cipher_reqctx *rctx) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + struct sa_state *sa_state; + struct eip93_descriptor cdesc; + u32 flags = rctx->flags; + int offsetin = 0, err; + u32 datalen = rctx->assoclen + rctx->textsize; + u32 split = datalen; + u32 start, end, ctr, blocks; + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + int crypto_async_idr; + + rctx->sa_state_ctr = NULL; + rctx->sa_state = NULL; + + if (IS_ECB(flags)) + goto skip_iv; + + 
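+	/*
+	 * Non-ECB modes: stage the request IV in a per-request sa_state so the
+	 * engine can load the chaining/counter state and write the updated IV
+	 * back on completion (see eip93_handle_result()).
+	 */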
memcpy(iv, reqiv, rctx->ivsize); + + rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL); + if (!rctx->sa_state) + return -ENOMEM; + + sa_state = rctx->sa_state; + + memcpy(sa_state->state_iv, iv, rctx->ivsize); + if (IS_RFC3686(flags)) { + sa_state->state_iv[0] = ctx->sa_nonce; + sa_state->state_iv[1] = iv[0]; + sa_state->state_iv[2] = iv[1]; + sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1); + } else if (!IS_HMAC(flags) && IS_CTR(flags)) { + /* Compute data length. */ + blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE); + ctr = be32_to_cpu((__be32 __force)iv[3]); + /* Check 32bit counter overflow. */ + start = ctr; + end = start + blocks - 1; + if (end < start) { + split = AES_BLOCK_SIZE * -start; + /* + * Increment the counter manually to cope with + * the hardware counter overflow. + */ + iv[3] = 0xffffffff; + crypto_inc((u8 *)iv, AES_BLOCK_SIZE); + + rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr), + GFP_KERNEL); + if (!rctx->sa_state_ctr) { + err = -ENOMEM; + goto free_sa_state; + } + + memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize); + memcpy(sa_state->state_iv, iv, rctx->ivsize); + + rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base); + if (err) + goto free_sa_state_ctr; + } + } + + rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state, + sizeof(*rctx->sa_state), DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (err) + goto free_sa_state_ctr_dma; + +skip_iv: + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags); + + rctx->cdesc = &cdesc; + + /* map DMA_BIDIRECTIONAL to invalidate cache on destination + * implies __dma_cache_wback_inv + */ + if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) { + err = -ENOMEM; + goto free_sa_state_ctr_dma; + } + + if (src != dst && + !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) { + err = -ENOMEM; + goto free_sg_dma; + } + + return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin); + +free_sg_dma: + dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL); +free_sa_state_ctr_dma: + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); +free_sa_state_ctr: + kfree(rctx->sa_state_ctr); + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_TO_DEVICE); +free_sa_state: + kfree(rctx->sa_state); + + return err; +} + +void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + struct scatterlist *reqsrc, struct scatterlist *reqdst) +{ + u32 len = rctx->assoclen + rctx->textsize; + u32 authsize = rctx->authsize; + u32 flags = rctx->flags; + u32 *otag; + int i; + + if (rctx->sg_src == rctx->sg_dst) { + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + goto process_tag; + } + + dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents, + DMA_TO_DEVICE); + + if (rctx->sg_src != reqsrc) + 
eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src); + + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + + /* SHA tags need conversion from net-to-host */ +process_tag: + if (IS_DECRYPT(flags)) + authsize = 0; + + if (authsize) { + if (!IS_HASH_MD5(flags)) { + otag = sg_virt(rctx->sg_dst) + len; + for (i = 0; i < (authsize / 4); i++) + otag[i] = be32_to_cpu((__be32 __force)otag[i]); + } + } + + if (rctx->sg_dst != reqdst) { + sg_copy_from_buffer(reqdst, sg_nents(reqdst), + sg_virt(rctx->sg_dst), len + authsize); + eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst); + } +} + +void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + u8 *reqiv) +{ + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_FROM_DEVICE); + + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_FROM_DEVICE); + + if (!IS_ECB(rctx->flags)) + memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize); + + kfree(rctx->sa_state_ctr); + kfree(rctx->sa_state); +} + +int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, + unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad, + bool skip_ipad) +{ + u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE]; + struct crypto_ahash *ahash_tfm; + struct eip93_hash_reqctx *rctx; + struct ahash_request *req; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist sg[1]; + const char *alg_name; + int i, ret; + + switch (ctx_flags & EIP93_HASH_MASK) { + case EIP93_HASH_SHA256: + alg_name = "sha256-eip93"; + break; + case EIP93_HASH_SHA224: + alg_name = "sha224-eip93"; + break; + case EIP93_HASH_SHA1: + alg_name = "sha1-eip93"; + break; + case EIP93_HASH_MD5: + alg_name = "md5-eip93"; + break; + default: /* Impossible */ + return -EINVAL; + } + + ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ahash_tfm)) + return PTR_ERR(ahash_tfm); + + req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC); + if (!req) { + ret = -ENOMEM; + goto err_ahash; + } + + rctx = ahash_request_ctx_dma(req); + crypto_init_wait(&wait); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + /* Hash the key if > SHA256_BLOCK_SIZE */ + if (keylen > SHA256_BLOCK_SIZE) { + sg_init_one(&sg[0], key, keylen); + + ahash_request_set_crypt(req, sg, ipad, keylen); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (ret) + goto err_req; + + keylen = hashlen; + } else { + memcpy(ipad, key, keylen); + } + + /* Copy to opad */ + memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen); + memcpy(opad, ipad, SHA256_BLOCK_SIZE); + + /* Pad with HMAC constants */ + for (i = 0; i < SHA256_BLOCK_SIZE; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + if (skip_ipad) { + memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE); + } else { + /* Hash ipad */ + sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for ipad hash */ + rctx->partial_hash = true; + + ret = crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + } + + /* Hash opad */ + sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for opad hash */ + rctx->partial_hash = true; + + ret = 
crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + + if (!IS_HASH_MD5(ctx_flags)) { + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) { + u32 *ipad_hash = (u32 *)dest_ipad; + u32 *opad_hash = (u32 *)dest_opad; + + if (!skip_ipad) + ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]); + opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]); + } + } + +err_req: + ahash_request_free(req); +err_ahash: + crypto_free_ahash(ahash_tfm); + + return ret; +} diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h new file mode 100644 index 000000000000..80964cfa34df --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-common.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi + * Christian Marangi +#include +#include +#include +#include +#include + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-main.h" +#include "eip93-common.h" +#include "eip93-regs.h" + +static void eip93_hash_free_data_blocks(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct mkt_hash_block *block, *tmp; + + list_for_each_entry_safe(block, tmp, &rctx->blocks, list) { + dma_unmap_single(eip93->dev, block->data_dma, + SHA256_BLOCK_SIZE, DMA_TO_DEVICE); + kfree(block); + } + if (!list_empty(&rctx->blocks)) + INIT_LIST_HEAD(&rctx->blocks); + + if (rctx->finalize) + dma_unmap_single(eip93->dev, rctx->data_dma, + rctx->data_used, + DMA_TO_DEVICE); +} + +static void eip93_hash_free_sa_record(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + + if (IS_HMAC(ctx->flags)) + dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base, + sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE); + + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(rctx->sa_record), DMA_TO_DEVICE); +} + +void eip93_hash_handle_result(struct crypto_async_request *async, int err) +{ + struct ahash_request *req = ahash_request_cast(async); + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int i; + + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_FROM_DEVICE); + + /* + * With partial_hash assume SHA256_DIGEST_SIZE buffer is passed. + * This is to handle SHA224 that have a 32 byte intermediate digest. 
+ */ + if (rctx->partial_hash) + digestsize = SHA256_DIGEST_SIZE; + + if (rctx->finalize || rctx->partial_hash) { + /* bytes needs to be swapped for req->result */ + if (!IS_HASH_MD5(ctx->flags)) { + for (i = 0; i < digestsize / sizeof(u32); i++) { + u32 *digest = (u32 *)sa_state->state_i_digest; + + digest[i] = be32_to_cpu((__be32 __force)digest[i]); + } + } + + memcpy(req->result, sa_state->state_i_digest, digestsize); + } + + eip93_hash_free_sa_record(req); + eip93_hash_free_data_blocks(req); + + ahash_request_complete(req, err); +} + +static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest) +{ + u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; + u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 }; + u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; + u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 }; + + /* Init HASH constant */ + switch (hash) { + case EIP93_HASH_SHA256: + memcpy(digest, sha256_init, sizeof(sha256_init)); + return; + case EIP93_HASH_SHA224: + memcpy(digest, sha224_init, sizeof(sha224_init)); + return; + case EIP93_HASH_SHA1: + memcpy(digest, sha1_init, sizeof(sha1_init)); + return; + case EIP93_HASH_MD5: + memcpy(digest, md5_init, sizeof(md5_init)); + return; + default: /* Impossible */ + return; + } +} + +static void eip93_hash_export_sa_state(struct ahash_request *req, + struct eip93_hash_export_state *state) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct sa_state *sa_state = &rctx->sa_state; + + /* + * EIP93 have special handling for state_byte_cnt in sa_state. + * Even if a zero packet is passed (and a BADMSG is returned), + * state_byte_cnt is incremented to the digest handled (with the hash + * primitive). This is problematic with export/import as EIP93 + * expect 0 state_byte_cnt for the very first iteration. + */ + if (!rctx->len) + memset(state->state_len, 0, sizeof(u32) * 2); + else + memcpy(state->state_len, sa_state->state_byte_cnt, + sizeof(u32) * 2); + memcpy(state->state_hash, sa_state->state_i_digest, + SHA256_DIGEST_SIZE); + state->len = rctx->len; + state->data_used = rctx->data_used; +} + +static void __eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + int digestsize; + + digestsize = crypto_ahash_digestsize(ahash); + + eip93_set_sa_record(sa_record, 0, ctx->flags); + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE; + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH; + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + digestsize / sizeof(u32)); + + /* + * HMAC special handling + * Enabling CMD_HMAC force the inner hash to be always finalized. + * This cause problems on handling message > 64 byte as we + * need to produce intermediate inner hash on sending intermediate + * 64 bytes blocks. + * + * To handle this, enable CMD_HMAC only on the last block. + * We make a duplicate of sa_record and on the last descriptor, + * we pass a dedicated sa_record with CMD_HMAC enabled to make + * EIP93 apply the outer hash. 
+ */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + memcpy(sa_record_hmac, sa_record, sizeof(*sa_record)); + /* Copy pre-hashed opad for HMAC */ + memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE); + + /* Disable HMAC for hash normal sa_record */ + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC; + } + + rctx->len = 0; + rctx->data_used = 0; + rctx->partial_hash = false; + rctx->finalize = false; + INIT_LIST_HEAD(&rctx->blocks); +} + +static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data, + dma_addr_t *data_dma, u32 len, bool last) +{ + struct ahash_request *req = ahash_request_cast(async); + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct eip93_descriptor cdesc = { }; + dma_addr_t src_addr; + int ret; + + /* Map block data to DMA */ + src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, src_addr); + if (ret) + return ret; + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + cdesc.state_addr = rctx->sa_state_base; + cdesc.src_addr = src_addr; + cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, + len); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH); + + if (last) { + int crypto_async_idr; + + if (rctx->finalize && !rctx->partial_hash) { + /* For last block, pass sa_record with CMD_HMAC enabled */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + rctx->sa_record_hmac_base = dma_map_single(eip93->dev, + sa_record_hmac, + sizeof(*sa_record_hmac), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base); + if (ret) + return ret; + + cdesc.sa_addr = rctx->sa_record_hmac_base; + } + + cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL; + } + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST); + } + +again: + ret = eip93_put_descriptor(eip93, &cdesc); + if (ret) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + + *data_dma = src_addr; + return 0; +} + +static int eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_state *sa_state = &rctx->sa_state; + + memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2); + eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK, + sa_state->state_i_digest); + + __eip93_hash_init(req); + + /* For HMAC setup the initial block for ipad */ + if (IS_HMAC(ctx->flags)) { + memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE); + + rctx->data_used = SHA256_BLOCK_SIZE; + rctx->len += SHA256_BLOCK_SIZE; + } + + return 0; +} + +/* + * With complete_req true, we wait for 
the engine to consume all the block in list, + * else we just queue the block to the engine as final() will wait. This is useful + * for finup(). + */ +static int __eip93_hash_update(struct ahash_request *req, bool complete_req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_async_request *async = &req->base; + unsigned int read, to_consume = req->nbytes; + unsigned int max_read, consumed = 0; + struct mkt_hash_block *block; + bool wait_req = false; + int offset; + int ret; + + /* Get the offset and available space to fill req data */ + offset = rctx->data_used; + max_read = SHA256_BLOCK_SIZE - offset; + + /* Consume req in block of SHA256_BLOCK_SIZE. + * to_read is initially set to space available in the req data + * and then reset to SHA256_BLOCK_SIZE. + */ + while (to_consume > max_read) { + block = kzalloc(sizeof(*block), GFP_ATOMIC); + if (!block) { + ret = -ENOMEM; + goto free_blocks; + } + + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + block->data + offset, + max_read, consumed); + + /* + * For first iteration only, copy req data to block + * and reset offset and max_read for next iteration. + */ + if (offset > 0) { + memcpy(block->data, rctx->data, offset); + offset = 0; + max_read = SHA256_BLOCK_SIZE; + } + + list_add(&block->list, &rctx->blocks); + to_consume -= read; + consumed += read; + } + + /* Write the remaining data to req data */ + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + rctx->data + offset, to_consume, + consumed); + rctx->data_used = offset + read; + + /* Update counter with processed bytes */ + rctx->len += read + consumed; + + /* Consume all the block added to list */ + list_for_each_entry_reverse(block, &rctx->blocks, list) { + wait_req = complete_req && + list_is_first(&block->list, &rctx->blocks); + + ret = eip93_send_hash_req(async, block->data, + &block->data_dma, + SHA256_BLOCK_SIZE, wait_req); + if (ret) + goto free_blocks; + } + + return wait_req ? -EINPROGRESS : 0; + +free_blocks: + eip93_hash_free_data_blocks(req); + + return ret; +} + +static int eip93_hash_update(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + if (!req->nbytes) + return 0; + + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + + ret = __eip93_hash_update(req, true); + if (ret && ret != -EINPROGRESS) + goto free_sa_record; + + return ret; + +free_sa_record: + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); + +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +/* + * With map_data true, we map the sa_record and sa_state. 
This is needed + * for finup() as the they are mapped before calling update() + */ +static int __eip93_hash_final(struct ahash_request *req, bool map_dma) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct crypto_async_request *async = &req->base; + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + /* EIP93 can't handle zero bytes hash */ + if (!rctx->len && !IS_HMAC(ctx->flags)) { + switch ((ctx->flags & EIP93_HASH_MASK)) { + case EIP93_HASH_SHA256: + memcpy(req->result, sha256_zero_message_hash, + SHA256_DIGEST_SIZE); + break; + case EIP93_HASH_SHA224: + memcpy(req->result, sha224_zero_message_hash, + SHA224_DIGEST_SIZE); + break; + case EIP93_HASH_SHA1: + memcpy(req->result, sha1_zero_message_hash, + SHA1_DIGEST_SIZE); + break; + case EIP93_HASH_MD5: + memcpy(req->result, md5_zero_message_hash, + MD5_DIGEST_SIZE); + break; + default: /* Impossible */ + return -EINVAL; + } + + return 0; + } + + /* Signal interrupt from engine is for last block */ + rctx->finalize = true; + + if (map_dma) { + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + } + + /* Send last block */ + ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma, + rctx->data_used, true); + if (ret) + goto free_blocks; + + return -EINPROGRESS; + +free_blocks: + eip93_hash_free_data_blocks(req); + + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); + +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +static int eip93_hash_final(struct ahash_request *req) +{ + return __eip93_hash_final(req, true); +} + +static int eip93_hash_finup(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) { + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + + ret = __eip93_hash_update(req, false); + if (ret) + goto free_sa_record; + } + + return __eip93_hash_final(req, false); + +free_sa_record: + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + u32 keylen) +{ + unsigned int digestsize = crypto_ahash_digestsize(ahash); + struct crypto_tfm *tfm = 
crypto_ahash_tfm(ahash); + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize, + ctx->ipad, ctx->opad, true); +} + +static int eip93_hash_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.ahash.halg.base); + + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct eip93_hash_reqctx)); + + ctx->eip93 = tmpl->eip93; + ctx->flags = tmpl->flags; + + return 0; +} + +static int eip93_hash_digest(struct ahash_request *req) +{ + int ret; + + ret = eip93_hash_init(req); + if (ret) + return ret; + + return eip93_hash_finup(req); +} + +static int eip93_hash_import(struct ahash_request *req, const void *in) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + const struct eip93_hash_export_state *state = in; + struct sa_state *sa_state = &rctx->sa_state; + + memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2); + memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE); + + __eip93_hash_init(req); + + rctx->len = state->len; + rctx->data_used = state->data_used; + + /* Skip copying data if we have nothing to copy */ + if (rctx->len) + memcpy(rctx->data, state->data, rctx->data_used); + + return 0; +} + +static int eip93_hash_export(struct ahash_request *req, void *out) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct eip93_hash_export_state *state = out; + + /* Save the first block in state data */ + if (rctx->len) + memcpy(state->data, rctx->data, rctx->data_used); + + eip93_hash_export_sa_state(req, state); + + return 0; +} + +struct eip93_alg_template eip93_alg_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + 
.digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "hmac(md5-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct 
eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac(sha224-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h new file mode 100644 index 000000000000..556f22fc1dd0 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi + +#include "eip93-main.h" +#include "eip93-regs.h" + +struct eip93_hash_ctx { + struct eip93_device *eip93; + u32 flags; + + u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); +}; + +struct eip93_hash_reqctx { + /* Placement is important for DMA align */ + struct { + struct sa_record sa_record; + struct sa_record sa_record_hmac; + struct sa_state sa_state; + } __aligned(CRYPTO_DMA_ALIGN); + + dma_addr_t sa_record_base; + dma_addr_t sa_record_hmac_base; + dma_addr_t sa_state_base; + + /* Don't enable HASH_FINALIZE when last block is sent */ + bool partial_hash; + + /* Set to signal interrupt is for final packet */ + bool finalize; + + /* + * EIP93 requires data to be accumulated in block of 64 bytes + * for intermediate hash calculation. 
+ */ + u64 len; + u32 data_used; + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; + + struct list_head blocks; +}; + +struct mkt_hash_block { + struct list_head list; + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; +}; + +struct eip93_hash_export_state { + u64 len; + u32 data_used; + + u32 state_len[2]; + u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); +}; + +void eip93_hash_handle_result(struct crypto_async_request *async, int err); + +extern struct eip93_alg_template eip93_alg_md5; +extern struct eip93_alg_template eip93_alg_sha1; +extern struct eip93_alg_template eip93_alg_sha224; +extern struct eip93_alg_template eip93_alg_sha256; +extern struct eip93_alg_template eip93_alg_hmac_md5; +extern struct eip93_alg_template eip93_alg_hmac_sha1; +extern struct eip93_alg_template eip93_alg_hmac_sha224; +extern struct eip93_alg_template eip93_alg_hmac_sha256; + +#endif /* _EIP93_HASH_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c new file mode 100644 index 000000000000..0b38a567da0e --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eip93-main.h" +#include "eip93-regs.h" +#include "eip93-common.h" +#include "eip93-cipher.h" +#include "eip93-aes.h" +#include "eip93-des.h" +#include "eip93-aead.h" +#include "eip93-hash.h" + +static struct eip93_alg_template *eip93_algs[] = { + &eip93_alg_ecb_des, + &eip93_alg_cbc_des, + &eip93_alg_ecb_des3_ede, + &eip93_alg_cbc_des3_ede, + &eip93_alg_ecb_aes, + &eip93_alg_cbc_aes, + &eip93_alg_ctr_aes, + &eip93_alg_rfc3686_aes, + &eip93_alg_authenc_hmac_md5_cbc_des, + &eip93_alg_authenc_hmac_sha1_cbc_des, + &eip93_alg_authenc_hmac_sha224_cbc_des, + &eip93_alg_authenc_hmac_sha256_cbc_des, + &eip93_alg_authenc_hmac_md5_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha1_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha224_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha256_cbc_des3_ede, + &eip93_alg_authenc_hmac_md5_cbc_aes, + &eip93_alg_authenc_hmac_sha1_cbc_aes, + &eip93_alg_authenc_hmac_sha224_cbc_aes, + &eip93_alg_authenc_hmac_sha256_cbc_aes, + &eip93_alg_authenc_hmac_md5_rfc3686_aes, + &eip93_alg_authenc_hmac_sha1_rfc3686_aes, + &eip93_alg_authenc_hmac_sha224_rfc3686_aes, + &eip93_alg_authenc_hmac_sha256_rfc3686_aes, + &eip93_alg_md5, + &eip93_alg_sha1, + &eip93_alg_sha224, + &eip93_alg_sha256, + &eip93_alg_hmac_md5, + &eip93_alg_hmac_sha1, + &eip93_alg_hmac_sha224, + &eip93_alg_hmac_sha256, +}; + +inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE); +} + +inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE); +} + +inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR); +} + +static void eip93_unregister_algs(unsigned int i) +{ + unsigned int j; + + for (j = 0; j < i; j++) { + switch (eip93_algs[j]->type) { + case EIP93_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher); + break; + case EIP93_ALG_TYPE_AEAD: + crypto_unregister_aead(&eip93_algs[j]->alg.aead); + break; + case 
EIP93_ALG_TYPE_HASH: + crypto_unregister_ahash(&eip93_algs[i]->alg.ahash); + break; + } + } +} + +static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) { + u32 alg_flags = eip93_algs[i]->flags; + + eip93_algs[i]->eip93 = eip93; + + if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) && + !(supported_algo_flags & EIP93_PE_OPTION_TDES)) + continue; + + if (IS_AES(alg_flags)) { + if (!(supported_algo_flags & EIP93_PE_OPTION_AES)) + continue; + + if (!IS_HMAC(alg_flags)) { + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_128; + + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_192; + + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256) + eip93_algs[i]->alg.skcipher.max_keysize = + AES_KEYSIZE_256; + + if (IS_RFC3686(alg_flags)) + eip93_algs[i]->alg.skcipher.max_keysize += + CTR_RFC3686_NONCE_SIZE; + } + } + + if (IS_HASH_MD5(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_MD5)) + continue; + + if (IS_HASH_SHA1(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_1)) + continue; + + if (IS_HASH_SHA224(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_224)) + continue; + + if (IS_HASH_SHA256(alg_flags) && + !(supported_algo_flags & EIP93_PE_OPTION_SHA_256)) + continue; + + switch (eip93_algs[i]->type) { + case EIP93_ALG_TYPE_SKCIPHER: + ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher); + break; + case EIP93_ALG_TYPE_AEAD: + ret = crypto_register_aead(&eip93_algs[i]->alg.aead); + break; + case EIP93_ALG_TYPE_HASH: + ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash); + break; + } + if (ret) + goto fail; + } + + return 0; + +fail: + eip93_unregister_algs(i); + + return ret; +} + +static void eip93_handle_result_descriptor(struct eip93_device *eip93) +{ + struct crypto_async_request *async; + struct eip93_descriptor *rdesc; + u16 desc_flags, crypto_idr; + bool last_entry; + int handled, left, err; + u32 pe_ctrl_stat; + u32 pe_length; + +get_more: + handled = 0; + + left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT; + + if (!left) { + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); + return; + } + + last_entry = false; + + while (left) { + scoped_guard(spinlock_irqsave, &eip93->ring->read_lock) + rdesc = eip93_get_descriptor(eip93); + if (IS_ERR(rdesc)) { + dev_err(eip93->dev, "Ndesc: %d nreq: %d\n", + handled, left); + err = -EIO; + break; + } + /* make sure DMA is finished writing */ + do { + pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word); + pe_length = READ_ONCE(rdesc->pe_length_word); + } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) != + EIP93_PE_CTRL_PE_READY || + FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) != + EIP93_PE_LENGTH_PE_READY); + + err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE | + EIP93_PE_CTRL_PE_EXT_ERR | + EIP93_PE_CTRL_PE_SEQNUM_ERR | + EIP93_PE_CTRL_PE_PAD_ERR | + EIP93_PE_CTRL_PE_AUTH_ERR); + + desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id); + crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id); + + writel(1, eip93->base + EIP93_REG_PE_RD_COUNT); + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); + + handled++; + left--; + + if (desc_flags & EIP93_DESC_LAST) { + last_entry = true; + break; + } + } + + if (!last_entry) + goto get_more; + + /* 
Get crypto async ref only for last descriptor */ + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) { + async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr); + idr_remove(&eip93->ring->crypto_async_idr, crypto_idr); + } + + /* Parse error in ctrl stat word */ + err = eip93_parse_ctrl_stat_err(eip93, err); + + if (desc_flags & EIP93_DESC_SKCIPHER) + eip93_skcipher_handle_result(async, err); + + if (desc_flags & EIP93_DESC_AEAD) + eip93_aead_handle_result(async, err); + + if (desc_flags & EIP93_DESC_HASH) + eip93_hash_handle_result(async, err); + + goto get_more; +} + +static void eip93_done_task(unsigned long data) +{ + struct eip93_device *eip93 = (struct eip93_device *)data; + + eip93_handle_result_descriptor(eip93); +} + +static irqreturn_t eip93_irq_handler(int irq, void *data) +{ + struct eip93_device *eip93 = data; + u32 irq_status; + + irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT); + if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) { + eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH); + tasklet_schedule(&eip93->ring->done_task); + return IRQ_HANDLED; + } + + /* Ignore errors in AUTO mode, handled by the RDR */ + eip93_irq_clear(eip93, irq_status); + if (irq_status) + eip93_irq_disable(eip93, irq_status); + + return IRQ_NONE; +} + +static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags) +{ + u32 val; + + /* Reset PE and rings */ + val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING; + val |= EIP93_PE_TARGET_AUTO_RING_MODE; + /* For Auto more, update the CDR ring owner after processing */ + val |= EIP93_PE_CONFIG_EN_CDR_UPDATE; + writel(val, eip93->base + EIP93_REG_PE_CONFIG); + + /* Wait for PE and ring to reset */ + usleep_range(10, 20); + + /* Release PE and ring reset */ + val = readl(eip93->base + EIP93_REG_PE_CONFIG); + val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING); + writel(val, eip93->base + EIP93_REG_PE_CONFIG); + + /* Config Clocks */ + val = EIP93_PE_CLOCK_EN_PE_CLK; + if (supported_algo_flags & EIP93_PE_OPTION_TDES) + val |= EIP93_PE_CLOCK_EN_DES_CLK; + if (supported_algo_flags & EIP93_PE_OPTION_AES) + val |= EIP93_PE_CLOCK_EN_AES_CLK; + if (supported_algo_flags & + (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 | + EIP93_PE_OPTION_SHA_256)) + val |= EIP93_PE_CLOCK_EN_HASH_CLK; + writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL); + + /* Config DMA thresholds */ + val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) | + FIELD_PREP(EIP93_PE_INBUF_THRESH, 128); + writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH); + + /* Clear/ack all interrupts before disable all */ + eip93_irq_clear(eip93, EIP93_INT_ALL); + eip93_irq_disable(eip93, EIP93_INT_ALL); + + /* Setup CRD threshold to trigger interrupt */ + val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY); + /* + * Configure RDR interrupt to be triggered if RD counter is not 0 + * for more than 2^(N+10) system clocks. 
+ */ + val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN; + writel(val, eip93->base + EIP93_REG_PE_RING_THRESH); +} + +static void eip93_desc_free(struct eip93_device *eip93) +{ + writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG); + writel(0, eip93->base + EIP93_REG_PE_CDR_BASE); + writel(0, eip93->base + EIP93_REG_PE_RDR_BASE); +} + +static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring) +{ + ring->offset = sizeof(struct eip93_descriptor); + ring->base = dmam_alloc_coherent(eip93->dev, + sizeof(struct eip93_descriptor) * EIP93_RING_NUM, + &ring->base_dma, GFP_KERNEL); + if (!ring->base) + return -ENOMEM; + + ring->write = ring->base; + ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1); + ring->read = ring->base; + + return 0; +} + +static int eip93_desc_init(struct eip93_device *eip93) +{ + struct eip93_desc_ring *cdr = &eip93->ring->cdr; + struct eip93_desc_ring *rdr = &eip93->ring->rdr; + int ret; + u32 val; + + ret = eip93_set_ring(eip93, cdr); + if (ret) + return ret; + + ret = eip93_set_ring(eip93, rdr); + if (ret) + return ret; + + writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE); + writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE); + + val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1); + writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG); + + return 0; +} + +static void eip93_cleanup(struct eip93_device *eip93) +{ + tasklet_kill(&eip93->ring->done_task); + + /* Clear/ack all interrupts before disable all */ + eip93_irq_clear(eip93, EIP93_INT_ALL); + eip93_irq_disable(eip93, EIP93_INT_ALL); + + writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL); + + eip93_desc_free(eip93); + + idr_destroy(&eip93->ring->crypto_async_idr); +} + +static int eip93_crypto_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct eip93_device *eip93; + u32 ver, algo_flags; + int ret; + + eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL); + if (!eip93) + return -ENOMEM; + + eip93->dev = dev; + platform_set_drvdata(pdev, eip93); + + eip93->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eip93->base)) + return PTR_ERR(eip93->base); + + eip93->irq = platform_get_irq(pdev, 0); + if (eip93->irq < 0) + return eip93->irq; + + ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler, + NULL, IRQF_ONESHOT, + dev_name(eip93->dev), eip93); + + eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL); + if (!eip93->ring) + return -ENOMEM; + + ret = eip93_desc_init(eip93); + + if (ret) + return ret; + + tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93); + + spin_lock_init(&eip93->ring->read_lock); + spin_lock_init(&eip93->ring->write_lock); + + spin_lock_init(&eip93->ring->idr_lock); + idr_init(&eip93->ring->crypto_async_idr); + + algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1); + + eip93_initialize(eip93, algo_flags); + + /* Init finished, enable RDR interrupt */ + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); + + ret = eip93_register_algs(eip93, algo_flags); + if (ret) { + eip93_cleanup(eip93); + return ret; + } + + ver = readl(eip93->base + EIP93_REG_PE_REVISION); + /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */ + dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n", + FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver), + FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver), 
+ algo_flags, + readl(eip93->base + EIP93_REG_PE_OPTION_0)); + + return 0; +} + +static void eip93_crypto_remove(struct platform_device *pdev) +{ + struct eip93_device *eip93 = platform_get_drvdata(pdev); + + eip93_unregister_algs(ARRAY_SIZE(eip93_algs)); + eip93_cleanup(eip93); +} + +static const struct of_device_id eip93_crypto_of_match[] = { + { .compatible = "inside-secure,safexcel-eip93i", }, + { .compatible = "inside-secure,safexcel-eip93ie", }, + { .compatible = "inside-secure,safexcel-eip93is", }, + { .compatible = "inside-secure,safexcel-eip93ies", }, + /* IW not supported currently, missing AES-XCB-MAC/AES-CCM */ + /* { .compatible = "inside-secure,safexcel-eip93iw", }, */ + {} +}; +MODULE_DEVICE_TABLE(of, eip93_crypto_of_match); + +static struct platform_driver eip93_crypto_driver = { + .probe = eip93_crypto_probe, + .remove = eip93_crypto_remove, + .driver = { + .name = "inside-secure-eip93", + .of_match_table = eip93_crypto_of_match, + }, +}; +module_platform_driver(eip93_crypto_driver); + +MODULE_AUTHOR("Richard van Schagen "); +MODULE_AUTHOR("Christian Marangi "); +MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h new file mode 100644 index 000000000000..79b078f0e5da --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi +#include +#include +#include +#include + +#define EIP93_RING_BUSY_DELAY 500 + +#define EIP93_RING_NUM 512 +#define EIP93_RING_BUSY 32 +#define EIP93_CRA_PRIORITY 1500 + +#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx)) +#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \ + ((idx) * sizeof(struct sa_state))) + +/* cipher algorithms */ +#define EIP93_ALG_DES BIT(0) +#define EIP93_ALG_3DES BIT(1) +#define EIP93_ALG_AES BIT(2) +#define EIP93_ALG_MASK GENMASK(2, 0) +/* hash and hmac algorithms */ +#define EIP93_HASH_MD5 BIT(3) +#define EIP93_HASH_SHA1 BIT(4) +#define EIP93_HASH_SHA224 BIT(5) +#define EIP93_HASH_SHA256 BIT(6) +#define EIP93_HASH_HMAC BIT(7) +#define EIP93_HASH_MASK GENMASK(6, 3) +/* cipher modes */ +#define EIP93_MODE_CBC BIT(8) +#define EIP93_MODE_ECB BIT(9) +#define EIP93_MODE_CTR BIT(10) +#define EIP93_MODE_RFC3686 BIT(11) +#define EIP93_MODE_MASK GENMASK(10, 8) + +/* cipher encryption/decryption operations */ +#define EIP93_ENCRYPT BIT(12) +#define EIP93_DECRYPT BIT(13) + +#define EIP93_BUSY BIT(14) + +/* descriptor flags */ +#define EIP93_DESC_DMA_IV BIT(0) +#define EIP93_DESC_IPSEC BIT(1) +#define EIP93_DESC_FINISH BIT(2) +#define EIP93_DESC_LAST BIT(3) +#define EIP93_DESC_FAKE_HMAC BIT(4) +#define EIP93_DESC_PRNG BIT(5) +#define EIP93_DESC_HASH BIT(6) +#define EIP93_DESC_AEAD BIT(7) +#define EIP93_DESC_SKCIPHER BIT(8) +#define EIP93_DESC_ASYNC BIT(9) + +#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV) + +#define IS_DES(flags) ((flags) & EIP93_ALG_DES) +#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES) +#define IS_AES(flags) ((flags) & EIP93_ALG_AES) + +#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5) +#define IS_HASH_SHA1(flags) ((flags) & EIP93_HASH_SHA1) +#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224) +#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256) +#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC) + +#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC) +#define 
IS_ECB(mode) ((mode) & EIP93_MODE_ECB) +#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR) +#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686) + +#define IS_BUSY(flags) ((flags) & EIP93_BUSY) + +#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT) +#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT) + +#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \ + EIP93_ALG_3DES | \ + EIP93_ALG_AES)) + +#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \ + EIP93_HASH_SHA1 | \ + EIP93_HASH_SHA224 | \ + EIP93_HASH_SHA256)) + +/** + * struct eip93_device - crypto engine device structure + */ +struct eip93_device { + void __iomem *base; + struct device *dev; + struct clk *clk; + int irq; + struct eip93_ring *ring; +}; + +struct eip93_desc_ring { + void *base; + void *base_end; + dma_addr_t base_dma; + /* write and read pointers */ + void *read; + void *write; + /* descriptor element offset */ + u32 offset; +}; + +struct eip93_state_pool { + void *base; + dma_addr_t base_dma; +}; + +struct eip93_ring { + struct tasklet_struct done_task; + /* command/result rings */ + struct eip93_desc_ring cdr; + struct eip93_desc_ring rdr; + spinlock_t write_lock; + spinlock_t read_lock; + /* aync idr */ + spinlock_t idr_lock; + struct idr crypto_async_idr; +}; + +enum eip93_alg_type { + EIP93_ALG_TYPE_AEAD, + EIP93_ALG_TYPE_SKCIPHER, + EIP93_ALG_TYPE_HASH, +}; + +struct eip93_alg_template { + struct eip93_device *eip93; + enum eip93_alg_type type; + u32 flags; + union { + struct aead_alg aead; + struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; +}; + +#endif /* _EIP93_MAIN_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h new file mode 100644 index 000000000000..0490b8d15131 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen + * Christian Marangi Date: Tue, 14 Jan 2025 20:05:01 +0100 Subject: crypto: drivers - Use str_enable_disable-like helpers Replace ternary (condition ? "enable" : "disable") syntax with helpers from string_choices.h because: 1. Simple function call with one argument is easier to read. Ternary operator has three arguments and with wrapping might lead to quite long code. 2. Is slightly shorter thus also easier to read. 3. It brings uniformity in the text - same string. 4. Allows deduping by the linker, which results in a smaller binary file. Signed-off-by: Krzysztof Kozlowski Acked-by: Giovanni Cabiddu # QAT Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 3 ++- drivers/crypto/bcm/spu2.c | 3 ++- drivers/crypto/caam/caamalg_qi2.c | 3 ++- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 10 +++------- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 5 +++-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 3 ++- 6 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 9e6798efbfb7..66accd8e08f6 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -2687,7 +2688,7 @@ static int aead_enqueue(struct aead_request *req, bool is_encrypt) flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len); flow_dump(" iv: ", req->iv, rctx->iv_ctr_len); flow_log(" authkeylen:%u\n", ctx->authkeylen); - flow_log(" is_esp: %s\n", ctx->is_esp ? 
"yes" : "no"); + flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp)); if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log(" max_payload infinite"); diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 3fdc64b5a65e..ce322cf1baa5 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -11,6 +11,7 @@ #include #include +#include #include "util.h" #include "spu.h" @@ -999,7 +1000,7 @@ u32 spu2_create_request(u8 *spu_hdr, req_opts->is_inbound, req_opts->auth_first); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, cipher_parms->mode, cipher_parms->type); - flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no"); + flow_log(" is_esp: %s\n", str_yes_no(req_opts->is_esp)); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); flow_log(" iv: %d\n", cipher_parms->iv_len); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index e809d030ab11..107ccb2ade42 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -5175,7 +5176,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) return err; } - dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true"); + dev_dbg(dev, "disable: %s\n", str_false_true(enabled)); for (i = 0; i < priv->num_pairs; i++) { ppriv = per_cpu_ptr(priv->ppriv, i); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 4fcd61ff70d1..84450bffacb6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -3,6 +3,7 @@ #include #include #include +#include #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_cfg_services.h" @@ -19,14 +20,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adf_accel_dev *accel_dev; - char *state; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - state = adf_dev_started(accel_dev) ? "up" : "down"; - return sysfs_emit(buf, "%s\n", state); + return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev))); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, @@ -207,16 +206,13 @@ static DEVICE_ATTR_RW(pm_idle_enabled); static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, char *buf) { - char *auto_reset; struct adf_accel_dev *accel_dev; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - auto_reset = accel_dev->autoreset_on_error ? "on" : "off"; - - return sysfs_emit(buf, "%s\n", auto_reset); + return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error)); } static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index c4250e5fcf8f..2c08e928e44e 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -10,6 +10,7 @@ #include #include +#include #include "otx_cpt_common.h" #include "otx_cptpf_ucode.h" #include "otx_cptpf.h" @@ -614,8 +615,8 @@ static void print_dbg_info(struct device *dev, for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { grp = &eng_grps->grp[i]; - pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ? 
- "enabled" : "disabled"); + pr_debug("engine_group%d, state %s\n", i, + str_enabled_disabled(grp->is_enabled)); if (grp->is_enabled) { mirrored_grp = &eng_grps->grp[grp->mirror.idx]; pr_debug("Ucode0 filename %s, version %s\n", diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 5c9484646172..881fce53e369 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -3,6 +3,7 @@ #include #include +#include #include "otx2_cptpf_ucode.h" #include "otx2_cpt_common.h" #include "otx2_cptpf.h" @@ -1835,7 +1836,7 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) { grp = &eng_grps->grp[i]; pr_debug("engine_group%d, state %s", i, - grp->is_enabled ? "enabled" : "disabled"); + str_enabled_disabled(grp->is_enabled)); if (grp->is_enabled) { mirrored_grp = &eng_grps->grp[grp->mirror.idx]; pr_debug("Ucode0 filename %s, version %s", -- cgit v1.2.3 From 67b78a34e48b981014a9f9336ea649039310d18a Mon Sep 17 00:00:00 2001 From: Dragan Simic Date: Wed, 15 Jan 2025 14:07:00 +0100 Subject: hwrng: Kconfig - Use tabs as leading whitespace consistently in Kconfig Replace instances of leading size-eight groups of space characters with the usual tab characters, as spotted in the hw_random Kconfig file. No intended functional changes are introduced by this trivial cleanup. Signed-off-by: Dragan Simic Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 17854f052386..e0244a66366b 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -579,10 +579,10 @@ config HW_RANDOM_ARM_SMCCC_TRNG module will be called arm_smccc_trng. config HW_RANDOM_CN10K - tristate "Marvell CN10K Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) - default HW_RANDOM if ARCH_THUNDER - help + tristate "Marvell CN10K Random Number Generator support" + depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER + help This driver provides support for the True Random Number generator available in Marvell CN10K SoCs. -- cgit v1.2.3 From 3371482c89c1e1a2061ab85444e18cc6c7a00f6d Mon Sep 17 00:00:00 2001 From: Dragan Simic Date: Wed, 15 Jan 2025 14:07:01 +0100 Subject: hwrng: Kconfig - Move one "tristate" Kconfig description to the usual place It's pretty usual to have "tristate" descriptions in Kconfig files placed immediately after the actual configuration options, so correct the position of one misplaced "tristate" spotted in the hw_random Kconfig file. No intended functional changes are introduced by this trivial cleanup. Signed-off-by: Dragan Simic Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index e0244a66366b..e84c7f431840 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -534,10 +534,10 @@ config HW_RANDOM_NPCM If unsure, say Y. 
config HW_RANDOM_KEYSTONE + tristate "TI Keystone NETCP SA Hardware random number generator" depends on ARCH_KEYSTONE || COMPILE_TEST depends on HAS_IOMEM && OF default HW_RANDOM - tristate "TI Keystone NETCP SA Hardware random number generator" help This option enables Keystone's hardware random generator. -- cgit v1.2.3 From 07bb097b92b987db518e72525b515d77904e966e Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 17 Jan 2025 17:05:47 -0600 Subject: crypto: ccp - Fix check for the primary ASP device Currently, the ASP primary device check does not have support for PCI domains, and, as a result, when the system is configured with PCI domains (PCI segments) the wrong device can be selected as primary. This results in commands submitted to the device timing out and failing. The device check also relies on specific device and function assignments that may not hold in the future. Fix the primary ASP device check to include support for PCI domains and to perform proper checking of the Bus/Device/Function positions. Fixes: 2a6170dfe755 ("crypto: ccp: Add Platform Security Processor (PSP) device support") Cc: stable@vger.kernel.org Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 248d98fd8c48..157f9a9ed636 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp) pdev_new = to_pci_dev(dev_new); pdev_cur = to_pci_dev(dev_cur); - if (pdev_new->bus->number < pdev_cur->bus->number) - return true; + if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus)) + return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus); - if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) - return true; + if (pdev_new->bus->number != pdev_cur->bus->number) + return pdev_new->bus->number < pdev_cur->bus->number; - if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) - return true; + if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn)) + return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn); + + if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn)) + return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn); return false; } -- cgit v1.2.3 From 1d19058d86e55b1fc89a53b854a97b63b0f4c2b9 Mon Sep 17 00:00:00 2001 From: lizhi Date: Sat, 18 Jan 2025 08:45:20 +0800 Subject: crypto: hisilicon/hpre - adapt ECDH for high-performance cores Only the ECDH with NIST P-256 meets requirements. The algorithm will be scheduled first for high-performance cores. The key step is to config resv1 field of BD. 
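
Roughly, the new plumbing works like this: the NIST P-256 ECDH init path sets an enable_hpcore flag in the tfm context, and the compute path shifts that flag into the reserved word of the buffer descriptor (BD) so firmware can steer the job to a high-performance core. A condensed sketch of that request-path step, using the same struct and field names as the hunks that follow (the helper itself is illustrative and not part of the patch):

	/* Illustrative helper, not in the driver; fields mirror the hunks below. */
	static void hpre_ecdh_mark_hpcore(struct hpre_sqe *msg, const struct hpre_ctx *ctx)
	{
		/* bit 7 of the BD resv1 word requests scheduling on a high-performance core */
		msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
	}
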
Signed-off-by: lizhi Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 2a2910261210..61b5e1c5d019 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -39,6 +39,8 @@ struct hpre_ctx; #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 +#define HPRE_ENABLE_HPCORE_SHIFT 7 + /* due to nist p521 */ #define HPRE_ECC_MAX_KSZ 66 @@ -131,6 +133,8 @@ struct hpre_ctx { }; /* for ecc algorithms */ unsigned int curve_id; + /* for high performance core */ + u8 enable_hpcore; }; struct hpre_asym_request { @@ -1619,6 +1623,8 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) } msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL); + msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT; + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -1653,6 +1659,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->enable_hpcore = 1; kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); -- cgit v1.2.3 From 1a3fa1c063fb0ef804d9d31136f02305394a68a3 Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Fri, 31 Jan 2025 11:34:54 +0000 Subject: crypto: qat - set command ids as reserved The XP10 algorithm is not supported by any QAT device. Remove the definition of bit 7 (ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS) and bit 8 (ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS) in the firmware command id enum and rename them as reserved. Those bits shall not be used in future. Signed-off-by: Suman Kumar Chakraborty Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index a03d43fef2b3..04f645957e28 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -16,8 +16,8 @@ enum icp_qat_fw_comp_20_cmd_id { ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4, ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5, ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6, - ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7, - ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8, + ICP_QAT_FW_COMP_20_CMD_RESERVED_7 = 7, + ICP_QAT_FW_COMP_20_CMD_RESERVED_8 = 8, ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9, ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10, ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11, -- cgit v1.2.3 From 7a96a64e8689f33c60ff3179ee4bec2b0835eed9 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Sat, 1 Feb 2025 19:39:07 +0100 Subject: hwrng: imx-rngc - add runtime pm Add runtime power management to the imx-rngc driver. Disable the peripheral clock when the rngc is idle. The callback functions from struct hwrng wake the rngc up when they're called and set it to idle on exit. Helper functions which are invoked from the callbacks assume that the rngc is active. Device init and probe are done before runtime pm is enabled. The peripheral clock will be handled manually during these steps. Do not use devres any more to enable/disable the peripheral clock, this conflicts with runtime pm. 
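
In the read callback the change boils down to bracketing the register access with a runtime-PM get/put pair and letting autosuspend gate the peripheral clock afterwards. Condensed from the hunks that follow (error paths and the FIFO loop trimmed, so treat it as a sketch rather than the full function):

	static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
	{
		struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
		int err, retval = 0;

		err = pm_runtime_resume_and_get(rngc->dev);	/* wake the block, clock on */
		if (err)
			return err;

		/* ... drain RNGC_FIFO into data while entropy is available, growing retval ... */

		pm_runtime_mark_last_busy(rngc->dev);	/* restart the autosuspend timer */
		pm_runtime_put(rngc->dev);		/* clock may gate after RNGC_PM_TIMEOUT ms */

		return retval ? retval : -EIO;
	}
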
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/imx-rngc.c | 69 +++++++++++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 118a72acb99b..241664a9b5d9 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include @@ -53,6 +55,7 @@ #define RNGC_SELFTEST_TIMEOUT 2500 /* us */ #define RNGC_SEED_TIMEOUT 200 /* ms */ +#define RNGC_PM_TIMEOUT 500 /* ms */ static bool self_test = true; module_param(self_test, bool, 0); @@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); unsigned int status; - int retval = 0; + int err, retval = 0; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; while (max >= sizeof(u32)) { status = readl(rngc->base + RNGC_STATUS); @@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } } + pm_runtime_mark_last_busy(rngc->dev); + pm_runtime_put(rngc->dev); return retval ? retval : -EIO; } @@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); u32 cmd, ctrl; - int ret; + int ret, err; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; /* clear error */ cmd = readl(rngc->base + RNGC_COMMAND); @@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng) ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_SEED_TIMEOUT)); if (!ret) { - ret = -ETIMEDOUT; - goto err; + err = -ETIMEDOUT; + goto out; } } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); if (rngc->err_reg) { - ret = -EIO; - goto err; + err = -EIO; + goto out; } /* @@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng) ctrl |= RNGC_CTRL_AUTO_SEED; writel(ctrl, rngc->base + RNGC_CONTROL); +out: /* * if initialisation was successful, we keep the interrupt * unmasked until imx_rngc_cleanup is called * we mask the interrupt ourselves if we return an error */ - return 0; + if (err) + imx_rngc_irq_mask_clear(rngc); -err: - imx_rngc_irq_mask_clear(rngc); - return ret; + pm_runtime_put(rngc->dev); + return err; } static void imx_rngc_cleanup(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + int err; - imx_rngc_irq_mask_clear(rngc); + err = pm_runtime_resume_and_get(rngc->dev); + if (!err) { + imx_rngc_irq_mask_clear(rngc); + pm_runtime_put(rngc->dev); + } } static int __init imx_rngc_probe(struct platform_device *pdev) @@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); - rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + rngc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rngc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); @@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (irq < 0) return irq; + clk_prepare_enable(rngc->clk); + ver_id = readl(rngc->base + RNGC_VER_ID); rng_type = FIELD_GET(RNG_TYPE, ver_id); /* * This driver supports only RNGC and RNGB. (There's a different * driver for RNGA.) 
*/ - if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) + if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) { + clk_disable_unprepare(rngc->clk); return -ENODEV; + } init_completion(&rngc->rng_op_done); @@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); + } if (self_test) { ret = imx_rngc_self_test(rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "self test failed\n"); + } } + pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); + ret = devm_hwrng_register(&pdev->dev, &rngc->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); @@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev) return 0; } -static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); +static const struct dev_pm_ops imx_rngc_pm_ops = { + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL) +}; static const struct of_device_id imx_rngc_dt_ids[] = { { .compatible = "fsl,imx25-rngb" }, @@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { .name = KBUILD_MODNAME, - .pm = pm_sleep_ptr(&imx_rngc_pm_ops), + .pm = pm_ptr(&imx_rngc_pm_ops), .of_match_table = imx_rngc_dt_ids, }, }; -- cgit v1.2.3 From dede7911e603d6d1952f766fb4541b988e4439c0 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Feb 2025 14:37:01 +0100 Subject: crypto: virtio - Fix kernel-doc of virtcrypto_dev_stop() It seems the kernel-doc of virtcrypto_dev_start() was copied verbatim to virtcrypto_dev_stop(). Fix it. Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_mgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c index 70e778aac0f2..bddbd8ebfebe 100644 --- a/drivers/crypto/virtio/virtio_crypto_mgr.c +++ b/drivers/crypto/virtio/virtio_crypto_mgr.c @@ -256,7 +256,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto) * @vcrypto: Pointer to virtio crypto device. * * Function notifies all the registered services that the virtio crypto device - * is ready to be used. + * shall no longer be used. * To be used by virtio crypto device specific drivers. * * Return: void -- cgit v1.2.3 From 17410baf65c51d9792528c5484c8167bd186e98d Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Feb 2025 14:37:02 +0100 Subject: crypto: virtio - Simplify RSA key size caching When setting a public or private RSA key, the integer n is cached in the transform context virtio_crypto_akcipher_ctx -- with the sole purpose of calculating the key size from it in virtio_crypto_rsa_max_size(). It looks like this was copy-pasted from crypto/rsa.c. Cache the key size directly instead of the integer n, thus simplifying the code and reducing the memory footprint. 
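Concretely, the modulus is now parsed only long enough to measure it. A sketch of the set_key-side change, using the names from the hunk below:

	MPI n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);

	if (!n)
		return -ENOMEM;

	rsa_ctx->key_size = mpi_get_size(n);	/* byte length of the modulus, which is
						 * all virtio_crypto_rsa_max_size() needs */
	mpi_free(n);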
Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 48fee07b7e51..7fdf32c79909 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -21,7 +21,7 @@ #include "virtio_crypto_common.h" struct virtio_crypto_rsa_ctx { - MPI n; + unsigned int key_size; }; struct virtio_crypto_akcipher_ctx { @@ -352,10 +352,7 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm, int node = virtio_crypto_get_current_node(); uint32_t keytype; int ret; - - /* mpi_free will test n, just free it. */ - mpi_free(rsa_ctx->n); - rsa_ctx->n = NULL; + MPI n; if (private) { keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; @@ -368,10 +365,13 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm, if (ret) return ret; - rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz); - if (!rsa_ctx->n) + n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz); + if (!n) return -ENOMEM; + rsa_ctx->key_size = mpi_get_size(n); + mpi_free(n); + if (!ctx->vcrypto) { vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER, VIRTIO_CRYPTO_AKCIPHER_RSA); @@ -442,7 +442,7 @@ static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm) struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx; - return mpi_get_size(rsa_ctx->n); + return rsa_ctx->key_size; } static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) @@ -460,12 +460,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); - struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx; virtio_crypto_alg_akcipher_close_session(ctx); virtcrypto_dev_put(ctx->vcrypto); - mpi_free(rsa_ctx->n); - rsa_ctx->n = NULL; } static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { -- cgit v1.2.3 From aefeca1188962da52e3ed35ddb865d37a216dd53 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Feb 2025 14:37:03 +0100 Subject: crypto: virtio - Drop superfluous ctx->tfm backpointer struct virtio_crypto_[as]kcipher_ctx contains a backpointer to struct crypto_[as]kcipher which is superfluous in two ways: First, it's not used anywhere. Second, the context is embedded into struct crypto_tfm, so one could just use container_of() to get from the context to crypto_tfm and from there to crypto_[as]kcipher. Drop the superfluous backpointer. 
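For reference, the container_of() route mentioned above would look like this (shown only to illustrate why the stored pointer was redundant; __crt_ctx is the trailing context storage of struct crypto_tfm, and the helper below is hypothetical, not part of the patch):

	static struct crypto_akcipher *vc_ctx_to_akcipher(struct virtio_crypto_akcipher_ctx *ctx)
	{
		struct crypto_tfm *tfm = container_of((void *)ctx, struct crypto_tfm, __crt_ctx);

		return container_of(tfm, struct crypto_akcipher, base);
	}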
Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 5 ----- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 4 ---- 2 files changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 7fdf32c79909..aa8255786d6c 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -26,7 +26,6 @@ struct virtio_crypto_rsa_ctx { struct virtio_crypto_akcipher_ctx { struct virtio_crypto *vcrypto; - struct crypto_akcipher *tfm; bool session_valid; __u64 session_id; union { @@ -447,10 +446,6 @@ static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm) static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) { - struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); - - ctx->tfm = tfm; - akcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_akcipher_request)); diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index 23c41d87d835..495fc655a51c 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -17,7 +17,6 @@ struct virtio_crypto_skcipher_ctx { struct virtio_crypto *vcrypto; - struct crypto_skcipher *tfm; struct virtio_crypto_sym_session_info enc_sess_info; struct virtio_crypto_sym_session_info dec_sess_info; @@ -515,10 +514,7 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm) { - struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request)); - ctx->tfm = tfm; return 0; } -- cgit v1.2.3 From dc91d858fb9251b25828ab63ac7220145cef282c Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Feb 2025 14:37:04 +0100 Subject: crypto: virtio - Drop superfluous [as]kcipher_ctx pointer The request context virtio_crypto_{akcipher,sym}_request contains a pointer to the transform context virtio_crypto_[as]kcipher_ctx. The pointer is superfluous as it can be calculated with the cheap crypto_akcipher_reqtfm() + akcipher_tfm_ctx() and crypto_skcipher_reqtfm() + crypto_skcipher_ctx() combos. Drop the superfluous pointer. 
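The two cheap lookups referred to above are simply request -> transform -> transform context, e.g. (as used in the hunks below):

	struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);

plus the analogous crypto_skcipher_reqtfm() + crypto_skcipher_ctx() pair on the skcipher side.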
Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 8 ++++---- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 5 +---- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index aa8255786d6c..ac8eb3d07c93 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -35,7 +35,6 @@ struct virtio_crypto_akcipher_ctx { struct virtio_crypto_akcipher_request { struct virtio_crypto_request base; - struct virtio_crypto_akcipher_ctx *akcipher_ctx; struct akcipher_request *akcipher_req; void *src_buf; void *dst_buf; @@ -212,7 +211,8 @@ out: static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req, struct akcipher_request *req, struct data_queue *data_vq) { - struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx; + struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req); + struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm); struct virtio_crypto_request *vc_req = &vc_akcipher_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data = vc_req->req_data; @@ -272,7 +272,8 @@ static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq) struct akcipher_request *req = container_of(vreq, struct akcipher_request, base); struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req); struct virtio_crypto_request *vc_req = &vc_akcipher_req->base; - struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx; + struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req); + struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm); struct virtio_crypto *vcrypto = ctx->vcrypto; struct data_queue *data_vq = vc_req->dataq; struct virtio_crypto_op_header *header; @@ -318,7 +319,6 @@ static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback; - vc_akcipher_req->akcipher_ctx = ctx; vc_akcipher_req->akcipher_req = req; vc_akcipher_req->opcode = opcode; diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index 495fc655a51c..e2a481a29b77 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -27,7 +27,6 @@ struct virtio_crypto_sym_request { /* Cipher or aead */ uint32_t type; - struct virtio_crypto_skcipher_ctx *skcipher_ctx; struct skcipher_request *skcipher_req; uint8_t *iv; /* Encryption? 
*/ @@ -324,7 +323,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, struct data_queue *data_vq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx; + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct virtio_crypto_request *vc_req = &vc_sym_req->base; unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct virtio_crypto *vcrypto = ctx->vcrypto; @@ -480,7 +479,6 @@ static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_ctx = ctx; vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = true; @@ -505,7 +503,6 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_ctx = ctx; vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = false; -- cgit v1.2.3 From 62d027fb49c76475c5256ab69059752f42c3730f Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Feb 2025 14:37:05 +0100 Subject: crypto: virtio - Drop superfluous [as]kcipher_req pointer The request context virtio_crypto_{akcipher,sym}_request contains a pointer to the [as]kcipher_request itself. The pointer is superfluous as it can be calculated with container_of(). Drop the superfluous pointer. Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 9 ++++----- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 8 +++----- 2 files changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index ac8eb3d07c93..2e44915c9f23 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -35,7 +35,6 @@ struct virtio_crypto_akcipher_ctx { struct virtio_crypto_akcipher_request { struct virtio_crypto_request base; - struct akcipher_request *akcipher_req; void *src_buf; void *dst_buf; uint32_t opcode; @@ -67,7 +66,9 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request * { struct virtio_crypto_akcipher_request *vc_akcipher_req = container_of(vc_req, struct virtio_crypto_akcipher_request, base); - struct akcipher_request *akcipher_req; + struct akcipher_request *akcipher_req = + container_of((void *)vc_akcipher_req, struct akcipher_request, + __ctx); int error; switch (vc_req->status) { @@ -86,8 +87,7 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request * break; } - akcipher_req = vc_akcipher_req->akcipher_req; - /* actual length maybe less than dst buffer */ + /* actual length may be less than dst buffer */ akcipher_req->dst_len = len - sizeof(vc_req->status); sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst), vc_akcipher_req->dst_buf, akcipher_req->dst_len); @@ -319,7 +319,6 @@ static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback; - vc_akcipher_req->akcipher_req = req; vc_akcipher_req->opcode = opcode; return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req); diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index e2a481a29b77..1b3fb21a2a7d 100644 --- 
a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -27,7 +27,6 @@ struct virtio_crypto_sym_request { /* Cipher or aead */ uint32_t type; - struct skcipher_request *skcipher_req; uint8_t *iv; /* Encryption? */ bool encrypt; @@ -55,7 +54,9 @@ static void virtio_crypto_dataq_sym_callback { struct virtio_crypto_sym_request *vc_sym_req = container_of(vc_req, struct virtio_crypto_sym_request, base); - struct skcipher_request *ablk_req; + struct skcipher_request *ablk_req = + container_of((void *)vc_sym_req, struct skcipher_request, + __ctx); int error; /* Finish the encrypt or decrypt process */ @@ -75,7 +76,6 @@ static void virtio_crypto_dataq_sym_callback error = -EIO; break; } - ablk_req = vc_sym_req->skcipher_req; virtio_crypto_skcipher_finalize_req(vc_sym_req, ablk_req, error); } @@ -479,7 +479,6 @@ static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = true; return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); @@ -503,7 +502,6 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = false; return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); -- cgit v1.2.3 From 8bb8609293ff3d8998d75c8db605c0529e83bcd9 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Tue, 4 Feb 2025 16:35:48 +0100 Subject: hwrng: rockchip - store dev pointer in driver struct The rockchip rng driver does a dance to store the dev pointer in the hwrng's unsigned long "priv" member. However, since the struct hwrng member of rk_rng is not a pointer, we can use container_of to get the struct rk_rng instance from just the struct hwrng*, which means we don't have to subvert what little there is in C of a type system and can instead store a pointer to the device struct in the rk_rng itself. 
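Because struct rk_rng embeds its struct hwrng by value, the wrapper is recoverable from the hwrng pointer alone. The driver open-codes the container_of() call; wrapped in a hypothetical helper purely for illustration:

	static struct rk_rng *to_rk_rng(struct hwrng *rng)
	{
		return container_of(rng, struct rk_rng, rng);	/* 'rng' is the embedded member */
	}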
Signed-off-by: Nicolas Frattaroli Signed-off-by: Herbert Xu --- drivers/char/hw_random/rockchip-rng.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 289b385bbf05..b1b510087e58 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -54,6 +54,7 @@ struct rk_rng { void __iomem *base; int clk_num; struct clk_bulk_data *clk_bulks; + struct device *dev; }; /* The mask in the upper 16 bits determines the bits that are updated */ @@ -70,8 +71,7 @@ static int rk_rng_init(struct hwrng *rng) /* start clocks */ ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); if (ret < 0) { - dev_err((struct device *) rk_rng->rng.priv, - "Failed to enable clks %d\n", ret); + dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret); return ret; } @@ -105,7 +105,7 @@ static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) u32 reg; int ret = 0; - ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv); + ret = pm_runtime_resume_and_get(rk_rng->dev); if (ret < 0) return ret; @@ -122,8 +122,8 @@ static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) /* Read random data stored in the registers */ memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); out: - pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv); + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); return (ret < 0) ? ret : to_read; } @@ -164,7 +164,7 @@ static int rk_rng_probe(struct platform_device *pdev) rk_rng->rng.cleanup = rk_rng_cleanup; } rk_rng->rng.read = rk_rng_read; - rk_rng->rng.priv = (unsigned long) dev; + rk_rng->dev = dev; rk_rng->rng.quality = 900; pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); -- cgit v1.2.3 From 24aaa42ed65c0811b598674a593fc653d643a7e6 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Tue, 4 Feb 2025 16:35:49 +0100 Subject: hwrng: rockchip - eliminate some unnecessary dereferences Despite assigning a temporary variable the value of &pdev->dev early on in the probe function, the probe function then continues to use this construct when it could just use the local dev variable instead. Simplify this by using the local dev variable directly. 
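A minimal illustration of the pattern (not the exact hunks below): dev is assigned once at the top of probe, so later calls need not re-derive it from pdev:

	struct device *dev = &pdev->dev;

	/* ... */

	return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");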
Signed-off-by: Nicolas Frattaroli Signed-off-by: Herbert Xu --- drivers/char/hw_random/rockchip-rng.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index b1b510087e58..082daea27e93 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -148,7 +148,7 @@ static int rk_rng_probe(struct platform_device *pdev) return dev_err_probe(dev, rk_rng->clk_num, "Failed to get clks property\n"); - rst = devm_reset_control_array_get_exclusive(&pdev->dev); + rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(rst)) return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); @@ -171,11 +171,11 @@ static int rk_rng_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(dev); ret = devm_pm_runtime_enable(dev); if (ret) - return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n"); + return dev_err_probe(dev, ret, "Runtime pm activation failed.\n"); ret = devm_hwrng_register(dev, &rk_rng->rng); if (ret) - return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n"); + return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n"); return 0; } -- cgit v1.2.3 From 8eff8eb83fc0ae8b5f76220e2bb8644d836e99ff Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Tue, 4 Feb 2025 16:35:50 +0100 Subject: hwrng: rockchip - add support for rk3588's standalone TRNG The RK3588 SoC includes several TRNGs, one part of the Crypto IP block, and the other one (referred to as "trngv1") as a standalone new IP. Add support for this new standalone TRNG to the driver by both generalising it to support multiple different rockchip RNGs and then implementing the required functionality for the new hardware. This work was partly based on the downstream vendor driver by Rockchip's Lin Jinhan, which is why they are listed as a Co-author. While the hardware does support notifying the CPU with an IRQ when the random data is ready, I've discovered while implementing the code to use this interrupt that this results in significantly slower throughput of the TRNG even when under heavy CPU load. I assume this is because with only 32 bytes of data per invocation, the overhead of reinitialising a completion, enabling the interrupt, sleeping and then triggering the completion in the IRQ handler is way more expensive than busylooping. Speaking of busylooping, the poll interval for reading the ISTAT is an atomic read with a delay of 0. In my testing, I've found that this gives us the largest throughput, and it appears the random data is ready pretty much the moment we begin polling, as increasing the poll delay leads to a drop in throughput significant enough to not just be due to the poll interval missing the ideal timing by a microsecond or two. According to downstream, the IP should take 1024 clock cycles to generate 56 bits of random data, which at 150MHz should work out to 6.8us. I did not test whether the data really does take 256/56*6.8us to arrive, though changing the readl to a __raw_readl makes no difference in throughput, and this data does pass the rngtest FIPS checks, so I'm not entirely sure what's going on but I presume it's got something to do with the AHB bus speed and the memory barriers that mainline's readl/writel functions insert. 
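For reference, the arithmetic behind those figures, taking the quoted numbers at face value: 1024 cycles at 150 MHz is 1024 / 150000000 s, roughly 6.8 us for 56 bits of output, so a full 256-bit sample would take roughly 256 / 56 * 6.8 us, about 31 us, if it were really produced only on demand.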
The only other current SoC that uses this new IP is the Rockchip RV1106, but that SoC does not have mainline support as of the time of writing, so we make no effort to declare it as supported for now. Co-developed-by: Lin Jinhan Signed-off-by: Lin Jinhan Signed-off-by: Nicolas Frattaroli Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 3 +- drivers/char/hw_random/rockchip-rng.c | 234 +++++++++++++++++++++++++++++++--- 2 files changed, 216 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index e84c7f431840..fa9e06b5adcf 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -606,7 +606,8 @@ config HW_RANDOM_ROCKCHIP default HW_RANDOM help This driver provides kernel-side support for the True Random Number - Generator hardware found on some Rockchip SoC like RK3566 or RK3568. + Generator hardware found on some Rockchip SoCs like RK3566, RK3568 + or RK3588. To compile this driver as a module, choose M here: the module will be called rockchip-rng. diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 082daea27e93..161050591663 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* - * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC + * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs * * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. * Copyright (c) 2022, Aurelien Jarno + * Copyright (c) 2025, Collabora Ltd. * Authors: * Lin Jinhan * Aurelien Jarno + * Nicolas Frattaroli */ #include #include @@ -32,6 +34,9 @@ */ #define RK_RNG_SAMPLE_CNT 1000 +/* after how many bytes of output TRNGv1 implementations should be reseeded */ +#define RK_TRNG_V1_AUTO_RESEED_CNT 16000 + /* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ #define TRNG_RST_CTL 0x0004 #define TRNG_RNG_CTL 0x0400 @@ -49,25 +54,85 @@ #define TRNG_RNG_SAMPLE_CNT 0x0404 #define TRNG_RNG_DOUT 0x0410 +/* + * TRNG V1 register definitions + * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3588 SoC + */ +#define TRNG_V1_CTRL 0x0000 +#define TRNG_V1_CTRL_NOP 0x00 +#define TRNG_V1_CTRL_RAND 0x01 +#define TRNG_V1_CTRL_SEED 0x02 + +#define TRNG_V1_STAT 0x0004 +#define TRNG_V1_STAT_SEEDED BIT(9) +#define TRNG_V1_STAT_GENERATING BIT(30) +#define TRNG_V1_STAT_RESEEDING BIT(31) + +#define TRNG_V1_MODE 0x0008 +#define TRNG_V1_MODE_128_BIT (0x00 << 3) +#define TRNG_V1_MODE_256_BIT (0x01 << 3) + +/* Interrupt Enable register; unused because polling is faster */ +#define TRNG_V1_IE 0x0010 +#define TRNG_V1_IE_GLBL_EN BIT(31) +#define TRNG_V1_IE_SEED_DONE_EN BIT(1) +#define TRNG_V1_IE_RAND_RDY_EN BIT(0) + +#define TRNG_V1_ISTAT 0x0014 +#define TRNG_V1_ISTAT_RAND_RDY BIT(0) + +/* RAND0 ~ RAND7 */ +#define TRNG_V1_RAND0 0x0020 +#define TRNG_V1_RAND7 0x003C + +/* Auto Reseed Register */ +#define TRNG_V1_AUTO_RQSTS 0x0060 + +#define TRNG_V1_VERSION 0x00F0 +#define TRNG_v1_VERSION_CODE 0x46bc +/* end of TRNG_V1 register definitions */ + +/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ +static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), + "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); + struct rk_rng { struct hwrng rng; void __iomem *base; int clk_num; struct clk_bulk_data *clk_bulks; + const struct 
rk_rng_soc_data *soc_data; struct device *dev; }; +struct rk_rng_soc_data { + int (*rk_rng_init)(struct hwrng *rng); + int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait); + void (*rk_rng_cleanup)(struct hwrng *rng); + unsigned short quality; + bool reset_optional; +}; + /* The mask in the upper 16 bits determines the bits that are updated */ static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) { writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); } -static int rk_rng_init(struct hwrng *rng) +static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset) { - struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); - int ret; + writel(val, rng->base + offset); +} +static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset) +{ + return readl(rng->base + offset); +} + +static int rk_rng_enable_clks(struct rk_rng *rk_rng) +{ + int ret; /* start clocks */ ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); if (ret < 0) { @@ -75,6 +140,18 @@ static int rk_rng_init(struct hwrng *rng) return ret; } + return 0; +} + +static int rk3568_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + /* set the sample period */ writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); @@ -87,7 +164,7 @@ static int rk_rng_init(struct hwrng *rng) return 0; } -static void rk_rng_cleanup(struct hwrng *rng) +static void rk3568_rng_cleanup(struct hwrng *rng) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); @@ -98,7 +175,7 @@ static void rk_rng_cleanup(struct hwrng *rng) clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); } -static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); @@ -128,6 +205,114 @@ out: return (ret < 0) ? 
ret : to_read; } +static int rk3588_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + u32 version, status, mask, istat; + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + version = rk_rng_readl(rk_rng, TRNG_V1_VERSION); + if (version != TRNG_v1_VERSION_CODE) { + dev_err(rk_rng->dev, + "wrong trng version, expected = %08x, actual = %08x\n", + TRNG_V1_VERSION, version); + ret = -EFAULT; + goto err_disable_clk; + } + + mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING | + TRNG_V1_STAT_RESEEDING; + if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status, + (status & mask) == TRNG_V1_STAT_SEEDED, + RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) { + dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n"); + ret = -ETIMEDOUT; + goto err_disable_clk; + } + + /* + * clear ISTAT flag, downstream advises to do this to avoid + * auto-reseeding "on power on" + */ + istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT); + + /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */ + rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS); + + return 0; +err_disable_clk: + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); + return ret; +} + +static void rk3588_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + int ret = 0; + u32 reg; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Clear ISTAT, even without interrupts enabled, this will be updated */ + reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + + /* generate 256 bits of random data */ + rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE); + rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL); + + ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg, + (reg & TRNG_V1_ISTAT_RAND_RDY), 0, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */ + memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read); + +out: + /* Clear ISTAT */ + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + /* close the TRNG */ + rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL); + + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? 
ret : to_read; +} + +static const struct rk_rng_soc_data rk3568_soc_data = { + .rk_rng_init = rk3568_rng_init, + .rk_rng_read = rk3568_rng_read, + .rk_rng_cleanup = rk3568_rng_cleanup, + .quality = 900, + .reset_optional = false, +}; + +static const struct rk_rng_soc_data rk3588_soc_data = { + .rk_rng_init = rk3588_rng_init, + .rk_rng_read = rk3588_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + static int rk_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -139,6 +324,7 @@ static int rk_rng_probe(struct platform_device *pdev) if (!rk_rng) return -ENOMEM; + rk_rng->soc_data = of_device_get_match_data(dev); rk_rng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rk_rng->base)) return PTR_ERR(rk_rng->base); @@ -148,24 +334,30 @@ static int rk_rng_probe(struct platform_device *pdev) return dev_err_probe(dev, rk_rng->clk_num, "Failed to get clks property\n"); - rst = devm_reset_control_array_get_exclusive(dev); - if (IS_ERR(rst)) - return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + if (rk_rng->soc_data->reset_optional) + rst = devm_reset_control_array_get_optional_exclusive(dev); + else + rst = devm_reset_control_array_get_exclusive(dev); - reset_control_assert(rst); - udelay(2); - reset_control_deassert(rst); + if (rst) { + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + } platform_set_drvdata(pdev, rk_rng); rk_rng->rng.name = dev_driver_string(dev); if (!IS_ENABLED(CONFIG_PM)) { - rk_rng->rng.init = rk_rng_init; - rk_rng->rng.cleanup = rk_rng_cleanup; + rk_rng->rng.init = rk_rng->soc_data->rk_rng_init; + rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup; } - rk_rng->rng.read = rk_rng_read; + rk_rng->rng.read = rk_rng->soc_data->rk_rng_read; rk_rng->dev = dev; - rk_rng->rng.quality = 900; + rk_rng->rng.quality = rk_rng->soc_data->quality; pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); @@ -184,7 +376,7 @@ static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) { struct rk_rng *rk_rng = dev_get_drvdata(dev); - rk_rng_cleanup(&rk_rng->rng); + rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng); return 0; } @@ -193,7 +385,7 @@ static int __maybe_unused rk_rng_runtime_resume(struct device *dev) { struct rk_rng *rk_rng = dev_get_drvdata(dev); - return rk_rng_init(&rk_rng->rng); + return rk_rng->soc_data->rk_rng_init(&rk_rng->rng); } static const struct dev_pm_ops rk_rng_pm_ops = { @@ -204,7 +396,8 @@ static const struct dev_pm_ops rk_rng_pm_ops = { }; static const struct of_device_id rk_rng_dt_match[] = { - { .compatible = "rockchip,rk3568-rng", }, + { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, { /* sentinel */ }, }; @@ -221,8 +414,9 @@ static struct platform_driver rk_rng_driver = { module_platform_driver(rk_rng_driver); -MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver"); +MODULE_DESCRIPTION("Rockchip True Random Number Generator driver"); MODULE_AUTHOR("Lin Jinhan "); MODULE_AUTHOR("Aurelien Jarno "); MODULE_AUTHOR("Daniel Golle "); +MODULE_AUTHOR("Nicolas Frattaroli "); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 1b284ffc30b02808a0de698667cbcf5ce5f9144e Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Wed, 5 Feb 2025 11:56:26 +0800 
Subject: crypto: hisilicon/sec2 - fix for aead auth key length According to the HMAC RFC, the authentication key can be 0 bytes, and the hardware can handle this scenario. Therefore, remove the incorrect validation for this case. Fixes: 2f072d75d1ab ("crypto: hisilicon - Add aead support on SEC2") Signed-off-by: Wenkai Lin Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 66bc07da9eb6..472c5c52bf3d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1090,11 +1090,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret; - if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1106,7 +1101,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } -- cgit v1.2.3 From a49cc71e219040d771a8c1254879984f98192811 Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Wed, 5 Feb 2025 11:56:27 +0800 Subject: crypto: hisilicon/sec2 - fix for aead authsize alignment The hardware only supports authentication sizes that are 4-byte aligned. Therefore, the driver switches to software computation in this case. 
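For the CBC-mode AEADs this turns into a simple word-alignment test on the MAC length; a sketch, using the WORD_MASK constant introduced in the hunk below (the helper name is made up for illustration, the driver open-codes the test):

	static bool sec_authsize_hw_ok(u32 authsize)
	{
		return !(authsize & WORD_MASK);	/* whole number of 32-bit words */
	}

Requests that fail the test are handed to the software fallback tfm rather than rejected outright.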
Fixes: 2f072d75d1ab ("crypto: hisilicon - Add aead support on SEC2") Signed-off-by: Wenkai Lin Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 472c5c52bf3d..23e98a21bc04 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth)) -#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -1171,7 +1170,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { + if (ctx->a_ctx.a_key_len & WORD_MASK) { ret = -EINVAL; dev_err(dev, "AUTH key length error!\n"); goto bad_key; @@ -1579,11 +1578,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize)); sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1635,12 +1633,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= - cpu_to_le32((u32)(authsize / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -2230,8 +2226,8 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct device *dev = ctx->dev; int ret; - /* Hardware does not handle cases where authsize is less than 4 bytes */ - if (unlikely(sz < MIN_MAC_LEN)) { + /* Hardware does not handle cases where authsize is not 4 bytes aligned */ + if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) { sreq->aead_req.fallback = true; return -EINVAL; } -- cgit v1.2.3 From f4f353cb7ae9bb43e34943edb693532a39118eca Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Wed, 5 Feb 2025 11:56:28 +0800 Subject: crypto: hisilicon/sec2 - fix for sec spec check During encryption and decryption, user requests must be checked first, if the specifications that are not supported by the hardware are used, the software computing is used for processing. 
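Taken together, the dispatch that these checks feed looks roughly like this (condensed from the hunks below; need_fallback is the new out-parameter of the param-check helpers):

	ret = sec_aead_param_check(ctx, req, &need_fallback);
	if (unlikely(ret)) {
		if (need_fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);	/* unsupported spec: software path */
		return -EINVAL;						/* malformed request: reject */
	}

	return ctx->req_op->process(ctx, req);				/* supported: hardware path */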
Fixes: 2f072d75d1ab ("crypto: hisilicon - Add aead support on SEC2") Signed-off-by: Wenkai Lin Signed-off-by: Chenghai Huang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 1 - drivers/crypto/hisilicon/sec2/sec_crypto.c | 101 +++++++++++------------------ 2 files changed, 39 insertions(+), 63 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 4b9970230822..703920b49c7c 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,7 +37,6 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; - bool fallback; }; /* SEC request of Crypto */ diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 23e98a21bc04..8ea5305bc320 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -690,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) c_ctx->fallback = false; - /* Currently, only XTS mode need fallback tfm when using 192bit key */ - if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) - return 0; - c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc xts mode fallback tfm!\n"); + pr_err("failed to alloc fallback tfm for %s!\n", alg); return PTR_ERR(c_ctx->fbtfm); } @@ -857,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback && c_ctx->fbtfm) { + if (c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1155,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, } ret = crypto_authenc_extractkeys(&keys, key, keylen); - if (ret) + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + } ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { @@ -1170,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if (ctx->a_ctx.a_key_len & WORD_MASK) { - ret = -EINVAL; - dev_err(dev, "AUTH key length error!\n"); - goto bad_key; - } - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) { dev_err(dev, "set sec fallback key err!\n"); @@ -1995,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } -static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, - struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; @@ -2018,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, } break; case SEC_CMODE_CTR: - if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { - dev_err(dev, "skcipher HW version error!\n"); - ret = -EINVAL; - } break; default: ret = -EINVAL; @@ -2030,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, return ret; } -static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; - if (unlikely(!sk_req->src || !sk_req->dst || - sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { + if 
(unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen; if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -2098,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret; if (!sk_req->cryptlen) { @@ -2111,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.encrypt = encrypt; req->ctx = ctx; - ret = sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL; - if (unlikely(ctx->c_ctx.fallback)) + if (unlikely(ctx->c_ctx.fallback || need_fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); return ctx->req_op->process(ctx, req); @@ -2223,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; - struct device *dev = ctx->dev; int ret; - /* Hardware does not handle cases where authsize is not 4 bytes aligned */ - if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) { - sreq->aead_req.fallback = true; + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) return -EINVAL; - } if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + req->assoclen > SEC_MAX_AAD_LEN)) return -EINVAL; - } if (c_mode == SEC_CMODE_CCM) { - if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { - dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) return -EINVAL; - } - ret = aead_iv_demension_check(req); - if (ret) { - dev_err(dev, "aead input iv param error!\n"); - return ret; - } - } - if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - sz; - if (c_mode == SEC_CMODE_CBC) { - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) return -EINVAL; - } } return 0; } -static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; @@ -2277,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (ctx->sec->qm.ver == QM_HW_V2) { - if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - sreq->aead_req.fallback = true; - return -EINVAL; - } + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; } /* Support AES or SM4 */ @@ -2291,8 
+2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (unlikely(sec_aead_spec_check(ctx, sreq))) + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; + } if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) @@ -2336,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret; req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->aead_req.fallback = false; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz); - ret = sec_aead_param_check(ctx, req); + ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { - if (req->aead_req.fallback) + if (need_fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } -- cgit v1.2.3 From 6cb345939b8cc4be79909875276aa9dc87d16757 Mon Sep 17 00:00:00 2001 From: Devaraj Rangasamy Date: Fri, 7 Feb 2025 03:41:52 +0530 Subject: crypto: ccp - Add support for PCI device 0x1134 PCI device 0x1134 shares same register features as PCI device 0x17E0. Hence reuse same data for the new PCI device ID 0x1134. Signed-off-by: Devaraj Rangasamy Acked-by: Tom Lendacky Reviewed-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 157f9a9ed636..2ebc878da160 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, + { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, /* Last entry must be zero */ -- cgit v1.2.3 From ea6f861a3c459abd0fe19a5eaf746afc0aca7530 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 9 Feb 2025 18:17:30 +0800 Subject: crypto: inside-secure - Eliminate duplication in top-level Makefile Instead of having two entries for inside-secure in the top-level Makefile, make it just a single one. 
Signed-off-by: Herbert Xu --- drivers/crypto/Makefile | 3 +-- drivers/crypto/inside-secure/Makefile | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 62c7ce3c9d3e..c97f0ebc55ec 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -43,11 +43,10 @@ obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ #obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ -obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ +obj-y += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ obj-y += xilinx/ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ obj-y += starfive/ -obj-y += inside-secure/eip93/ diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile index 13f64f96c626..30d13fd5d58e 100644 --- a/drivers/crypto/inside-secure/Makefile +++ b/drivers/crypto/inside-secure/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o +obj-y += eip93/ -- cgit v1.2.3 From 4f95a6d2748acffaf866cc58e51d2fd00227e91b Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 10 Feb 2025 23:36:44 +0100 Subject: crypto: bcm - set memory to zero only once Use kmalloc_array() instead of kcalloc() because sg_init_table() already sets the memory to zero. This avoids zeroing the memory twice. Signed-off-by: Thorsten Blum Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 66accd8e08f6..6b80d033648e 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -141,8 +141,8 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg, struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of response data expected */ - mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -205,8 +205,8 @@ spu_skcipher_tx_sg_create(struct brcm_message *mssg, u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (unlikely(!mssg->spu.src)) return -ENOMEM; @@ -532,8 +532,8 @@ spu_ahash_rx_sg_create(struct brcm_message *mssg, struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; - mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -587,8 +587,8 @@ spu_ahash_tx_sg_create(struct brcm_message *mssg, u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.src) return -ENOMEM; @@ -1077,8 +1077,8 @@ static int spu_aead_rx_sg_create(struct brcm_message *mssg, /* have to catch gcm pad in separate buffer */ rx_frag_num++; - mssg->spu.dst = 
kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -1179,8 +1179,8 @@ static int spu_aead_tx_sg_create(struct brcm_message *mssg, u32 assoc_offset = 0; u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.src) return -ENOMEM; -- cgit v1.2.3 From a4f95a2d28b49fdcd6fbeee65266118fa36075a2 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 11 Feb 2025 09:58:52 +0000 Subject: crypto: qat - fix object goals in Makefiles Align with kbuild documentation by using -y instead of -objs, following the kernel convention for building modules from multiple object files. Link: https://docs.kernel.org/kbuild/makefiles.html#loadable-module-goals-obj-m Signed-off-by: Giovanni Cabiddu Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_420xx/Makefile | 2 +- drivers/crypto/intel/qat/qat_4xxx/Makefile | 2 +- drivers/crypto/intel/qat/qat_c3xxx/Makefile | 2 +- drivers/crypto/intel/qat/qat_c3xxxvf/Makefile | 2 +- drivers/crypto/intel/qat/qat_c62x/Makefile | 2 +- drivers/crypto/intel/qat/qat_c62xvf/Makefile | 2 +- drivers/crypto/intel/qat/qat_common/Makefile | 2 +- drivers/crypto/intel/qat/qat_dh895xcc/Makefile | 2 +- drivers/crypto/intel/qat/qat_dh895xccvf/Makefile | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index 45728659fbc4..72b24b1804cf 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o -qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o +qat_420xx-y := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index 9ba202079a22..e8480bb80dee 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o -qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o +qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index 7a06ad519bfc..d9e568572da8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o -qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o +qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index 7ef633058c4f..31a908a211ac 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o -qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o +qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o diff 
--git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index cc9255b3b198..cbdaaa135e84 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o -qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o +qat_c62x-y := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 256786662d60..60e499b041ec 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o -qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o +qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 7acf9c576149..c515cc1e986a 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"' -intel_qat-objs := adf_cfg.o \ +intel_qat-y := adf_cfg.o \ adf_isr.o \ adf_ctl_drv.o \ adf_cfg_services.o \ diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index cfd3bd757715..5bf5c890c362 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o -qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o +qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 64b54e92b2b4..93f9c81edf09 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o -qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o +qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o -- cgit v1.2.3 From 3af4e7fa26523852e642cd1462aea99a2183843a Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 11 Feb 2025 09:58:53 +0000 Subject: crypto: qat - reorder objects in qat_common Makefile The objects in the qat_common Makefile are currently listed in a random order. Reorder the objects alphabetically to make it easier to find where to add a new object. 
Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/Makefile | 66 ++++++++++++++-------------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index c515cc1e986a..af5df29fd2e3 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,62 +1,62 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"' -intel_qat-y := adf_cfg.o \ - adf_isr.o \ - adf_ctl_drv.o \ +intel_qat-y := adf_accel_engine.o \ + adf_admin.o \ + adf_aer.o \ + adf_cfg.o \ adf_cfg_services.o \ + adf_clock.o \ + adf_ctl_drv.o \ adf_dev_mgr.o \ - adf_init.o \ - adf_accel_engine.o \ - adf_aer.o \ - adf_transport.o \ - adf_admin.o \ - adf_hw_arbiter.o \ - adf_sysfs.o \ - adf_sysfs_ras_counters.o \ + adf_gen2_config.o \ + adf_gen2_dc.o \ adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ - adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_dc.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ - adf_gen4_vf_mig.o \ adf_gen4_pm.o \ - adf_gen2_dc.o \ - adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ - adf_clock.o \ + adf_gen4_vf_mig.o \ + adf_hw_arbiter.o \ + adf_init.o \ + adf_isr.o \ adf_mstate_mgr.o \ - qat_crypto.o \ - qat_compression.o \ - qat_comp_algs.o \ - qat_algs.o \ - qat_asym_algs.o \ - qat_algs_send.o \ - adf_rl.o \ adf_rl_admin.o \ + adf_rl.o \ + adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ - qat_uclo.o \ - qat_hal.o \ + adf_transport.o \ + qat_algs.o \ + qat_algs_send.o \ + qat_asym_algs.o \ qat_bl.o \ - qat_mig_dev.o + qat_comp_algs.o \ + qat_compression.o \ + qat_crypto.o \ + qat_hal.o \ + qat_mig_dev.o \ + qat_uclo.o -intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ +intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \ + adf_dbgfs.o \ adf_fw_counters.o \ - adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ adf_gen4_tl.o \ - adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_heartbeat.o \ adf_pm_dbgfs.o \ adf_telemetry.o \ adf_tl_debugfs.o \ - adf_dbgfs.o + adf_transport_debug.o -intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ +intel_qat-$(CONFIG_PCI_IOV) += adf_gen2_pfvf.o adf_gen4_pfvf.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ - adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_pfvf_utils.o adf_pfvf_vf_msg.o \ + adf_pfvf_vf_proto.o adf_sriov.o adf_vf_isr.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o -- cgit v1.2.3 From 9057f824c197596b97279caba1c291fe1c26507c Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Fri, 14 Feb 2025 16:40:42 +0000 Subject: crypto: qat - do not export adf_cfg_services The symbol `adf_cfg_services` is only used on the intel_qat module. There is no need to export it. 
Signed-off-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_cfg_services.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index 268052294468..d30d686447e2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -20,7 +20,6 @@ const char *const adf_cfg_services[] = { [SVC_DC_SYM] = ADF_CFG_DC_SYM, [SVC_SYM_DC] = ADF_CFG_SYM_DC, }; -EXPORT_SYMBOL_GPL(adf_cfg_services); int adf_get_service_enabled(struct adf_accel_dev *accel_dev) { -- cgit v1.2.3 From 5ab6c06dff298ecf2afb64975dc66761c261d944 Mon Sep 17 00:00:00 2001 From: Małgorzata Mielnik Date: Fri, 14 Feb 2025 16:40:43 +0000 Subject: crypto: qat - refactor service parsing logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The service parsing logic is used to parse the configuration string provided by the user using the attribute qat/cfg_services in sysfs. The logic relies on hard-coded strings. For example, the service "sym;asym" is also replicated as "asym;sym". This makes the addition of new services or service combinations complex as it requires the addition of new hard-coded strings for all possible combinations. This commit addresses this issue by: * reducing the number of internal service strings to only the basic service representations. * modifying the service parsing logic to analyze the service string token by token instead of comparing a whole string with patterns. * introducing the concept of a service mask where each service is represented by a single bit. * dividing the parsing logic into several functions to allow for code reuse (e.g. by sysfs-related functions). * introducing a new, device generation-specific function to verify whether the requested service combination is supported by the currently used device. 
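To make the service-mask idea concrete, here is a minimal user-space sketch of the token-by-token parsing described above. It is illustrative only: parse_services() and its limits are stand-ins, not the kernel's adf_service_string_to_mask(), which works on the device configuration string with strsep(), sysfs_match_string() and test_and_set_bit().

    /* Illustrative sketch: parse "sym;asym;dc" into a per-service bit mask. */
    #include <stdio.h>
    #include <string.h>

    enum { SVC_ASYM, SVC_SYM, SVC_DC, SVC_DCC, SVC_BASE_COUNT };

    static const char *const svc_names[SVC_BASE_COUNT] = { "asym", "sym", "dc", "dcc" };

    #define MAX_CONCURR_SVC 3

    static int parse_services(const char *in, unsigned long *out_mask)
    {
            char buf[64];
            unsigned long mask = 0;
            int i, nsvc = 0;
            char *tok;

            if (strlen(in) >= sizeof(buf))
                    return -1;
            strcpy(buf, in);

            for (tok = strtok(buf, ";"); tok; tok = strtok(NULL, ";")) {
                    for (i = 0; i < SVC_BASE_COUNT; i++)
                            if (!strcmp(tok, svc_names[i]))
                                    break;
                    if (i == SVC_BASE_COUNT)
                            return -1;      /* unknown service name */
                    if (mask & (1UL << i))
                            return -1;      /* same service listed twice */
                    if (++nsvc > MAX_CONCURR_SVC)
                            return -1;      /* more services than supported concurrently */
                    mask |= 1UL << i;
            }

            *out_mask = mask;
            return 0;
    }

    int main(void)
    {
            unsigned long mask;

            /* "sym;asym" and "asym;sym" reduce to the same mask, so only the
             * basic service strings need to be kept. */
            if (!parse_services("sym;asym", &mask))
                    printf("mask = 0x%lx\n", mask); /* prints mask = 0x3 */
            return 0;
    }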
Signed-off-by: Małgorzata Mielnik Co-developed-by: Giovanni Cabiddu Signed-off-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 16 +- .../crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 11 +- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_cfg_services.c | 166 ++++++++++++++++++--- .../crypto/intel/qat/qat_common/adf_cfg_services.h | 26 ++-- .../crypto/intel/qat/qat_common/adf_cfg_strings.h | 6 +- .../crypto/intel/qat/qat_common/adf_gen4_config.c | 15 +- .../crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 26 +++- .../crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 1 + drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 12 +- 10 files changed, 202 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 9faef33e54bd..87c71914ae87 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -106,8 +106,7 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return ARRAY_SIZE(adf_fw_cy_config); case SVC_DC: return ARRAY_SIZE(adf_fw_dc_config); @@ -118,10 +117,8 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) case SVC_ASYM: return ARRAY_SIZE(adf_fw_asym_config); case SVC_ASYM_DC: - case SVC_DC_ASYM: return ARRAY_SIZE(adf_fw_asym_dc_config); case SVC_SYM_DC: - case SVC_DC_SYM: return ARRAY_SIZE(adf_fw_sym_dc_config); default: return 0; @@ -131,8 +128,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -143,10 +139,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -266,8 +260,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -284,10 +277,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -482,6 +473,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_420XX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index bbd92c017c28..36eebda3a028 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c 
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -178,8 +178,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -196,10 +195,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -241,8 +238,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -253,10 +249,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -466,6 +460,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 7830ecb1a1f1..9c15c31ad134 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -333,6 +333,7 @@ struct adf_hw_device_data { int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); + bool (*services_supported)(unsigned long mask); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index d30d686447e2..30abcd9e1283 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2023 Intel Corporation */ +#include +#include #include #include #include @@ -8,39 +10,165 @@ #include "adf_cfg_services.h" #include "adf_cfg_strings.h" -const char *const adf_cfg_services[] = { - [SVC_CY] = ADF_CFG_CY, - [SVC_CY2] = ADF_CFG_ASYM_SYM, +static const char *const adf_cfg_services[] = { + [SVC_ASYM] = ADF_CFG_ASYM, + [SVC_SYM] = ADF_CFG_SYM, [SVC_DC] = ADF_CFG_DC, [SVC_DCC] = ADF_CFG_DCC, - [SVC_SYM] = ADF_CFG_SYM, - [SVC_ASYM] = ADF_CFG_ASYM, - [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, - [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, - [SVC_DC_SYM] = ADF_CFG_DC_SYM, - [SVC_SYM_DC] = ADF_CFG_SYM_DC, }; -int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +/* + * Ensure that the size of the array matches the number of services, + * SVC_BASE_COUNT, that is used to size the bitmap. 
+ */ +static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT); + +/* + * Ensure that the maximum number of concurrent services that can be + * enabled on a device is less than or equal to the number of total + * supported services. + */ +static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC); + +/* + * Ensure that the number of services fit a single unsigned long, as each + * service is represented by a bit in the mask. + */ +static_assert(BITS_PER_LONG >= SVC_BASE_COUNT); + +/* + * Ensure that size of the concatenation of all service strings is smaller + * than the size of the buffer that will contain them. + */ +static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER + ADF_CFG_ASYM ADF_SERVICES_DELIMITER + ADF_CFG_DC ADF_SERVICES_DELIMITER + ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES); + +static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf, + size_t len, unsigned long *out_mask) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + unsigned long mask = 0; + char *substr, *token; + int id, num_svc = 0; + + if (len > ADF_CFG_MAX_VAL_LEN_IN_BYTES - 1) + return -EINVAL; + + strscpy(services, buf, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + substr = services; + + while ((token = strsep(&substr, ADF_SERVICES_DELIMITER))) { + id = sysfs_match_string(adf_cfg_services, token); + if (id < 0) + return id; + + if (test_and_set_bit(id, &mask)) + return -EINVAL; + + if (num_svc++ == MAX_NUM_CONCURR_SVC) + return -EINVAL; + } + + if (hw_data->services_supported && !hw_data->services_supported(mask)) + return -EINVAL; + + *out_mask = mask; + + return 0; +} + +static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len) +{ + int offset = 0; + int bit; + + if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES) + return -ENOSPC; + + for_each_set_bit(bit, &mask, SVC_BASE_COUNT) { + if (offset) + offset += scnprintf(buf + offset, len - offset, + ADF_SERVICES_DELIMITER); + + offset += scnprintf(buf + offset, len - offset, "%s", + adf_cfg_services[bit]); + } + + return 0; +} + +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long mask; + int ret; + + ret = adf_service_string_to_mask(accel_dev, in, in_len, &mask); + if (ret) + return ret; + + if (!mask) + return -EINVAL; + + return adf_service_mask_to_string(mask, out, out_len); +} + +static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + size_t len; int ret; ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SERVICES_ENABLED, services); if (ret) { - dev_err(&GET_DEV(accel_dev), - ADF_SERVICES_ENABLED " param not found\n"); + dev_err(&GET_DEV(accel_dev), "%s param not found\n", + ADF_SERVICES_ENABLED); return ret; } - ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services), - services); - if (ret < 0) - dev_err(&GET_DEV(accel_dev), - "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n", - services); + len = strnlen(services, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + ret = adf_service_string_to_mask(accel_dev, services, len, mask); + if (ret) + dev_err(&GET_DEV(accel_dev), "Invalid value of %s param: %s\n", + ADF_SERVICES_ENABLED, services); return ret; } + +int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +{ + unsigned long mask; + int ret; + + ret = adf_get_service_mask(accel_dev, &mask); + if 
(ret) + return ret; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_ASYM, &mask)) + return SVC_SYM_ASYM; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_SYM_DC; + + if (test_bit(SVC_ASYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_ASYM_DC; + + if (test_bit(SVC_SYM, &mask)) + return SVC_SYM; + + if (test_bit(SVC_ASYM, &mask)) + return SVC_ASYM; + + if (test_bit(SVC_DC, &mask)) + return SVC_DC; + + if (test_bit(SVC_DCC, &mask)) + return SVC_DCC; + + return -EINVAL; +} EXPORT_SYMBOL_GPL(adf_get_service_enabled); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index c6b0328b0f5b..f6bafc15cbc6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -8,21 +8,29 @@ struct adf_accel_dev; enum adf_services { - SVC_CY = 0, - SVC_CY2, + SVC_ASYM = 0, + SVC_SYM, SVC_DC, SVC_DCC, - SVC_SYM, - SVC_ASYM, - SVC_DC_ASYM, - SVC_ASYM_DC, - SVC_DC_SYM, + SVC_BASE_COUNT +}; + +enum adf_composed_services { + SVC_SYM_ASYM = SVC_BASE_COUNT, SVC_SYM_DC, - SVC_COUNT + SVC_ASYM_DC, +}; + +enum { + ADF_ONE_SERVICE = 1, + ADF_TWO_SERVICES, + ADF_THREE_SERVICES, }; -extern const char *const adf_cfg_services[SVC_COUNT]; +#define MAX_NUM_CONCURR_SVC ADF_THREE_SERVICES +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len); int adf_get_service_enabled(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index e015ad6cace2..b79982c4a856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -27,13 +27,9 @@ #define ADF_CFG_CY "sym;asym" #define ADF_CFG_SYM "sym" #define ADF_CFG_ASYM "asym" -#define ADF_CFG_ASYM_SYM "asym;sym" -#define ADF_CFG_ASYM_DC "asym;dc" -#define ADF_CFG_DC_ASYM "dc;asym" -#define ADF_CFG_SYM_DC "sym;dc" -#define ADF_CFG_DC_SYM "dc;sym" #define ADF_CFG_DCC "dcc" #define ADF_SERVICES_ENABLED "ServicesEnabled" +#define ADF_SERVICES_DELIMITER ";" #define ADF_PM_IDLE_SUPPORT "PmIdleSupport" #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c index fe1f3d727dc5..f97e7a880f3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -213,7 +213,6 @@ static int adf_no_dev_config(struct adf_accel_dev *accel_dev) */ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; int ret; ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); @@ -224,18 +223,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: + switch (adf_get_service_enabled(accel_dev)) { + case SVC_SYM_ASYM: ret = adf_crypto_dev_config(accel_dev); break; case SVC_DC: diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 41a0979e68c1..ade279e48010 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ +#include #include #include #include "adf_accel_devices.h" @@ -265,18 +266,29 @@ static bool is_single_service(int service_id) case SVC_SYM: case SVC_ASYM: return true; - case SVC_CY: - case SVC_CY2: - case SVC_DCC: - case SVC_ASYM_DC: - case SVC_DC_ASYM: - case SVC_SYM_DC: - case SVC_DC_SYM: default: return false; } } +bool adf_gen4_services_supported(unsigned long mask) +{ + unsigned long num_svc = hweight_long(mask); + + if (mask >= BIT(SVC_BASE_COUNT)) + return false; + + switch (num_svc) { + case ADF_ONE_SERVICE: + return true; + case ADF_TWO_SERVICES: + return !test_bit(SVC_DCC, &mask); + default: + return false; + } +} +EXPORT_SYMBOL_GPL(adf_gen4_services_supported); + int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index e8c53bd76f1b..51fc2eaa263e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -179,5 +179,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); +bool adf_gen4_services_supported(unsigned long service_mask); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 84450bffacb6..6c39194647f0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -116,25 +116,27 @@ static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev, static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; struct adf_hw_device_data *hw_data; struct adf_accel_dev *accel_dev; int ret; - ret = sysfs_match_string(adf_cfg_services, buf); - if (ret < 0) - return ret; - accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; + ret = adf_parse_service_string(accel_dev, buf, count, services, + ADF_CFG_MAX_VAL_LEN_IN_BYTES); + if (ret) + return ret; + if (adf_dev_started(accel_dev)) { dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n", accel_dev->accel_id); return -EINVAL; } - ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]); + ret = adf_sysfs_update_dev_config(accel_dev, services); if (ret < 0) return ret; -- cgit v1.2.3 From 217460544a1b9741874dab826eb614e8c006f8a8 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sun, 16 Feb 2025 00:36:18 +0100 Subject: crypto: inside-secure/eip93 - Correctly handle return of for sg_nents_for_len Fix smatch warning for sg_nents_for_len return value in Inside Secure EIP93 driver. The return value of sg_nents_for_len was assigned to an u32 and the error was ignored and converted to a positive integer. Rework the code to correctly handle the error from sg_nents_for_len to mute smatch warning. 
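The underlying pitfall generalizes: sg_nents_for_len() returns either a scatterlist entry count or a negative errno, so storing the result in a u32 turns -EINVAL into a huge positive count and the error value can never trip the "<= 0" check. A minimal sketch of the broken and the corrected pattern, using a stand-in helper so it builds outside the kernel:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for the kernel's sg_nents_for_len(): returns the number of
     * scatterlist entries needed, or a negative errno such as -EINVAL when
     * the list is shorter than the requested length. */
    static int sg_nents_for_len_stub(int requested, int available)
    {
            if (available < requested)
                    return -EINVAL;
            return 1;       /* pretend a single entry covers the request */
    }

    int main(void)
    {
            /* Broken pattern: the unsigned variable swallows the error. */
            unsigned int bad_nents = sg_nents_for_len_stub(64, 16);

            if (bad_nents <= 0)     /* never fires: -EINVAL is now 4294967274 */
                    printf("error caught (this line is never reached)\n");

            /* Fixed pattern: keep the result signed and check it before use. */
            int nents = sg_nents_for_len_stub(64, 16);

            if (nents < 0) {
                    printf("sg_nents_for_len failed: %d\n", nents);
                    return 1;
            }

            printf("nents = %d\n", nents);
            return 0;
    }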
Fixes: 9739f5f93b78 ("crypto: eip93 - Add Inside Secure SafeXcel EIP-93 crypto engine support") Reported-by: Dan Carpenter Signed-off-by: Christian Marangi Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/eip93/eip93-common.c | 25 +++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c index 446a047c242d..66153aa2493f 100644 --- a/drivers/crypto/inside-secure/eip93/eip93-common.c +++ b/drivers/crypto/inside-secure/eip93/eip93-common.c @@ -202,7 +202,6 @@ int check_valid_request(struct eip93_cipher_reqctx *rctx) { struct scatterlist *src = rctx->sg_src; struct scatterlist *dst = rctx->sg_dst; - u32 src_nents, dst_nents; u32 textsize = rctx->textsize; u32 authsize = rctx->authsize; u32 blksize = rctx->blksize; @@ -210,6 +209,7 @@ int check_valid_request(struct eip93_cipher_reqctx *rctx) u32 totlen_dst = rctx->assoclen + rctx->textsize; u32 copy_len; bool src_align, dst_align; + int src_nents, dst_nents; int err = -EINVAL; if (!IS_CTR(rctx->flags)) { @@ -225,19 +225,24 @@ int check_valid_request(struct eip93_cipher_reqctx *rctx) } src_nents = sg_nents_for_len(src, totlen_src); + if (src_nents < 0) + return src_nents; + dst_nents = sg_nents_for_len(dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; if (src == dst) { src_nents = max(src_nents, dst_nents); dst_nents = src_nents; - if (unlikely((totlen_src || totlen_dst) && src_nents <= 0)) + if (unlikely((totlen_src || totlen_dst) && !src_nents)) return err; } else { - if (unlikely(totlen_src && src_nents <= 0)) + if (unlikely(totlen_src && !src_nents)) return err; - if (unlikely(totlen_dst && dst_nents <= 0)) + if (unlikely(totlen_dst && !dst_nents)) return err; } @@ -273,8 +278,16 @@ int check_valid_request(struct eip93_cipher_reqctx *rctx) return err; } - rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src); - rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst); + src_nents = sg_nents_for_len(rctx->sg_src, totlen_src); + if (src_nents < 0) + return src_nents; + + dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; + + rctx->src_nents = src_nents; + rctx->dst_nents = dst_nents; return 0; } -- cgit v1.2.3 From 2291399384c00bf61b117e848f3da6cf14c90f32 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 19 Feb 2025 16:03:32 +0100 Subject: hwrng: Kconfig - Fix indentation of HW_RANDOM_CN10K help text Change the indentation of the help text of the HW_RANDOM_CN10K symbol from one TAB plus one space to one TAB plus two spaces, as is customary. Signed-off-by: Geert Uytterhoeven Reviewed-by: Dragan Simic Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index fa9e06b5adcf..c85827843447 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -583,11 +583,11 @@ config HW_RANDOM_CN10K depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) default HW_RANDOM if ARCH_THUNDER help - This driver provides support for the True Random Number - generator available in Marvell CN10K SoCs. + This driver provides support for the True Random Number + generator available in Marvell CN10K SoCs. - To compile this driver as a module, choose M here. - The module will be called cn10k_rng. If unsure, say Y. 
+ To compile this driver as a module, choose M here. + The module will be called cn10k_rng. If unsure, say Y. config HW_RANDOM_JH7110 tristate "StarFive JH7110 Random Number Generator support" -- cgit v1.2.3 From 422bf8fc9999b79d4962c79cffc3c9d9dc8cab62 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:32 -0800 Subject: crypto: nx - use the new scatterwalk functions - In nx_walk_and_build(), use scatterwalk_start_at_pos() instead of a more complex way to achieve the same result. - Also in nx_walk_and_build(), use the new functions scatterwalk_next() which consolidates scatterwalk_clamp() and scatterwalk_map(), and use scatterwalk_done_src() which consolidates scatterwalk_unmap(), scatterwalk_advance(), and scatterwalk_done(). Remove unnecessary code that seemed to be intended to advance to the next sg entry, which is already handled by the scatterwalk functions. Note that nx_walk_and_build() does not actually read or write the mapped virtual address, and thus it is misusing the scatter_walk API. It really should just access the scatterlist directly. This patch does not try to address this existing issue. - In nx_gca(), use memcpy_from_sglist() instead of a more complex way to achieve the same result. - In various functions, replace calls to scatterwalk_map_and_copy() with memcpy_from_sglist() or memcpy_to_sglist() as appropriate. Note that this eliminates the confusing 'out' argument (which this driver had tried to work around by defining the missing constants for it...) Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-ccm.c | 16 ++++++---------- drivers/crypto/nx/nx-aes-gcm.c | 17 ++++++----------- drivers/crypto/nx/nx.c | 31 +++++-------------------------- drivers/crypto/nx/nx.h | 3 --- 4 files changed, 17 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index c843f4c6f684..56a0b3a67c33 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -217,13 +217,11 @@ static int generate_pat(u8 *iv, memset(b1, 0, 16); if (assoclen <= 65280) { *(u16 *)b1 = assoclen; - scatterwalk_map_and_copy(b1 + 2, req->src, 0, - iauth_len, SCATTERWALK_FROM_SG); + memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len); } else { *(u16 *)b1 = (u16)(0xfffe); *(u32 *)&b1[2] = assoclen; - scatterwalk_map_and_copy(b1 + 6, req->src, 0, - iauth_len, SCATTERWALK_FROM_SG); + memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len); } } @@ -341,9 +339,8 @@ static int ccm_nx_decrypt(struct aead_request *req, nbytes -= authsize; /* copy out the auth tag to compare with later */ - scatterwalk_map_and_copy(priv->oauth_tag, - req->src, nbytes + req->assoclen, authsize, - SCATTERWALK_FROM_SG); + memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen, + authsize); rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); @@ -465,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req, } while (processed < nbytes); /* copy out the auth tag */ - scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, - req->dst, nbytes + req->assoclen, authsize, - SCATTERWALK_TO_SG); + memcpy_to_sglist(req->dst, nbytes + req->assoclen, + csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); diff --git a/drivers/crypto/nx/nx-aes-gcm.c 
b/drivers/crypto/nx/nx-aes-gcm.c index 4a796318b430..b7fe2de96d96 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -103,16 +103,13 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, { int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; - struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { - scatterwalk_start(&walk, req->src); - scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); - scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); + memcpy_from_sglist(out, req->src, 0, nbytes); return 0; } @@ -391,19 +388,17 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc, mac: if (enc) { /* copy out the auth tag */ - scatterwalk_map_and_copy( - csbcpb->cpb.aes_gcm.out_pat_or_mac, + memcpy_to_sglist( req->dst, req->assoclen + nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_TO_SG); + csbcpb->cpb.aes_gcm.out_pat_or_mac, + crypto_aead_authsize(crypto_aead_reqtfm(req))); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; - scatterwalk_map_and_copy( + memcpy_from_sglist( itag, req->src, req->assoclen + nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_FROM_SG); + crypto_aead_authsize(crypto_aead_reqtfm(req))); rc = crypto_memneq(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 010e87d9da36..dd95e5361d88 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -153,40 +153,19 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, { struct scatter_walk walk; struct nx_sg *nx_sg = nx_dst; - unsigned int n, offset = 0, len = *src_len; + unsigned int n, len = *src_len; char *dst; /* we need to fast forward through @start bytes first */ - for (;;) { - scatterwalk_start(&walk, sg_src); - - if (start < offset + sg_src->length) - break; - - offset += sg_src->length; - sg_src = sg_next(sg_src); - } - - /* start - offset is the number of bytes to advance in the scatterlist - * element we're currently looking at */ - scatterwalk_advance(&walk, start - offset); + scatterwalk_start_at_pos(&walk, sg_src, start); while (len && (nx_sg - nx_dst) < sglen) { - n = scatterwalk_clamp(&walk, len); - if (!n) { - /* In cases where we have scatterlist chain sg_next - * handles with it properly */ - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } - dst = scatterwalk_map(&walk); + dst = scatterwalk_next(&walk, len, &n); nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst)); - len -= n; - scatterwalk_unmap(dst); - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); + scatterwalk_done_src(&walk, dst, n); + len -= n; } /* update to_process */ *src_len -= len; diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 2697baebb6a3..e1b4b6927bec 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -189,7 +189,4 @@ extern struct shash_alg nx_shash_sha256_alg; extern struct nx_crypto_driver nx_driver; -#define SCATTERWALK_TO_SG 1 -#define SCATTERWALK_FROM_SG 0 - #endif -- cgit v1.2.3 From 323abf2569865a578e24382324aa0fc0b18a2490 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:34 -0800 Subject: crypto: s5p-sss - use the new scatterwalk functions s5p_sg_copy_buf() open-coded a copy from/to a scatterlist using 
scatterwalk_* functions that are planned for removal. Replace it with the new functions memcpy_from_sglist() and memcpy_to_sglist() instead. Also take the opportunity to replace calls to scatterwalk_map_and_copy() in the same file; this eliminates the confusing 'out' argument. Cc: Krzysztof Kozlowski Cc: Vladimir Zapolskiy Cc: linux-samsung-soc@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 57ab237e899e..b4c3c14dafd5 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -458,19 +458,6 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg) *sg = NULL; } -static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg, - unsigned int nbytes, int out) -{ - struct scatter_walk walk; - - if (!nbytes) - return; - - scatterwalk_start(&walk, sg); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); -} - static void s5p_sg_done(struct s5p_aes_dev *dev) { struct skcipher_request *req = dev->req; @@ -480,8 +467,8 @@ static void s5p_sg_done(struct s5p_aes_dev *dev) dev_dbg(dev->dev, "Copying %d bytes of output data back to original place\n", dev->req->cryptlen); - s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst, - dev->req->cryptlen, 1); + memcpy_to_sglist(dev->req->dst, 0, sg_virt(dev->sg_dst_cpy), + dev->req->cryptlen); } s5p_free_sg_cpy(dev, &dev->sg_src_cpy); s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); @@ -526,7 +513,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src, return -ENOMEM; } - s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0); + memcpy_from_sglist(pages, src, 0, dev->req->cryptlen); sg_init_table(*dst, 1); sg_set_buf(*dst, pages, len); @@ -1035,8 +1022,7 @@ static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx, if (ctx->bufcnt) memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); - scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip, - new_len, 0); + memcpy_from_sglist(buf + ctx->bufcnt, sg, ctx->skip, new_len); sg_init_table(ctx->sgl, 1); sg_set_buf(ctx->sgl, buf, len); ctx->sg = ctx->sgl; @@ -1229,8 +1215,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update) if (len > nbytes) len = nbytes; - scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, - 0, len, 0); + memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, 0, len); ctx->bufcnt += len; nbytes -= len; ctx->skip = len; @@ -1253,9 +1238,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update) hash_later = ctx->total - xmit_len; /* copy hash_later bytes from end of req->src */ /* previous bytes are in xmit_buf, so no overwrite */ - scatterwalk_map_and_copy(ctx->buffer, req->src, - req->nbytes - hash_later, - hash_later, 0); + memcpy_from_sglist(ctx->buffer, req->src, + req->nbytes - hash_later, hash_later); } if (xmit_len > BUFLEN) { @@ -1267,8 +1251,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update) /* have buffered data only */ if (unlikely(!ctx->bufcnt)) { /* first update didn't fill up buffer */ - scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src, - 0, xmit_len, 0); + memcpy_from_sglist(ctx->dd->xmit_buf, req->src, + 0, xmit_len); } sg_init_table(ctx->sgl, 1); @@ -1506,8 +1490,8 @@ static int s5p_hash_update(struct ahash_request *req) return 0; if (ctx->bufcnt + req->nbytes <= BUFLEN) { - 
scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, - 0, req->nbytes, 0); + memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, + 0, req->nbytes); ctx->bufcnt += req->nbytes; return 0; } -- cgit v1.2.3 From 95c47514b91659a4d500b925fba45461b839e5db Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 19 Feb 2025 10:23:35 -0800 Subject: crypto: stm32 - use the new scatterwalk functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace calls to the deprecated function scatterwalk_copychunks() with memcpy_from_scatterwalk(), memcpy_to_scatterwalk(), scatterwalk_skip(), or scatterwalk_start_at_pos() as appropriate. Cc: Alexandre Torgue Cc: Maxime Coquelin Cc: Maxime Méré Cc: Thomas Bourgoin Cc: linux-stm32@st-md-mailman.stormreply.com Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-cryp.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 14c6339c2e43..5ce88e7a8f65 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -666,7 +666,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) written = min_t(size_t, AES_BLOCK_SIZE - len, alen); - scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); + memcpy_from_scatterwalk((char *)block + len, &cryp->in_walk, written); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); @@ -993,7 +993,7 @@ static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp) /* Advance scatterwalk to not DMA'ed data */ align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize); - scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + scatterwalk_skip(&cryp->in_walk, align_size); cryp->header_in -= align_size; ret = dma_submit_error(dmaengine_submit(tx_in)); @@ -1056,7 +1056,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp) /* Advance scatterwalk to not DMA'ed data */ align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); - scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + scatterwalk_skip(&cryp->in_walk, align_size); cryp->payload_in -= align_size; ret = dma_submit_error(dmaengine_submit(tx_in)); @@ -1067,7 +1067,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp) dma_async_issue_pending(cryp->dma_lch_in); /* Advance scatterwalk to not DMA'ed data */ - scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2); + scatterwalk_skip(&cryp->out_walk, align_size); cryp->payload_out -= align_size; ret = dma_submit_error(dmaengine_submit(tx_out)); if (ret < 0) { @@ -1737,9 +1737,9 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, out_sg = areq->dst; scatterwalk_start(&cryp->in_walk, in_sg); - scatterwalk_start(&cryp->out_walk, out_sg); /* In output, jump after assoc data */ - scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); + scatterwalk_start_at_pos(&cryp->out_walk, out_sg, + areq->assoclen); ret = stm32_cryp_hw_init(cryp); if (ret) @@ -1873,12 +1873,12 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) /* Get and write tag */ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); - scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); + memcpy_to_scatterwalk(&cryp->out_walk, out_tag, cryp->authsize); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - scatterwalk_copychunks(in_tag, 
&cryp->in_walk, cryp->authsize, 0); + memcpy_from_scatterwalk(in_tag, &cryp->in_walk, cryp->authsize); readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) @@ -1923,8 +1923,8 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) u32 block[AES_BLOCK_32]; readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); } @@ -1933,8 +1933,8 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { u32 block[AES_BLOCK_32] = {0}; - scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_in), 0); + memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_in)); writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32)); cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } @@ -1981,8 +1981,8 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); @@ -2079,8 +2079,8 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) Load again CRYP_CSGCMCCMxR */ @@ -2161,7 +2161,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); - scatterwalk_copychunks(block, &cryp->in_walk, written, 0); + memcpy_from_scatterwalk(block, &cryp->in_walk, written); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); -- cgit v1.2.3 From 1b5da8b2d71de83b422633fa0bd11ae86f0b804c Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 23 Feb 2025 00:53:54 +0000 Subject: crypto: octeontx2 - Remove unused otx2_cpt_print_uc_dbg_info otx2_cpt_print_uc_dbg_info() has been unused since 2023's commit 82f89f1aa6ca ("crypto: octeontx2 - add devlink option to set t106 mode") Remove it and the get_engs_info() helper it's the only user of. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Herbert Xu --- .../crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 99 ---------------------- .../crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 1 - 2 files changed, 100 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 881fce53e369..42c5484ce66a 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1775,102 +1775,3 @@ err_print: dev_err(dev, "%s\n", err_msg); return -EINVAL; } - -static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf, - int size, int idx) -{ - struct otx2_cpt_engs_rsvd *mirrored_engs = NULL; - struct otx2_cpt_engs_rsvd *engs; - int len, i; - - buf[0] = '\0'; - for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) { - engs = &eng_grp->engs[i]; - if (!engs->type) - continue; - if (idx != -1 && idx != i) - continue; - - if (eng_grp->mirror.is_ena) - mirrored_engs = find_engines_by_type( - &eng_grp->g->grp[eng_grp->mirror.idx], - engs->type); - if (i > 0 && idx == -1) { - len = strlen(buf); - scnprintf(buf + len, size - len, ", "); - } - - len = strlen(buf); - scnprintf(buf + len, size - len, "%d %s ", - mirrored_engs ? engs->count + mirrored_engs->count : - engs->count, - get_eng_type_str(engs->type)); - if (mirrored_engs) { - len = strlen(buf); - scnprintf(buf + len, size - len, - "(%d shared with engine_group%d) ", - engs->count <= 0 ? - engs->count + mirrored_engs->count : - mirrored_engs->count, - eng_grp->mirror.idx); - } - } -} - -void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) -{ - struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps; - struct otx2_cpt_eng_grp_info *mirrored_grp; - char engs_info[2 * OTX2_CPT_NAME_LENGTH]; - struct otx2_cpt_eng_grp_info *grp; - struct otx2_cpt_engs_rsvd *engs; - int i, j; - - pr_debug("Engine groups global info"); - pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt, - eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt); - pr_debug("free SE %d", eng_grps->avail.se_cnt); - pr_debug("free IE %d", eng_grps->avail.ie_cnt); - pr_debug("free AE %d", eng_grps->avail.ae_cnt); - - for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) { - grp = &eng_grps->grp[i]; - pr_debug("engine_group%d, state %s", i, - str_enabled_disabled(grp->is_enabled)); - if (grp->is_enabled) { - mirrored_grp = &eng_grps->grp[grp->mirror.idx]; - pr_debug("Ucode0 filename %s, version %s", - grp->mirror.is_ena ? - mirrored_grp->ucode[0].filename : - grp->ucode[0].filename, - grp->mirror.is_ena ? 
- mirrored_grp->ucode[0].ver_str : - grp->ucode[0].ver_str); - if (is_2nd_ucode_used(grp)) - pr_debug("Ucode1 filename %s, version %s", - grp->ucode[1].filename, - grp->ucode[1].ver_str); - } - - for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) { - engs = &grp->engs[j]; - if (engs->type) { - u32 mask[5] = { }; - - get_engs_info(grp, engs_info, - 2 * OTX2_CPT_NAME_LENGTH, j); - pr_debug("Slot%d: %s", j, engs_info); - bitmap_to_arr32(mask, engs->bmap, - eng_grps->engs_num); - if (is_dev_otx2(cptpf->pdev)) - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x", - mask[3], mask[2], mask[1], - mask[0]); - else - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x", - mask[4], mask[3], mask[2], mask[1], - mask[0]); - } - } - } -} diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h index 365fe8943bd9..7e6a6a4ec37c 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h @@ -166,7 +166,6 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); -void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf); struct otx2_cpt_engs_rsvd *find_engines_by_type( struct otx2_cpt_eng_grp_info *eng_grp, int eng_type); -- cgit v1.2.3 From 006401d29a5cc3774b035e933ca6f063134b8433 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 23 Feb 2025 00:56:46 +0000 Subject: crypto: octeontx - Remove unused function otx_cpt_eng_grp_has_eng_type otx_cpt_eng_grp_has_eng_type() was added in 2020 by commit d9110b0b01ff ("crypto: marvell - add support for OCTEON TX CPT engine") but has remained unused. Remove it. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 11 ----------- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h | 2 -- 2 files changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index 2c08e928e44e..9f5601c0280b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -506,17 +506,6 @@ int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type) } EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type); -int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp, - int eng_type) -{ - struct otx_cpt_engs_rsvd *engs; - - engs = find_engines_by_type(eng_grp, eng_type); - - return (engs != NULL ? 
1 : 0); -} -EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type); - static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp, char *buf, int size) { diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h index 8620ac87a447..df79ee416c0d 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h @@ -174,7 +174,5 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev, void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps, bool is_rdonly); int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type); -int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp, - int eng_type); #endif /* __OTX_CPTPF_UCODE_H */ -- cgit v1.2.3 From caa9dbb76ff52ec848a57245062aaeaa07740adc Mon Sep 17 00:00:00 2001 From: Sven Schwermer Date: Mon, 24 Feb 2025 08:42:25 +0100 Subject: crypto: mxs-dcp - Only set OTP_KEY bit for OTP key While MXS_DCP_CONTROL0_OTP_KEY is set, the CRYPTO_KEY (DCP_PAES_KEY_OTP) is used even if the UNIQUE_KEY (DCP_PAES_KEY_UNIQUE) is selected. This is not clearly documented, but this implementation is consistent with NXP's downstream kernel fork and optee_os. Signed-off-by: Sven Schwermer Signed-off-by: Herbert Xu --- drivers/crypto/mxs-dcp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d94a26c3541a..133ebc998236 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, MXS_DCP_CONTROL0_INTERRUPT | MXS_DCP_CONTROL0_ENABLE_CIPHER; - if (key_referenced) - /* Set OTP key bit to select the key via KEY_SELECT. */ - desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; - else + if (!key_referenced) /* Payload contains the key. */ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; + else if (actx->key[0] == DCP_PAES_KEY_OTP) + /* Set OTP key bit to select the key via KEY_SELECT. */ + desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; if (rctx->enc) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; -- cgit v1.2.3 From bcfc8fc53f3acb3213fb9d28675244aa4ce208e0 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:01 +0530 Subject: crypto: tegra - Use separate buffer for setkey The buffer which sends the commands to host1x was shared for all tasks in the engine. This causes a problem with the setkey() function as it gets called asynchronous to the crypto engine queue. Modifying the same cmdbuf in setkey() will corrupt the ongoing host1x task and in turn break the encryption/decryption operation. Hence use a separate cmdbuf for setkey(). 
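A stripped-down sketch of the hazard and the shape of the fix follows; the structure and function names are illustrative, not the actual Tegra SE code. The idea is that the engine worker owns one command buffer, while key programming gets a dedicated buffer plus a lock so setkey() calls cannot interleave with each other or overwrite a queued job.

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    struct cmdbuf {
            uint32_t words[64];
            size_t len;
    };

    struct engine {
            struct cmdbuf job_buf;          /* written only by the engine queue worker */
            struct cmdbuf key_buf;          /* written only by setkey(), under key_lock */
            pthread_mutex_t key_lock;
    };

    /* Racy variant: setkey() scribbles over the shared buffer while a queued
     * job may still be read from it by the hardware. */
    static void setkey_racy(struct engine *se, const uint32_t *key, size_t n)
    {
            memcpy(se->job_buf.words, key, n * sizeof(*key));
            se->job_buf.len = n;
    }

    /* Fixed variant: dedicated key buffer, serialized against other setkey()
     * callers; in-flight jobs in job_buf are left untouched. */
    static void setkey_fixed(struct engine *se, const uint32_t *key, size_t n)
    {
            pthread_mutex_lock(&se->key_lock);
            memcpy(se->key_buf.words, key, n * sizeof(*key));
            se->key_buf.len = n;
            /* submit key_buf to the hardware here */
            pthread_mutex_unlock(&se->key_lock);
    }

    int main(void)
    {
            struct engine se = { .key_lock = PTHREAD_MUTEX_INITIALIZER };
            uint32_t key[4] = { 0 };

            setkey_fixed(&se, key, 4);
            setkey_racy(&se, key, 4);       /* shown only to contrast with the fix */
            return 0;
    }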
Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 16 ++++++++-------- drivers/crypto/tegra/tegra-se-hash.c | 13 +++++++------ drivers/crypto/tegra/tegra-se-key.c | 10 ++++++++-- drivers/crypto/tegra/tegra-se-main.c | 16 ++++++++++++---- drivers/crypto/tegra/tegra-se.h | 3 ++- 5 files changed, 37 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index d734c9a56786..7da7e169a314 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -282,7 +282,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); /* Copy the result */ tegra_aes_update_iv(req, ctx); @@ -719,7 +719,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct cmdlen = tegra_gmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) @@ -736,7 +736,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc /* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -759,7 +759,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -891,7 +891,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1098,7 +1098,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx /* Prepare command and submit */ cmdlen = tegra_ctr_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -1519,8 +1519,8 @@ static int tegra_cmac_do_update(struct ahash_request *req) tegra_cmac_paste_result(ctx->se, rctx); cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); - ret = tegra_se_host1x_submit(se, cmdlen); /* * If this is not the final update, copy the intermediate results * from the registers so that it can be used in the next 'update' @@ -1553,7 +1553,7 @@ static int tegra_cmac_do_final(struct ahash_request *req) /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) goto out; diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 0b5cdd5676b1..c7b2a062a03c 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -300,8 +300,9 @@ static int 
tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct tegra_se *se = ctx->se; unsigned int nblks, nresidue, size, ret; - u32 *cpuvaddr = ctx->se->cmdbuf->addr; + u32 *cpuvaddr = se->cmdbuf->addr; nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; @@ -353,11 +354,11 @@ static int tegra_sha_do_update(struct ahash_request *req) * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(ctx->se, rctx); + tegra_sha_paste_hash_result(se, rctx); - size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); + size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(ctx->se, size); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); /* * If this is not the final update, copy the intermediate results @@ -365,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req) * call. This is to support the import/export functionality. */ if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(ctx->se, rctx); + tegra_sha_copy_hash_result(se, rctx); return ret; } @@ -388,7 +389,7 @@ static int tegra_sha_do_final(struct ahash_request *req) size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, size); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out; diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index ac14678dbd30..276b261fb6df 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; - u32 *addr = se->cmdbuf->addr, size; + u32 *addr = se->keybuf->addr, size; + int ret; + + mutex_lock(&kslt_lock); size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + ret = tegra_se_host1x_submit(se, se->keybuf, size); - return tegra_se_host1x_submit(se, size); + mutex_unlock(&kslt_lock); + + return ret; } void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 918c0b10614d..1c94f1de0546 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi return cmdbuf; } -int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size) { struct host1x_job *job; int ret; @@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) job->engine_fallback_streamid = se->stream_id; job->engine_streamid_offset = SE_STREAM_ID; - se->cmdbuf->words = size; + cmdbuf->words = size; - host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + host1x_job_add_gather(job, &cmdbuf->bo, size, 0); ret = host1x_job_pin(job, se->dev); if (ret) { @@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client) goto syncpt_put; } + se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->keybuf) { + ret = -ENOMEM; + goto cmdbuf_put; + } + ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, "failed to register algorithms\n"); - goto cmdbuf_put; + goto keybuf_put; } return 0; +keybuf_put: + tegra_se_cmdbuf_put(&se->keybuf->bo); 
cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); syncpt_put: diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b9dd7ceb8783..b54aefe717a1 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -420,6 +420,7 @@ struct tegra_se { struct host1x_client client; struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct tegra_se_cmdbuf *keybuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; struct device *dev; @@ -502,7 +503,7 @@ void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); -int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) -- cgit v1.2.3 From 1cb328da4e8f34350c61a2b6548766c79b4bb64c Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:02 +0530 Subject: crypto: tegra - Do not use fixed size buffers Allocate the buffer based on the request instead of a fixed buffer length. In operations which may require larger buffer size, a fixed buffer may fail. Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 124 ++++++++++++++++++----------------- drivers/crypto/tegra/tegra-se-hash.c | 38 +++++++---- drivers/crypto/tegra/tegra-se.h | 2 - 3 files changed, 89 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 7da7e169a314..c2b8891a905d 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -263,12 +263,6 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) unsigned int cmdlen; int ret; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; - - rctx->datbuf.size = SE_AES_BUFLEN; rctx->iv = (u32 *)req->iv; rctx->len = req->cryptlen; @@ -278,6 +272,12 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); } + rctx->datbuf.size = rctx->len; + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); /* Prepare the command and submit for execution */ @@ -289,7 +289,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); /* Free the buffer */ - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); crypto_finalize_skcipher_request(se->engine, req, ret); @@ -1117,6 +1117,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, rctx->assoclen = req->assoclen; rctx->authsize = crypto_aead_authsize(tfm); + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - rctx->authsize; + memcpy(iv, req->iv, 16); ret = tegra_ccm_check_iv(iv); @@ -1145,30 +1150,26 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct 
tegra_se *se = ctx->se; int ret; + ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + return ret; + /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) return -ENOMEM; - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; goto outbuf_err; } - rctx->outbuf.size = SE_AES_BUFLEN; - - ret = tegra_ccm_crypt_init(req, se, rctx); - if (ret) - goto out; - if (rctx->encrypt) { - rctx->cryptlen = req->cryptlen; - /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); if (ret) @@ -1179,8 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out; } else { - rctx->cryptlen = req->cryptlen - ctx->authsize; - /* CTR operation */ ret = tegra_ccm_do_ctr(ctx, rctx); if (ret) @@ -1193,11 +1192,11 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) } out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); crypto_finalize_aead_request(ctx->se->engine, req, ret); @@ -1213,23 +1212,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aead_reqctx *rctx = aead_request_ctx(req); int ret; - /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->outbuf.addr, GFP_KERNEL); - if (!rctx->outbuf.buf) { - ret = -ENOMEM; - goto outbuf_err; - } - - rctx->outbuf.size = SE_AES_BUFLEN; - rctx->src_sg = req->src; rctx->dst_sg = req->dst; rctx->assoclen = req->assoclen; @@ -1243,6 +1225,21 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24); + /* Allocate buffers required */ + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) + return -ENOMEM; + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto outbuf_err; + } + /* If there is associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1266,11 +1263,11 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_gcm_do_verify(ctx->se, rctx); out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, 
rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); /* Finalize the request if there are no errors */ @@ -1497,6 +1494,11 @@ static int tegra_cmac_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -1529,6 +1531,9 @@ static int tegra_cmac_do_update(struct ahash_request *req) if (!(rctx->task & SHA_FINAL)) tegra_cmac_copy_result(ctx->se, rctx); + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); + return ret; } @@ -1543,10 +1548,20 @@ static int tegra_cmac_do_final(struct ahash_request *req) if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, - rctx->datbuf.buf, 0, req->result); + NULL, 0, req->result); + } + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); } - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); @@ -1565,8 +1580,10 @@ static int tegra_cmac_do_final(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4)); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); return ret; @@ -1672,28 +1689,15 @@ static int tegra_cmac_init(struct ahash_request *req) rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, &rctx->residue.addr, GFP_KERNEL); if (!rctx->residue.buf) - goto resbuf_fail; + return -ENOMEM; rctx->residue.size = 0; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - rctx->datbuf.size = 0; - /* Clear any previous result */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) writel(0, se->base + se->hw->regs->result + (i * 4)); return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - return -ENOMEM; } static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index c7b2a062a03c..b4a179a8febd 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -332,6 +332,11 @@ static int tegra_sha_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -368,6 +373,9 @@ static int tegra_sha_do_update(struct ahash_request *req) if (!(rctx->task & SHA_FINAL)) tegra_sha_copy_hash_result(se, rctx); + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); + return ret; } @@ 
-380,7 +388,17 @@ static int tegra_sha_do_final(struct ahash_request *req) u32 *cpuvaddr = se->cmdbuf->addr; int size, ret = 0; - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; @@ -397,8 +415,10 @@ static int tegra_sha_do_final(struct ahash_request *req) memcpy(req->result, rctx->digest.buf, rctx->digest.size); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, @@ -527,19 +547,11 @@ static int tegra_sha_init(struct ahash_request *req) if (!rctx->residue.buf) goto resbuf_fail; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - return 0; -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); resbuf_fail: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, - rctx->datbuf.addr); + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); digbuf_fail: return -ENOMEM; } diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b54aefe717a1..e196a90eedb9 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -340,8 +340,6 @@ #define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M -#define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN 0x2000 #define SHA_FIRST BIT(0) #define SHA_UPDATE BIT(1) -- cgit v1.2.3 From 1e245948ca0c252f561792fabb45de5518301d97 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:03 +0530 Subject: crypto: tegra - finalize crypto req on error Call the crypto finalize function before exiting *do_one_req() functions. This allows the driver to take up further requests even if the previous one fails. 
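The resulting control flow in the *do_one_req() handlers follows the usual crypto-engine pattern sketched below; the function and helper names here are placeholders for illustration, not code from the driver:

	/*
	 * Illustrative sketch only: every exit path reports the result to the
	 * engine so the queue keeps moving even when this request failed.
	 */
	static int example_do_one_req(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
		int ret;

		ret = example_alloc_buffers(req);	/* hypothetical helper */
		if (ret)
			goto out_finalize;

		ret = example_run_job(req);		/* hypothetical helper */

		example_free_buffers(req);		/* hypothetical helper */
	out_finalize:
		crypto_finalize_skcipher_request(engine, req, ret);

		return 0;
	}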
Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index c2b8891a905d..c7bb6f951a84 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -275,8 +275,10 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->datbuf.size = rctx->len; rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); @@ -292,6 +294,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); +out_finalize: crypto_finalize_skcipher_request(se->engine, req, ret); return 0; @@ -1152,21 +1155,21 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_ccm_crypt_init(req, se, rctx); if (ret) - return ret; + goto out_finalize; /* Allocate buffers required */ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) - return -ENOMEM; + goto out_finalize; rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto out_free_inbuf; } if (rctx->encrypt) { @@ -1195,10 +1198,11 @@ out: dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: +out_free_inbuf: dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1229,15 +1233,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; + if (!rctx->inbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto out_free_inbuf; } /* If there is associated data perform GMAC operation */ @@ -1266,11 +1272,11 @@ out: dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: +out_free_inbuf: dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); - /* Finalize the request if there are no errors */ +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; -- cgit v1.2.3 From dcf8b7e49b86738296c77fb58c123dd2d74a22a7 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:04 +0530 Subject: crypto: tegra - check return value for hash do_one_req Initialize and check the return value in hash *do_one_req() functions and exit 
the function if there is an error. This fixes the 'uninitialized variable' warnings reported by testbots. Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202412071747.flPux4oB-lkp@intel.com/ Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 10 ++++++++-- drivers/crypto/tegra/tegra-se-hash.c | 7 +++++++ 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index c7bb6f951a84..5ca7996ecc6d 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -1602,18 +1602,24 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret; + int ret = 0; if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_cmac_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } - +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index b4a179a8febd..0ae5ce67bdd0 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -437,14 +437,21 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; -- cgit v1.2.3 From 97ee15ea101629d2ffe21d3c5dc03b8d8be43603 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:05 +0530 Subject: crypto: tegra - Transfer HASH init function to crypto engine Ahash init() function was called asynchronous to the crypto engine queue. This could corrupt the request context if there is any ongoing operation for the same request. Queue the init function as well to the crypto engine queue so that this scenario can be avoided. 
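After this change the ahash .init callback only marks the request and defers the real work to the engine thread; reassembled from the diff below, the new entry point is simply:

	static int tegra_sha_init(struct ahash_request *req)
	{
		struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
		struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);

		/* Defer context setup to tegra_sha_do_init() on the engine thread */
		rctx->task = SHA_INIT;

		return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
	}

digest() likewise just ORs SHA_INIT | SHA_UPDATE | SHA_FINAL into rctx->task and queues the request, so every stage of a hash operation is serialized by the engine.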
Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 81 ++++++++++++++++------------ drivers/crypto/tegra/tegra-se-hash.c | 101 ++++++++++++++++++++--------------- drivers/crypto/tegra/tegra-se.h | 5 +- 3 files changed, 109 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 5ca7996ecc6d..13d76de2bc81 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -1459,6 +1459,34 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct se->base + se->hw->regs->result + (i * 4)); } +static int tegra_cmac_do_init(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->task |= SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + return -ENOMEM; + + rctx->residue.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; +} + static int tegra_cmac_do_update(struct ahash_request *req) { struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); @@ -1604,6 +1632,14 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret = 0; + if (rctx->task & SHA_INIT) { + ret = tegra_cmac_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); if (ret) @@ -1684,34 +1720,6 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_cmac_init(struct ahash_request *req) -{ - struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - int i; - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->task = SHA_FIRST; - rctx->blk_size = crypto_ahash_blocksize(tfm); - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - return -ENOMEM; - - rctx->residue.size = 0; - - /* Clear any previous result */ - for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - writel(0, se->base + se->hw->regs->result + (i * 4)); - - return 0; -} - static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -1728,6 +1736,17 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); } +static int tegra_cmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + static int tegra_cmac_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 
@@ -1766,13 +1785,9 @@ static int tegra_cmac_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - int ret; - ret = tegra_cmac_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; - rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 0ae5ce67bdd0..07e4c7320ec8 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -296,6 +296,44 @@ static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_re se->base + se->hw->regs->result + (i * 4)); } +static int tegra_sha_do_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + + if (ctx->fallback) + return tegra_sha_fallback_init(req); + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->alg = ctx->alg; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + return 0; + +resbuf_fail: + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + return -ENOMEM; +} + static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); @@ -435,6 +473,14 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret = 0; + if (rctx->task & SHA_INIT) { + ret = tegra_sha_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); if (ret) @@ -525,44 +571,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_sha_init(struct ahash_request *req) -{ - struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - - if (ctx->fallback) - return tegra_sha_fallback_init(req); - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->key_id = ctx->key_id; - rctx->task = SHA_FIRST; - rctx->alg = ctx->alg; - rctx->blk_size = crypto_ahash_blocksize(tfm); - rctx->digest.size = crypto_ahash_digestsize(tfm); - - rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, - &rctx->digest.addr, GFP_KERNEL); - if (!rctx->digest.buf) - goto digbuf_fail; - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - return 0; - -resbuf_fail: - dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, - rctx->digest.addr); -digbuf_fail: - return -ENOMEM; -} - static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, 
const u8 *key, unsigned int keylen) { @@ -588,6 +596,17 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); } +static int tegra_sha_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + static int tegra_sha_update(struct ahash_request *req) { struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); @@ -635,16 +654,12 @@ static int tegra_sha_digest(struct ahash_request *req) struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - int ret; if (ctx->fallback) return tegra_sha_fallback_digest(req); - ret = tegra_sha_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; - rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index e196a90eedb9..e1ec37bfb80a 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -342,8 +342,9 @@ #define SE_MAX_MEM_ALLOC SZ_4M #define SHA_FIRST BIT(0) -#define SHA_UPDATE BIT(1) -#define SHA_FINAL BIT(2) +#define SHA_INIT BIT(1) +#define SHA_UPDATE BIT(2) +#define SHA_FINAL BIT(3) /* Security Engine operation modes */ enum se_aes_alg { -- cgit v1.2.3 From ff4b7df0b511b6121f3386607f02c16fb5d41192 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:06 +0530 Subject: crypto: tegra - Fix HASH intermediate result handling The intermediate hash values generated during an update task were handled incorrectly in the driver. The values have a defined format for each algorithm. Blindly copying and pasting from the HASH_RESULT register would not work for all the supported algorithms. This incorrect handling causes failures when there is a context switch between multiple operations. To handle the expected format correctly, add a separate buffer for storing the intermediate results for each request. Remove the previous copy/paste functions which read/wrote to the registers directly. Instead, configure the hardware to copy the intermediate result to the buffer and use the host1x path to restore the intermediate hash results.
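The heart of the fix is replaying the saved intermediate result into the SE_SHA_HASH_RESULT registers through the command stream; condensed from tegra_se_insert_hash_result() in the diff below, the per-word fix-up is:

	/*
	 * Excerpt (condensed): SHA-384/512 store each 64-bit word with its two
	 * 32-bit halves swapped, and all SHA algorithms keep the result in
	 * big-endian form, so both are undone while building the command stream.
	 */
	for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
		int idx = j;

		if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
			idx = (j % 2) ? j - 1 : j + 1;	/* swap 32-bit word pairs */

		if (ctx->alg <= SE_ALG_SHA512)
			cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
		else
			cpuvaddr[i++] = res[idx];
	}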
Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-hash.c | 149 +++++++++++++++++++++++------------ drivers/crypto/tegra/tegra-se.h | 1 + 2 files changed, 98 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 07e4c7320ec8..8bed13552ab9 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -34,6 +34,7 @@ struct tegra_sha_reqctx { struct tegra_se_datbuf datbuf; struct tegra_se_datbuf residue; struct tegra_se_datbuf digest; + struct tegra_se_datbuf intr_res; unsigned int alg; unsigned int config; unsigned int total_len; @@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) return crypto_ahash_export(&rctx->fallback_req, out); } -static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, +static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, + struct tegra_sha_reqctx *rctx) +{ + __be32 *res_be = (__be32 *)rctx->intr_res.buf; + u32 *res = (u32 *)rctx->intr_res.buf; + int i = 0, j; + + cpuvaddr[i++] = 0; + cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT); + + for (j = 0; j < HASH_RESULT_REG_COUNT; j++) { + int idx = j; + + /* + * The initial, intermediate and final hash value of SHA-384, SHA-512 + * in SHA_HASH_RESULT registers follow the below layout of bytes. + * + * +---------------+------------+ + * | HASH_RESULT_0 | B4...B7 | + * +---------------+------------+ + * | HASH_RESULT_1 | B0...B3 | + * +---------------+------------+ + * | HASH_RESULT_2 | B12...B15 | + * +---------------+------------+ + * | HASH_RESULT_3 | B8...B11 | + * +---------------+------------+ + * | ...... | + * +---------------+------------+ + * | HASH_RESULT_14| B60...B63 | + * +---------------+------------+ + * | HASH_RESULT_15| B56...B59 | + * +---------------+------------+ + * + */ + if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512) + idx = (j % 2) ? j - 1 : j + 1; + + /* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial + * intermediate and final hash value when stored in + * SHA_HASH_RESULT registers, the byte order is NOT in + * little-endian. 
+ */ + if (ctx->alg <= SE_ALG_SHA512) + cpuvaddr[i++] = be32_to_cpu(res_be[idx]); + else + cpuvaddr[i++] = res[idx]; + } + + return i; +} + +static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, struct tegra_sha_reqctx *rctx) { + struct tegra_se *se = ctx->se; u64 msg_len, msg_left; int i = 0; @@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = upper_32_bits(msg_left); cpuvaddr[i++] = 0; cpuvaddr[i++] = 0; - cpuvaddr[i++] = host1x_opcode_setpayload(6); + cpuvaddr[i++] = host1x_opcode_setpayload(2); cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); cpuvaddr[i++] = rctx->config; @@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; rctx->task &= ~SHA_FIRST; } else { - cpuvaddr[i++] = 0; + /* + * If it isn't the first task, program the HASH_RESULT register + * with the intermediate result from the previous task + */ + i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx); } + cpuvaddr[i++] = host1x_opcode_setpayload(4); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR); cpuvaddr[i++] = rctx->datbuf.addr; cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | SE_ADDR_HI_SZ(rctx->datbuf.size)); - cpuvaddr[i++] = rctx->digest.addr; - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | - SE_ADDR_HI_SZ(rctx->digest.size)); + + if (rctx->task & SHA_UPDATE) { + cpuvaddr[i++] = rctx->intr_res.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) | + SE_ADDR_HI_SZ(rctx->intr_res.size)); + } else { + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + } + if (rctx->key_id) { cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); @@ -266,36 +334,18 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); - cpuvaddr[i++] = SE_SHA_OP_WRSTALL | - SE_SHA_OP_START | + cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START | SE_SHA_OP_LASTBUF; cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); - dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", - msg_len, msg_left, rctx->config); + dev_dbg(se->dev, "msg len %llu msg left %llu sz %lu cfg %#x", + msg_len, msg_left, rctx->datbuf.size, rctx->config); return i; } -static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) -{ - int i; - - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); -} - -static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) -{ - int i; - - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - writel(rctx->result[i], - se->base + se->hw->regs->result + (i * 4)); -} - static int tegra_sha_do_init(struct ahash_request *req) { struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); @@ -325,8 +375,17 @@ static int tegra_sha_do_init(struct ahash_request *req) if (!rctx->residue.buf) goto resbuf_fail; + rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4; + rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size, + &rctx->intr_res.addr, GFP_KERNEL); + if (!rctx->intr_res.buf) + goto intr_res_fail; + return 
0; +intr_res_fail: + dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf, + rctx->residue.addr); resbuf_fail: dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); @@ -356,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req) rctx->src_sg = req->src; rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; - rctx->total_len += rctx->datbuf.size; /* * If nbytes are less than a block size, copy it residue and @@ -365,12 +423,12 @@ static int tegra_sha_do_update(struct ahash_request *req) if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, rctx->src_sg, 0, req->nbytes, 0); - rctx->residue.size += req->nbytes; + return 0; } - rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, &rctx->datbuf.addr, GFP_KERNEL); if (!rctx->datbuf.buf) return -ENOMEM; @@ -387,31 +445,15 @@ static int tegra_sha_do_update(struct ahash_request *req) /* Update residue value with the residue after current block */ rctx->residue.size = nresidue; + rctx->total_len += rctx->datbuf.size; rctx->config = tegra_sha_get_config(rctx->alg) | - SE_SHA_DST_HASH_REG; - - /* - * If this is not the first 'update' call, paste the previous copied - * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(se, rctx); - - size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); + SE_SHA_DST_MEMORY; + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); ret = tegra_se_host1x_submit(se, se->cmdbuf, size); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(se, rctx); - - dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + dma_free_coherent(se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); return ret; @@ -443,8 +485,7 @@ static int tegra_sha_do_final(struct ahash_request *req) rctx->config = tegra_sha_get_config(rctx->alg) | SE_SHA_DST_MEMORY; - size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out; @@ -461,6 +502,10 @@ out_free: rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); + + dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf, + rctx->intr_res.addr); + return ret; } diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index e1ec37bfb80a..0f5bcf27358b 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -24,6 +24,7 @@ #define SE_STREAM_ID 0x90 #define SE_SHA_CFG 0x4004 +#define SE_SHA_IN_ADDR 0x400c #define SE_SHA_KEY_ADDR 0x4094 #define SE_SHA_KEY_DATA 0x4098 #define SE_SHA_KEYMANIFEST 0x409c -- cgit v1.2.3 From ce390d6c2675d2e24d798169a1a0e3cdbc076907 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:07 +0530 Subject: crypto: tegra - Fix CMAC intermediate result handling Saving and restoring of the intermediate results are needed if there is context switch caused by another ongoing request on the same engine. This is therefore not only to support import/export functionality. 
Hence, save and restore the intermediate result for every non-first task. Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 13d76de2bc81..c3afa8344e4a 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -1547,9 +1547,8 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->residue.size = nresidue; /* - * If this is not the first 'update' call, paste the previous copied + * If this is not the first task, paste the previous copied * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) tegra_cmac_paste_result(ctx->se, rctx); @@ -1557,13 +1556,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) cmdlen = tegra_cmac_prep_cmd(ctx, rctx); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_cmac_copy_result(ctx->se, rctx); + tegra_cmac_copy_result(ctx->se, rctx); dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); @@ -1600,6 +1593,13 @@ static int tegra_cmac_do_final(struct ahash_request *req) rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + /* + * If this is not the first task, paste the previous copied + * intermediate results to the registers so that it gets picked up. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); -- cgit v1.2.3 From bde558220866e74f19450e16d9a2472b488dfedf Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:08 +0530 Subject: crypto: tegra - Set IV to NULL explicitly for AES ECB The req->iv variable may hold stale values or a zero-sized buffer by default and may end up being used during encryption/decryption. This in turn may corrupt the results or break the operation. Set the req->iv variable to NULL explicitly for algorithms like AES-ECB where IV is not used.
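For context, the hazard is easiest to see from the caller's side; the snippet below pairs a hypothetical user of the driver (not driver code) with the guard this patch adds, which appears in the hunk that follows:

	/* Hypothetical caller: ecb(aes) requests legitimately pass a NULL IV */
	skcipher_request_set_crypt(req, sg_src, sg_dst, nbytes, NULL);

	/* Guard added in tegra_aes_crypt(): never carry a stale IV pointer for ECB */
	if (ctx->alg == SE_ALG_ECB)
		req->iv = NULL;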
Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index c3afa8344e4a..a1b469c3a55b 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -446,6 +446,9 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) if (!req->cryptlen) return 0; + if (ctx->alg == SE_ALG_ECB) + req->iv = NULL; + rctx->encrypt = encrypt; rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); -- cgit v1.2.3 From b157e7a228aee9b48c2de05129476b822aa7956d Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:09 +0530 Subject: crypto: tegra - Reserve keyslots to allocate dynamically The HW supports only storing 15 keys at a time. This limits the number of tfms that can work without failutes. Reserve keyslots to solve this and use the reserved ones during the encryption/decryption operation. This allow users to have the capability of hardware protected keys and faster operations if there are limited number of tfms while not halting the operation if there are more tfms. Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-aes.c | 139 ++++++++++++++++++++++++++++++------ drivers/crypto/tegra/tegra-se-key.c | 19 ++++- drivers/crypto/tegra/tegra-se.h | 28 ++++++++ 3 files changed, 164 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index a1b469c3a55b..ca9d0cca1f74 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -28,6 +28,9 @@ struct tegra_aes_ctx { u32 ivsize; u32 key1_id; u32 key2_id; + u32 keylen; + u8 key1[AES_MAX_KEY_SIZE]; + u8 key2[AES_MAX_KEY_SIZE]; }; struct tegra_aes_reqctx { @@ -43,8 +46,9 @@ struct tegra_aead_ctx { struct tegra_se *se; unsigned int authsize; u32 alg; - u32 keylen; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; }; struct tegra_aead_reqctx { @@ -56,8 +60,8 @@ struct tegra_aead_reqctx { unsigned int cryptlen; unsigned int authsize; bool encrypt; - u32 config; u32 crypto_config; + u32 config; u32 key_id; u32 iv[4]; u8 authdata[16]; @@ -67,6 +71,8 @@ struct tegra_cmac_ctx { struct tegra_se *se; unsigned int alg; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; struct crypto_shash *fallback_tfm; }; @@ -260,11 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); struct tegra_se *se = ctx->se; - unsigned int cmdlen; + unsigned int cmdlen, key1_id, key2_id; int ret; rctx->iv = (u32 *)req->iv; rctx->len = req->cryptlen; + key1_id = ctx->key1_id; + key2_id = ctx->key2_id; /* Pad input to AES Block size */ if (ctx->alg != SE_ALG_XTS) { @@ -282,6 +290,29 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); + rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt); + + if (!key1_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, 
ctx->key1, + ctx->keylen, ctx->alg, &key1_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id); + + if (ctx->alg == SE_ALG_XTS) { + if (!key2_id) { + ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2, + ctx->keylen, ctx->alg, &key2_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id); + } + /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); @@ -290,10 +321,17 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) tegra_aes_update_iv(req, ctx); scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); +out: /* Free the buffer */ dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); + if (tegra_key_is_reserved(key1_id)) + tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg); + + if (tegra_key_is_reserved(key2_id)) + tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg); + out_finalize: crypto_finalize_skcipher_request(se->engine, req, ret); @@ -316,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm) ctx->se = se_alg->se_dev; ctx->key1_id = 0; ctx->key2_id = 0; + ctx->keylen = 0; algname = crypto_tfm_alg_name(&tfm->base); ret = se_algname_to_algid(algname); @@ -344,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key1, key, keylen); + } + + return 0; } static int tegra_xts_setkey(struct crypto_skcipher *tfm, @@ -368,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm, ret = tegra_key_submit(ctx->se, key, len, ctx->alg, &ctx->key1_id); - if (ret) - return ret; + if (ret) { + ctx->keylen = len; + memcpy(ctx->key1, key, len); + } - return tegra_key_submit(ctx->se, key + len, len, + ret = tegra_key_submit(ctx->se, key + len, len, ctx->alg, &ctx->key2_id); + if (ret) { + ctx->keylen = len; + memcpy(ctx->key2, key + len, len); + } return 0; } @@ -450,12 +502,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) req->iv = NULL; rctx->encrypt = encrypt; - rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); - rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); - - if (ctx->key2_id) - rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } @@ -721,7 +767,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); cmdlen = tegra_gmac_prep_cmd(ctx, rctx); @@ -738,7 +784,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = 
tegra_gcm_crypt_prep_cmd(ctx, rctx); @@ -761,7 +807,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); @@ -892,7 +938,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); @@ -1079,7 +1125,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Copy authdata in the top of buffer for encryption/decryption */ if (rctx->encrypt) @@ -1160,6 +1206,8 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out_finalize; + rctx->key_id = ctx->key_id; + /* Allocate buffers required */ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, @@ -1175,6 +1223,13 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) goto out_free_inbuf; } + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + if (rctx->encrypt) { /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); @@ -1205,6 +1260,9 @@ out_free_inbuf: dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); @@ -1232,6 +1290,8 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24); + rctx->key_id = ctx->key_id; + /* Allocate buffers required */ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, @@ -1249,6 +1309,13 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) goto out_free_inbuf; } + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + /* If there is associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1279,6 +1346,9 @@ out_free_inbuf: dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); @@ -1301,6 +1371,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1382,13 +1453,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm, const u8 *key, 
u32 keylen) { struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; } static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx, @@ -1473,6 +1551,7 @@ static int tegra_cmac_do_init(struct ahash_request *req) rctx->total_len = 0; rctx->datbuf.size = 0; rctx->residue.size = 0; + rctx->key_id = ctx->key_id; rctx->task |= SHA_FIRST; rctx->blk_size = crypto_ahash_blocksize(tfm); @@ -1517,7 +1596,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; rctx->total_len += rctx->datbuf.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); - rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id); /* * Keep one block and residue bytes in residue and @@ -1643,6 +1722,13 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) rctx->task &= ~SHA_INIT; } + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); if (ret) @@ -1659,6 +1745,9 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) rctx->task &= ~SHA_FINAL; } out: + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + crypto_finalize_hash_request(se->engine, req, ret); return 0; @@ -1699,6 +1788,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1727,6 +1817,7 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); @@ -1736,7 +1827,13 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, if (ctx->fallback_tfm) crypto_shash_setkey(ctx->fallback_tfm, key, keylen); - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; } static int tegra_cmac_init(struct ahash_request *req) diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index 276b261fb6df..956fa9b4e9b1 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -141,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) tegra_keyslot_free(keyid); } +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg) +{ + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + + if (!keyid) + return; + + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); +} + +inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + return tegra_key_insert(se, key, keylen, *keyid, alg); +} + int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid) { int ret; @@ 
-149,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 if (!tegra_key_in_kslt(*keyid)) { *keyid = tegra_keyslot_alloc(); if (!(*keyid)) { - dev_err(se->dev, "failed to allocate key slot\n"); + dev_dbg(se->dev, "failed to allocate key slot\n"); return -ENOMEM; } } diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index 0f5bcf27358b..b6cac9384f66 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -342,6 +342,9 @@ #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M +#define TEGRA_AES_RESERVED_KSLT 14 +#define TEGRA_XTS_RESERVED_KSLT 15 + #define SHA_FIRST BIT(0) #define SHA_INIT BIT(1) #define SHA_UPDATE BIT(2) @@ -502,9 +505,34 @@ void tegra_deinit_aes(struct tegra_se *se); void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); + +int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid); + void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg); int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); +static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_AES_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_XTS_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline bool tegra_key_is_reserved(u32 keyid) +{ + return ((keyid == TEGRA_AES_RESERVED_KSLT) || + (keyid == TEGRA_XTS_RESERVED_KSLT)); +} + /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) { -- cgit v1.2.3 From f80a2e2e77bedd0aa645a60f89b4f581c70accda Mon Sep 17 00:00:00 2001 From: Akhil R Date: Mon, 24 Feb 2025 14:46:10 +0530 Subject: crypto: tegra - Use HMAC fallback when keyslots are full The intermediate results for HMAC is stored in the allocated keyslot by the hardware. Dynamic allocation of keyslot during an operation is hence not possible. 
As the number of keyslots are limited in the hardware, fallback to the HMAC software implementation if keyslots are not available Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver") Signed-off-by: Akhil R Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-hash.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 8bed13552ab9..65a50f29bd7e 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -632,13 +632,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) return tegra_hmac_fallback_setkey(ctx, key, keylen); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + ctx->fallback = false; - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + return 0; } static int tegra_sha_init(struct ahash_request *req) -- cgit v1.2.3 From fc4bd01d9ff592f620c499686245c093440db0e8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 27 Feb 2025 18:14:55 +0800 Subject: crypto: iaa - Test the correct request flag Test the correct flags for the MAY_SLEEP bit. Fixes: 2ec6761df889 ("crypto: iaa - Add support for deflate-iaa compression algorithm") Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index c3776b0de51d..990ea46955bb 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1537,7 +1537,7 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); if (!req->dst) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; /* incompressible data will always be < 2 * slen */ req->dlen = 2 * req->slen; @@ -1619,7 +1619,7 @@ out: static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; -- cgit v1.2.3 From fb14ef46e27757d57ccaf437c79bd6aadce3f9b8 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 3 Mar 2025 20:08:04 +0100 Subject: crypto: virtio - Erase some sensitive memory when it is freed virtcrypto_clear_request() does the same as the code here, but uses kfree_sensitive() for one of the free operation. So, better safe than sorry, use virtcrypto_clear_request() directly to save a few lines of code and cleanly free the memory. 
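For reference, the helper being reused here is presumably along the lines of the minimal sketch below; the struct name is inferred from the vc_req variable in the hunk that follows, and the real definition lives earlier in virtio_crypto_core.c:

static void virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		/* req_data may hold key material, so zero it before freeing */
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}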
Signed-off-by: Christophe JAILLET Tested-by: Lei Yang Acked-by: Jason Wang Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_core.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index d0278eb568b9..0d522049f595 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -480,10 +480,8 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) for (i = 0; i < vcrypto->max_data_queues; i++) { vq = vcrypto->data_vq[i].vq; - while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) { - kfree(vc_req->req_data); - kfree(vc_req->sgs); - } + while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) + virtcrypto_clear_request(vc_req); cond_resched(); } } -- cgit v1.2.3 From 64b7871522a4cba99d092e1c849d6f9092868aaa Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Wed, 5 Mar 2025 13:27:05 +0530 Subject: crypto: octeontx2 - suppress auth failure screaming due to negative tests This patch addresses an issue where authentication failures were being erroneously reported due to negative test failures in the "ccm(aes)" selftest. pr_debug suppress unnecessary screaming of these tests. Signed-off-by: Shashank Gupta Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c index 5387c68f3c9d..426244107037 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c @@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs, break; } - dev_err(&pdev->dev, - "Request failed with software error code 0x%x\n", - cpt_status->s.uc_compcode); + pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n", + cpt_status->s.uc_compcode, + info->req->areq->tfm->__crt_alg->cra_name, + info->req->areq->tfm->__crt_alg->cra_driver_name); otx2_cpt_dump_sg_list(pdev, info->req); break; } -- cgit v1.2.3 From 65775cf313987926e9746b0ca7f5519d297af2da Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Mar 2025 20:45:21 +0800 Subject: crypto: scatterwalk - Change scatterwalk_next calling convention Rather than returning the address and storing the length into an argument pointer, add an address field to the walk struct and use that to store the address. The length is returned directly. Change the done functions to use this stored address instead of getting them from the caller. Split the address into two using a union. The user should only access the const version so that it is never changed. 
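As a rough before/after sketch of the calling-convention change (a generic caller over an already-started walk; process() is a stand-in for whatever consumes the mapped data and is not part of the API):

/* Old convention: address returned, length through an out-parameter. */
static void consume_old(struct scatter_walk *walk, unsigned int total)
{
	do {
		unsigned int n;
		const u8 *p;

		p = scatterwalk_next(walk, total, &n);
		process(p, n);
		scatterwalk_done_src(walk, p, n);
		total -= n;
	} while (total);
}

/* New convention: length returned, address read from walk->addr. */
static void consume_new(struct scatter_walk *walk, unsigned int total)
{
	do {
		unsigned int n = scatterwalk_next(walk, total);

		process(walk->addr, n);
		scatterwalk_done_src(walk, n);
		total -= n;
	} while (total);
}

Because the address now lives in a union of a const pointer (addr) and a private member (__addr), callers can only read the mapped address while the scatterwalk helpers remain the only writers.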
Signed-off-by: Herbert Xu --- arch/arm/crypto/ghash-ce-glue.c | 7 +++---- arch/arm64/crypto/aes-ce-ccm-glue.c | 9 ++++----- arch/arm64/crypto/ghash-ce-glue.c | 7 +++---- arch/arm64/crypto/sm4-ce-ccm-glue.c | 8 ++++---- arch/arm64/crypto/sm4-ce-gcm-glue.c | 8 ++++---- arch/s390/crypto/aes_s390.c | 21 +++++++++------------ arch/x86/crypto/aegis128-aesni-glue.c | 7 +++---- arch/x86/crypto/aesni-intel_glue.c | 9 ++++----- crypto/aegis128-core.c | 7 +++---- crypto/scatterwalk.c | 14 ++++++-------- crypto/skcipher.c | 10 +++++++--- drivers/crypto/nx/nx.c | 7 +++---- include/crypto/algapi.h | 7 +++++++ include/crypto/scatterwalk.h | 35 ++++++++++++++++++----------------- 14 files changed, 78 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 9613ffed84f9..dab66b520b6e 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -460,11 +460,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) do { unsigned int n; - const u8 *p; - p = scatterwalk_next(&walk, len, &n); - gcm_update_mac(dg, p, n, buf, &buf_count, ctx); - scatterwalk_done_src(&walk, p, n); + n = scatterwalk_next(&walk, len); + gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx); + scatterwalk_done_src(&walk, n); if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) { kernel_neon_end(); diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 1c29546983bf..2d791d51891b 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -157,12 +157,11 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) do { unsigned int n; - const u8 *p; - p = scatterwalk_next(&walk, len, &n); - macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc, - num_rounds(ctx)); - scatterwalk_done_src(&walk, p, n); + n = scatterwalk_next(&walk, len); + macp = ce_aes_ccm_auth_data(mac, walk.addr, n, macp, + ctx->key_enc, num_rounds(ctx)); + scatterwalk_done_src(&walk, n); len -= n; } while (len); } diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 69d4fb78c30d..071e122f9c37 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -309,11 +309,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) do { unsigned int n; - const u8 *p; - p = scatterwalk_next(&walk, len, &n); - gcm_update_mac(dg, p, n, buf, &buf_count, ctx); - scatterwalk_done_src(&walk, p, n); + n = scatterwalk_next(&walk, len); + gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx); + scatterwalk_done_src(&walk, n); len -= n; } while (len); diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c index 119f86eb7cc9..e9cc1c1364ec 100644 --- a/arch/arm64/crypto/sm4-ce-ccm-glue.c +++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c @@ -113,10 +113,10 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) do { unsigned int n, orig_n; - const u8 *p, *orig_p; + const u8 *p; - orig_p = scatterwalk_next(&walk, assoclen, &orig_n); - p = orig_p; + orig_n = scatterwalk_next(&walk, assoclen); + p = walk.addr; n = orig_n; while (n > 0) { @@ -149,7 +149,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) } } - scatterwalk_done_src(&walk, orig_p, orig_n); + scatterwalk_done_src(&walk, orig_n); assoclen -= orig_n; } while (assoclen); } diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c 
index 2e27d7752d4f..c2ea3d5f690b 100644 --- a/arch/arm64/crypto/sm4-ce-gcm-glue.c +++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c @@ -83,10 +83,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) do { unsigned int n, orig_n; - const u8 *p, *orig_p; + const u8 *p; - orig_p = scatterwalk_next(&walk, assoclen, &orig_n); - p = orig_p; + orig_n = scatterwalk_next(&walk, assoclen); + p = walk.addr; n = orig_n; if (n + buflen < GHASH_BLOCK_SIZE) { @@ -118,7 +118,7 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) memcpy(&buffer[0], p, buflen); } - scatterwalk_done_src(&walk, orig_p, orig_n); + scatterwalk_done_src(&walk, orig_n); assoclen -= orig_n; } while (assoclen); diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 7fd303df05ab..ed85bd6e298f 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -66,7 +66,6 @@ struct s390_xts_ctx { struct gcm_sg_walk { struct scatter_walk walk; unsigned int walk_bytes; - u8 *walk_ptr; unsigned int walk_bytes_remain; u8 buf[AES_BLOCK_SIZE]; unsigned int buf_bytes; @@ -789,8 +788,7 @@ static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw) { if (gw->walk_bytes_remain == 0) return 0; - gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain, - &gw->walk_bytes); + gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain); return gw->walk_bytes; } @@ -799,10 +797,9 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, { gw->walk_bytes_remain -= nbytes; if (out) - scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes); + scatterwalk_done_dst(&gw->walk, nbytes); else - scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes); - gw->walk_ptr = NULL; + scatterwalk_done_src(&gw->walk, nbytes); } static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) @@ -828,14 +825,14 @@ static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) } if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { - gw->ptr = gw->walk_ptr; + gw->ptr = gw->walk.addr; gw->nbytes = gw->walk_bytes; goto out; } while (1) { n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); - memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); + memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n); gw->buf_bytes += n; _gcm_sg_unmap_and_advance(gw, n, false); if (gw->buf_bytes >= minbytesneeded) { @@ -869,13 +866,13 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) } if (gw->walk_bytes >= minbytesneeded) { - gw->ptr = gw->walk_ptr; + gw->ptr = gw->walk.addr; gw->nbytes = gw->walk_bytes; goto out; } - scatterwalk_unmap(gw->walk_ptr); - gw->walk_ptr = NULL; + /* XXX */ + scatterwalk_unmap(gw->walk.addr); gw->ptr = gw->buf; gw->nbytes = sizeof(gw->buf); @@ -914,7 +911,7 @@ static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) if (!_gcm_sg_clamp_and_map(gw)) return i; n = min(gw->walk_bytes, bytesdone - i); - memcpy(gw->walk_ptr, gw->buf + i, n); + memcpy(gw->walk.addr, gw->buf + i, n); _gcm_sg_unmap_and_advance(gw, n, true); } } else diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 1bd093d073ed..26786e15abac 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -71,10 +71,9 @@ static void crypto_aegis128_aesni_process_ad( scatterwalk_start(&walk, sg_src); while (assoclen != 0) { - unsigned int size; - const u8 *mapped = scatterwalk_next(&walk, assoclen, &size); + unsigned int size = 
scatterwalk_next(&walk, assoclen); + const u8 *src = walk.addr; unsigned int left = size; - const u8 *src = mapped; if (pos + size >= AEGIS128_BLOCK_SIZE) { if (pos > 0) { @@ -97,7 +96,7 @@ static void crypto_aegis128_aesni_process_ad( pos += left; assoclen -= size; - scatterwalk_done_src(&walk, mapped, size); + scatterwalk_done_src(&walk, size); } if (pos > 0) { diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index c4bd05688b55..e141b7995304 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1306,12 +1306,11 @@ static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16], scatterwalk_start(&walk, sg_src); while (assoclen) { - unsigned int orig_len_this_step; - const u8 *orig_src = scatterwalk_next(&walk, assoclen, - &orig_len_this_step); + unsigned int orig_len_this_step = scatterwalk_next( + &walk, assoclen); unsigned int len_this_step = orig_len_this_step; unsigned int len; - const u8 *src = orig_src; + const u8 *src = walk.addr; if (unlikely(pos)) { len = min(len_this_step, 16 - pos); @@ -1335,7 +1334,7 @@ static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16], pos = len_this_step; } next: - scatterwalk_done_src(&walk, orig_src, orig_len_this_step); + scatterwalk_done_src(&walk, orig_len_this_step); if (need_resched()) { kernel_fpu_end(); kernel_fpu_begin(); diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 15d64d836356..72f6ee1345ef 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, scatterwalk_start(&walk, sg_src); while (assoclen != 0) { - unsigned int size; - const u8 *mapped = scatterwalk_next(&walk, assoclen, &size); + unsigned int size = scatterwalk_next(&walk, assoclen); + const u8 *src = walk.addr; unsigned int left = size; - const u8 *src = mapped; if (pos + size >= AEGIS_BLOCK_SIZE) { if (pos > 0) { @@ -308,7 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, pos += left; assoclen -= size; - scatterwalk_done_src(&walk, mapped, size); + scatterwalk_done_src(&walk, size); } if (pos > 0) { diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 87c080f565d4..20a28c6d94da 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -34,12 +34,11 @@ inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, unsigned int nbytes) { do { - const void *src_addr; unsigned int to_copy; - src_addr = scatterwalk_next(walk, nbytes, &to_copy); - memcpy(buf, src_addr, to_copy); - scatterwalk_done_src(walk, src_addr, to_copy); + to_copy = scatterwalk_next(walk, nbytes); + memcpy(buf, walk->addr, to_copy); + scatterwalk_done_src(walk, to_copy); buf += to_copy; nbytes -= to_copy; } while (nbytes); @@ -50,12 +49,11 @@ inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, unsigned int nbytes) { do { - void *dst_addr; unsigned int to_copy; - dst_addr = scatterwalk_next(walk, nbytes, &to_copy); - memcpy(dst_addr, buf, to_copy); - scatterwalk_done_dst(walk, dst_addr, to_copy); + to_copy = scatterwalk_next(walk, nbytes); + memcpy(walk->addr, buf, to_copy); + scatterwalk_done_dst(walk, to_copy); buf += to_copy; nbytes -= to_copy; } while (nbytes); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 66d19c360dd8..0c6911154241 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -41,12 +41,16 @@ static int skcipher_walk_next(struct skcipher_walk *walk); static inline void 
skcipher_map_src(struct skcipher_walk *walk) { - walk->src.virt.addr = scatterwalk_map(&walk->in); + /* XXX */ + walk->in.__addr = scatterwalk_map(&walk->in); + walk->src.virt.addr = walk->in.addr; } static inline void skcipher_map_dst(struct skcipher_walk *walk) { - walk->dst.virt.addr = scatterwalk_map(&walk->out); + /* XXX */ + walk->out.__addr = scatterwalk_map(&walk->out); + walk->dst.virt.addr = walk->out.addr; } static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) @@ -120,7 +124,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res) goto dst_done; } - scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n); + scatterwalk_done_dst(&walk->out, n); dst_done: if (res > 0) diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index dd95e5361d88..a3b979193d9b 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -154,17 +154,16 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, struct scatter_walk walk; struct nx_sg *nx_sg = nx_dst; unsigned int n, len = *src_len; - char *dst; /* we need to fast forward through @start bytes first */ scatterwalk_start_at_pos(&walk, sg_src, start); while (len && (nx_sg - nx_dst) < sglen) { - dst = scatterwalk_next(&walk, len, &n); + n = scatterwalk_next(&walk, len); - nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst)); + nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst)); - scatterwalk_done_src(&walk, dst, n); + scatterwalk_done_src(&walk, n); len -= n; } /* update to_process */ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 94989b2e1350..f92e22686a68 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -54,6 +54,7 @@ struct rtattr; struct scatterlist; struct seq_file; struct sk_buff; +union crypto_no_such_thing; struct crypto_instance { struct crypto_alg alg; @@ -108,6 +109,12 @@ struct crypto_queue { struct scatter_walk { struct scatterlist *sg; unsigned int offset; + union { + void *const addr; + + /* Private API field, do not touch. */ + union crypto_no_such_thing *__addr; + }; }; struct crypto_attr_alg { diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 3024adbdd443..8523c7591d95 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -120,18 +120,20 @@ static inline void *scatterwalk_map(struct scatter_walk *walk) * scatterwalk_next() - Get the next data buffer in a scatterlist walk * @walk: the scatter_walk * @total: the total number of bytes remaining, > 0 - * @nbytes_ret: (out) the next number of bytes available, <= @total * - * Return: A virtual address for the next segment of data from the scatterlist. - * The caller must call scatterwalk_done_src() or scatterwalk_done_dst() - * when it is done using this virtual address. + * A virtual address for the next segment of data from the scatterlist will + * be placed into @walk->addr. The caller must call scatterwalk_done_src() + * or scatterwalk_done_dst() when it is done using this virtual address. 
+ * + * Returns: the next number of bytes available, <= @total */ -static inline void *scatterwalk_next(struct scatter_walk *walk, - unsigned int total, - unsigned int *nbytes_ret) +static inline unsigned int scatterwalk_next(struct scatter_walk *walk, + unsigned int total) { - *nbytes_ret = scatterwalk_clamp(walk, total); - return scatterwalk_map(walk); + unsigned int nbytes = scatterwalk_clamp(walk, total); + + walk->__addr = scatterwalk_map(walk); + return nbytes; } static inline void scatterwalk_unmap(const void *vaddr) @@ -149,32 +151,31 @@ static inline void scatterwalk_advance(struct scatter_walk *walk, /** * scatterwalk_done_src() - Finish one step of a walk of source scatterlist * @walk: the scatter_walk - * @vaddr: the address returned by scatterwalk_next() * @nbytes: the number of bytes processed this step, less than or equal to the * number of bytes that scatterwalk_next() returned. * - * Use this if the @vaddr was not written to, i.e. it is source data. + * Use this if the mapped address was not written to, i.e. it is source data. */ static inline void scatterwalk_done_src(struct scatter_walk *walk, - const void *vaddr, unsigned int nbytes) + unsigned int nbytes) { - scatterwalk_unmap(vaddr); + scatterwalk_unmap(walk->addr); scatterwalk_advance(walk, nbytes); } /** * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist * @walk: the scatter_walk - * @vaddr: the address returned by scatterwalk_next() * @nbytes: the number of bytes processed this step, less than or equal to the * number of bytes that scatterwalk_next() returned. * - * Use this if the @vaddr may have been written to, i.e. it is destination data. + * Use this if the mapped address may have been written to, i.e. it is + * destination data. */ static inline void scatterwalk_done_dst(struct scatter_walk *walk, - void *vaddr, unsigned int nbytes) + unsigned int nbytes) { - scatterwalk_unmap(vaddr); + scatterwalk_unmap(walk->addr); /* * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just * relying on flush_dcache_page() being a no-op when not implemented, -- cgit v1.2.3 From 01894c8488d84138bd79311200ee5d9dd088b9d7 Mon Sep 17 00:00:00 2001 From: Ethan Carter Edwards Date: Sat, 8 Mar 2025 19:30:52 -0500 Subject: crypto: artpec6 - change from kzalloc to kcalloc in artpec6_crypto_probe() We are trying to get rid of all multiplications from allocation functions to prevent potential integer overflows. Here the multiplication is probably safe, but using kcalloc() is more appropriate and improves readability. This patch has no effect on runtime behavior. 
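Reduced to a minimal sketch (dev and buf are placeholders, not code from the driver), the transformation is:

/* Open-coded multiplication: the caller computes the size and any
 * overflow goes unnoticed. */
buf = devm_kzalloc(dev, 2 * ARTPEC_CACHE_LINE_MAX, GFP_KERNEL);

/* kcalloc-style call: the allocator performs the n * size
 * multiplication itself, returns NULL on overflow and still zeroes
 * the memory. */
buf = devm_kcalloc(dev, 2, ARTPEC_CACHE_LINE_MAX, GFP_KERNEL);
if (!buf)
	return -ENOMEM;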
Link: https://github.com/KSPP/linux/issues/162 [1] Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments Signed-off-by: Ethan Carter Edwards Acked-by: Jesper Nilsson Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 1c1f57baef0e..500b08e42282 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2897,13 +2897,13 @@ static int artpec6_crypto_probe(struct platform_device *pdev) tasklet_init(&ac->task, artpec6_crypto_task, (unsigned long)ac); - ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + ac->pad_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->pad_buffer) return -ENOMEM; ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX); - ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + ac->zero_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->zero_buffer) return -ENOMEM; -- cgit v1.2.3 From 0af7304c0696ec5b3589af6973b7f27e014c2903 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 9 Mar 2025 10:43:14 +0800 Subject: crypto: scomp - Remove tfm argument from alloc/free_ctx The tfm argument is completely unused and meaningless as the same stream object is identical over all transforms of a given algorithm. Remove it. Signed-off-by: Herbert Xu --- crypto/842.c | 8 ++++---- crypto/deflate.c | 4 ++-- crypto/lz4.c | 8 ++++---- crypto/lz4hc.c | 8 ++++---- crypto/lzo-rle.c | 8 ++++---- crypto/lzo.c | 8 ++++---- crypto/zstd.c | 4 ++-- drivers/crypto/cavium/zip/zip_crypto.c | 6 +++--- drivers/crypto/cavium/zip/zip_crypto.h | 6 +++--- include/crypto/internal/scompress.h | 8 ++++---- 10 files changed, 34 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/crypto/842.c b/crypto/842.c index e59e54d76960..2238478c3493 100644 --- a/crypto/842.c +++ b/crypto/842.c @@ -28,7 +28,7 @@ struct crypto842_ctx { void *wmem; /* working memory for compress */ }; -static void *crypto842_alloc_ctx(struct crypto_scomp *tfm) +static void *crypto842_alloc_ctx(void) { void *ctx; @@ -43,14 +43,14 @@ static int crypto842_init(struct crypto_tfm *tfm) { struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->wmem = crypto842_alloc_ctx(NULL); + ctx->wmem = crypto842_alloc_ctx(); if (IS_ERR(ctx->wmem)) return -ENOMEM; return 0; } -static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void crypto842_free_ctx(void *ctx) { kfree(ctx); } @@ -59,7 +59,7 @@ static void crypto842_exit(struct crypto_tfm *tfm) { struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - crypto842_free_ctx(NULL, ctx->wmem); + crypto842_free_ctx(ctx->wmem); } static int crypto842_compress(struct crypto_tfm *tfm, diff --git a/crypto/deflate.c b/crypto/deflate.c index 98e8bcb81a6a..1bf7184ad670 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -112,7 +112,7 @@ out: return ret; } -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) +static void *deflate_alloc_ctx(void) { struct deflate_ctx *ctx; int ret; @@ -143,7 +143,7 @@ static void __deflate_exit(void *ctx) deflate_decomp_exit(ctx); } -static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void deflate_free_ctx(void *ctx) { __deflate_exit(ctx); kfree_sensitive(ctx); diff --git a/crypto/lz4.c b/crypto/lz4.c index 0606f8862e78..e66c6d1ba34f 100644 
--- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -16,7 +16,7 @@ struct lz4_ctx { void *lz4_comp_mem; }; -static void *lz4_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4_alloc_ctx(void) { void *ctx; @@ -31,14 +31,14 @@ static int lz4_init(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); + ctx->lz4_comp_mem = lz4_alloc_ctx(); if (IS_ERR(ctx->lz4_comp_mem)) return -ENOMEM; return 0; } -static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4_free_ctx(void *ctx) { vfree(ctx); } @@ -47,7 +47,7 @@ static void lz4_exit(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - lz4_free_ctx(NULL, ctx->lz4_comp_mem); + lz4_free_ctx(ctx->lz4_comp_mem); } static int __lz4_compress_crypto(const u8 *src, unsigned int slen, diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index d7cc94aa2fcf..25a95b65aca5 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -15,7 +15,7 @@ struct lz4hc_ctx { void *lz4hc_comp_mem; }; -static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4hc_alloc_ctx(void) { void *ctx; @@ -30,14 +30,14 @@ static int lz4hc_init(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); + ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(); if (IS_ERR(ctx->lz4hc_comp_mem)) return -ENOMEM; return 0; } -static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4hc_free_ctx(void *ctx) { vfree(ctx); } @@ -46,7 +46,7 @@ static void lz4hc_exit(struct crypto_tfm *tfm) { struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); + lz4hc_free_ctx(ctx->lz4hc_comp_mem); } static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0abc2d87f042..6c845e7d32f5 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -15,7 +15,7 @@ struct lzorle_ctx { void *lzorle_comp_mem; }; -static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) +static void *lzorle_alloc_ctx(void) { void *ctx; @@ -30,14 +30,14 @@ static int lzorle_init(struct crypto_tfm *tfm) { struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL); + ctx->lzorle_comp_mem = lzorle_alloc_ctx(); if (IS_ERR(ctx->lzorle_comp_mem)) return -ENOMEM; return 0; } -static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzorle_free_ctx(void *ctx) { kvfree(ctx); } @@ -46,7 +46,7 @@ static void lzorle_exit(struct crypto_tfm *tfm) { struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - lzorle_free_ctx(NULL, ctx->lzorle_comp_mem); + lzorle_free_ctx(ctx->lzorle_comp_mem); } static int __lzorle_compress(const u8 *src, unsigned int slen, diff --git a/crypto/lzo.c b/crypto/lzo.c index 8338851c7406..035d62e2afe0 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -15,7 +15,7 @@ struct lzo_ctx { void *lzo_comp_mem; }; -static void *lzo_alloc_ctx(struct crypto_scomp *tfm) +static void *lzo_alloc_ctx(void) { void *ctx; @@ -30,14 +30,14 @@ static int lzo_init(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); + ctx->lzo_comp_mem = lzo_alloc_ctx(); if (IS_ERR(ctx->lzo_comp_mem)) return -ENOMEM; return 0; } -static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzo_free_ctx(void *ctx) { kvfree(ctx); } @@ -46,7 +46,7 @@ static void lzo_exit(struct crypto_tfm *tfm) { struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - lzo_free_ctx(NULL, ctx->lzo_comp_mem); + 
lzo_free_ctx(ctx->lzo_comp_mem); } static int __lzo_compress(const u8 *src, unsigned int slen, diff --git a/crypto/zstd.c b/crypto/zstd.c index 154a969c83a8..68a093427944 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c @@ -103,7 +103,7 @@ static int __zstd_init(void *ctx) return ret; } -static void *zstd_alloc_ctx(struct crypto_scomp *tfm) +static void *zstd_alloc_ctx(void) { int ret; struct zstd_ctx *ctx; @@ -134,7 +134,7 @@ static void __zstd_exit(void *ctx) zstd_decomp_exit(ctx); } -static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void zstd_free_ctx(void *ctx) { __zstd_exit(ctx); kfree_sensitive(ctx); diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 1046a746d36f..a9c3efce8f2d 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -236,7 +236,7 @@ int zip_comp_decompress(struct crypto_tfm *tfm, } /* Legacy compress framework end */ /* SCOMP framework start */ -void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) +void *zip_alloc_scomp_ctx_deflate(void) { int ret; struct zip_kernel_ctx *zip_ctx; @@ -255,7 +255,7 @@ void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) return zip_ctx; } -void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) +void *zip_alloc_scomp_ctx_lzs(void) { int ret; struct zip_kernel_ctx *zip_ctx; @@ -274,7 +274,7 @@ void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) return zip_ctx; } -void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx) +void zip_free_scomp_ctx(void *ctx) { struct zip_kernel_ctx *zip_ctx = ctx; diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h index b59ddfcacd34..dbe20bfeb3e9 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.h +++ b/drivers/crypto/cavium/zip/zip_crypto.h @@ -67,9 +67,9 @@ int zip_comp_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); -void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm); -void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm); -void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx); +void *zip_alloc_scomp_ctx_deflate(void); +void *zip_alloc_scomp_ctx_lzs(void); +void zip_free_scomp_ctx(void *zip_ctx); int zip_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 07a10fd2d321..6ba9974df7d3 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -31,8 +31,8 @@ struct crypto_scomp { * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(struct crypto_scomp *tfm); - void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -73,13 +73,13 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) { - return crypto_scomp_alg(tfm)->alloc_ctx(tfm); + return crypto_scomp_alg(tfm)->alloc_ctx(); } static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx) { - return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); + return crypto_scomp_alg(tfm)->free_ctx(ctx); } static inline int crypto_scomp_compress(struct crypto_scomp *tfm, -- cgit v1.2.3 From 
ea3d35467ba474768013365ce5f2c6eb454fed3a Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Mon, 10 Mar 2025 16:15:40 +0000 Subject: crypto: qat - add macro to write 64-bit values to registers Introduce the ADF_CSR_WR_LO_HI macro to simplify writing a 64-bit values to hardware registers. This macro works by splitting the 64-bit value into two 32-bit segments, which are then written separately to the specified lower and upper register offsets. Update the adf_gen4_set_ssm_wdtimer() function to utilize this newly introduced macro. Signed-off-by: Suman Kumar Chakraborty Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_common/adf_accel_devices.h | 10 ++++++++ .../crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 30 +++++----------------- 2 files changed, 16 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 9c15c31ad134..0509174d6030 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_rl.h" #include "adf_telemetry.h" @@ -371,6 +372,15 @@ struct adf_hw_device_data { /* CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ __raw_writel(val, csr_base + csr_offset) +/* + * CSR write macro to handle cases where the high and low + * offsets are sparsely located. + */ +#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \ +do { \ + ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \ + ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \ +} while (0) /* CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index ade279e48010..099949a2421c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -135,36 +135,18 @@ int adf_gen4_init_device(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_init_device); -static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, - u32 *lower) -{ - *lower = lower_32_bits(value); - *upper = upper_32_bits(value); -} - void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; - u32 ssm_wdt_pke_high = 0; - u32 ssm_wdt_pke_low = 0; - u32 ssm_wdt_high = 0; - u32 ssm_wdt_low = 0; - /* Convert 64bit WDT timer value into 32bit values for - * mmio write to 32bit CSRs. 
- */ - adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); - adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, - &ssm_wdt_pke_low); - - /* Enable WDT for sym and dc */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); - /* Enable WDT for pke */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); + /* Enable watchdog timer for sym and dc */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val); + + /* Enable watchdog timer for pke */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, + timer_val_pke); } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); -- cgit v1.2.3 From f3bda3b9b69cb193968ba781446ff18c734f0276 Mon Sep 17 00:00:00 2001 From: Suman Kumar Chakraborty Date: Wed, 12 Mar 2025 11:39:38 +0000 Subject: crypto: qat - introduce fuse array Change the representation of fuses in the accelerator device structure from a single value to an array. This allows the structure to accommodate additional fuses that are required for future generations of QAT hardware. This does not introduce any functional changes. Signed-off-by: Suman Kumar Chakraborty Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 2 +- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 2 +- drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 4 ++-- drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 2 +- drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 4 ++-- drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 4 ++-- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 12 +++++++++++- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 6 +++--- drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 2 +- 12 files changed, 27 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 87c71914ae87..98f3c1210666 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -98,7 +98,7 @@ static struct adf_hw_device_class adf_420xx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_420XX_ACCELENGINES_MASK; } diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 9589d60fb281..8084aa0f7f41 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -79,7 +79,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c 
b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 36eebda3a028..4eb6ef99efdd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -101,7 +101,7 @@ static struct adf_hw_device_class adf_4xxx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index d7de1cad1335..5537a9991e4e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -81,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 201f9412c582..e78f7bfd30b8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c3xxx_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 ae_disable; int accel; diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index caa53882fda6..b825b35ab4bf 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_c3xxx(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 6b5b0cf9c7c7..32ebe09477a8 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c62x_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 
ae_disable; int accel; diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index b7398fee19ed..8a7bdec358d6 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_c62x(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); @@ -169,7 +169,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); /* Find and map all the device's BARS */ - i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; + i = (hw_data->fuses[ADF_FUSECTL0] & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 0509174d6030..dc21551153cb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -53,6 +53,16 @@ enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; +enum adf_fuses { + ADF_FUSECTL0, + ADF_FUSECTL1, + ADF_FUSECTL2, + ADF_FUSECTL3, + ADF_FUSECTL4, + ADF_FUSECTL5, + ADF_MAX_FUSES +}; + struct adf_bar { resource_size_t base_addr; void __iomem *virt_addr; @@ -345,7 +355,7 @@ struct adf_hw_device_data { struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; - u32 fuses; + u32 fuses[ADF_MAX_FUSES]; u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index 1f64bf49b221..2b263442c856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -115,8 +115,8 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 fuses = hw_data->fuses[ADF_FUSECTL0]; u32 straps = hw_data->straps; - u32 fuses = hw_data->fuses; u32 legfuses; u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index c0661ff5e929..e48bcf1818cd 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -29,7 +29,7 @@ static struct adf_hw_device_class dh895xcc_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; @@ -37,7 +37,7 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK; } @@ 
-99,7 +99,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { - int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) + int sku = (self->fuses[ADF_FUSECTL0] & ADF_DH895XCC_FUSECTL_SKU_MASK) >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; switch (sku) { diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 3137fc3b5cf6..07e9d7e52861 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); -- cgit v1.2.3 From 795e5bdb0ada2c77ea28611d88f1d5d7ca9b2f4d Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 13 Mar 2025 16:29:31 +0100 Subject: crypto: tegra - Fix format specifier in tegra_sha_prep_cmd() When building for 32-bit targets, for which ssize_t is 'int' instead of 'long', there is a warning due to an incorrect format specifier: In file included from include/linux/printk.h:610, from include/linux/kernel.h:31, from include/linux/clk.h:13, from drivers/crypto/tegra/tegra-se-hash.c:7: drivers/crypto/tegra/tegra-se-hash.c: In function 'tegra_sha_prep_cmd': drivers/crypto/tegra/tegra-se-hash.c:343:26: error: format '%lu' expects argument of type 'long unsigned int', but argument 6 has type 'ssize_t' {aka 'int'} [-Werror=format=] 343 | dev_dbg(se->dev, "msg len %llu msg left %llu sz %lu cfg %#x", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ... drivers/crypto/tegra/tegra-se-hash.c:343:59: note: format string is defined here 343 | dev_dbg(se->dev, "msg len %llu msg left %llu sz %lu cfg %#x", | ~~^ | | | long unsigned int | %u cc1: all warnings being treated as errors Use '%zd', the proper specifier for ssize_t, to resolve the warning. Fixes: ff4b7df0b511 ("crypto: tegra - Fix HASH intermediate result handling") Signed-off-by: Nathan Chancellor Signed-off-by: Herbert Xu --- drivers/crypto/tegra/tegra-se-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 65a50f29bd7e..42d007b7af45 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -340,7 +340,7 @@ static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); - dev_dbg(se->dev, "msg len %llu msg left %llu sz %lu cfg %#x", + dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x", msg_len, msg_left, rctx->datbuf.size, rctx->config); return i; -- cgit v1.2.3 From bd2c6e0e0616ae10eaf73f34223680ae406c5d42 Mon Sep 17 00:00:00 2001 From: Jack Xu Date: Fri, 14 Mar 2025 12:57:52 +0000 Subject: crypto: qat - remove unused members in suof structure Remove the members `css_key` and `css_signature` which are not used for doing the firmware authentication. The signed image pointer can be calculated using the pointer to the CSS header and the length of the CSS header, making these members redundant. 
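In other words, the chain of intermediate pointers collapses into a single offset from the CSS header, roughly as sketched below (lines taken from the hunk further down; ICP_QAT_AE_IMG_OFFSET() already accounts for the header, modulus, exponent and signature lengths):

/* Before: step through the key and signature to reach the signed image. */
suof_img_hdr->css_key = suof_img_hdr->css_header +
			sizeof(struct icp_qat_css_hdr);
suof_img_hdr->css_signature = suof_img_hdr->css_key +
			      ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
			      ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
suof_img_hdr->css_simg = suof_img_hdr->css_signature +
			 ICP_QAT_CSS_SIGNATURE_LEN(handle);

/* After: compute the same address directly; css_key and css_signature
 * no longer need to be stored. */
suof_img_hdr->css_simg = suof_img_hdr->css_header +
			 ICP_QAT_AE_IMG_OFFSET(handle);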
Signed-off-by: Jack Xu Reviewed-by: Ahsan Atta Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 2 -- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 9 ++------- 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index e28241bdd0f4..4b5e7dcd11d1 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -404,8 +404,6 @@ struct icp_qat_suof_img_hdr { char *simg_buf; unsigned long simg_len; char *css_header; - char *css_key; - char *css_signature; char *css_simg; unsigned long simg_size; unsigned int ae_num; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 7ea40b4f6e5b..5ace036addec 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1064,6 +1064,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle); struct icp_qat_simg_ae_mode *ae_mode; struct icp_qat_suof_objhdr *suof_objhdr; @@ -1075,13 +1076,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, suof_chunk_hdr->offset))->img_length; suof_img_hdr->css_header = suof_img_hdr->simg_buf; - suof_img_hdr->css_key = (suof_img_hdr->css_header + - sizeof(struct icp_qat_css_hdr)); - suof_img_hdr->css_signature = suof_img_hdr->css_key + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); - suof_img_hdr->css_simg = suof_img_hdr->css_signature + - ICP_QAT_CSS_SIGNATURE_LEN(handle); + suof_img_hdr->css_simg = suof_img_hdr->css_header + offset; ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); suof_img_hdr->ae_mask = ae_mode->ae_mask; -- cgit v1.2.3 From 0d5cb730b59ba23d6472b8b1eacd351e7bc48c2b Mon Sep 17 00:00:00 2001 From: Jack Xu Date: Fri, 14 Mar 2025 12:57:53 +0000 Subject: crypto: qat - remove redundant FW image size check The FW image size check is already performed in the function qat_uclo_check_image() before calling `qat_uclo_map_auth_fw()`. Therefore, the additional check in `qat_uclo_map_auth_fw()` is redundant and can be safely removed. Signed-off-by: Jack Xu Reviewed-by: Ahsan Atta Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 5ace036addec..61be6df50684 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1418,10 +1418,6 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { - pr_err("QAT: error, input image size overflow %d\n", size); - return -EINVAL; - } length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 
ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset : size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset; -- cgit v1.2.3 From 987fd1a4bad6cd0c5daf422bc65621c70c88087d Mon Sep 17 00:00:00 2001 From: Jack Xu Date: Fri, 14 Mar 2025 12:57:54 +0000 Subject: crypto: qat - optimize allocations for fw authentication The memory requested to hold the image data for authentication will never exceed `ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN`. Therefore, we can simplify the allocation by always requesting the maximum size needed for any image. Also introduce the following checks: * Ensure the allocated memory is 8-byte aligned to meet the requirements of the authentication firmware. * Prevent overflow when constructing the authentication descriptor. Signed-off-by: Jack Xu Reviewed-by: Ahsan Atta Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 8 ------- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 25 ++++++++++++++++------ 2 files changed, 19 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 4b5e7dcd11d1..1c7bcd8e4055 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -43,7 +43,6 @@ #define ICP_QAT_SUOF_OBJS "SUF_OBJS" #define ICP_QAT_SUOF_IMAG "SUF_IMAG" #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) -#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) #define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */ #define DSS_FWSK_EXPONENT_LEN 4 @@ -75,13 +74,6 @@ DSS_SIGNATURE_LEN : \ CSS_SIGNATURE_LEN) -#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ - ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ - ICP_QAT_SIMG_AE_INSTS_LEN) -#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \ - ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \ - ICP_QAT_CSS_SIGNATURE_LEN(handle) + \ - ICP_QAT_CSS_AE_IMG_LEN) #define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 61be6df50684..7678a93c6853 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ +#include #include #include #include @@ -1414,16 +1415,21 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *auth_desc; struct icp_qat_auth_chunk *auth_chunk; u64 virt_addr, bus_addr, virt_base; - unsigned int length, simg_offset = sizeof(*auth_chunk); + unsigned int simg_offset = sizeof(*auth_chunk); struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; + int ret; - length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 
- ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset : - size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset; - if (qat_uclo_simg_alloc(handle, &img_desc, length)) { + ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); + if (ret) { pr_err("QAT: error, allocate continuous dram fail\n"); - return -ENOMEM; + return ret; + } + + if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) { + pr_debug("QAT: invalid address\n"); + qat_uclo_simg_free(handle, &img_desc); + return -EINVAL; } auth_chunk = img_desc.dram_base_addr_v; @@ -1481,6 +1487,13 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_low = (unsigned int)bus_addr; auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle); + if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr + + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { + pr_err("QAT: insufficient memory size for authentication data\n"); + qat_uclo_simg_free(handle, &img_desc); + return -ENOMEM; + } + memcpy((void *)(uintptr_t)virt_addr, (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)), auth_desc->img_len); -- cgit v1.2.3 From f9555d18084985c80a91baa4fdb7d205b401a754 Mon Sep 17 00:00:00 2001 From: Bairavi Alagappan Date: Fri, 14 Mar 2025 13:14:29 +0000 Subject: crypto: qat - set parity error mask for qat_420xx The field parerr_wat_wcp_mask in the structure adf_dev_err_mask enables the detection and reporting of parity errors for the wireless cipher and wireless authentication accelerators. Set the parerr_wat_wcp_mask field, which was inadvertently omitted during the initial enablement of the qat_420xx driver, to ensure that parity errors are enabled for those accelerators. In addition, fix the string used to report such errors that was inadvertently set to "ath_cph" (authentication and cipher). 
Fixes: fcf60f4bcf54 ("crypto: qat - add support for 420xx devices") Signed-off-by: Bairavi Alagappan Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 + drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 98f3c1210666..4feeef83f7a3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -411,6 +411,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK; dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 2dd3772bf58a..bf0ea09faa65 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, if (err_mask->parerr_wat_wcp_mask) adf_poll_slicehang_csr(accel_dev, csr, ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, - "ath_cph"); + "wat_wcp"); return false; } -- cgit v1.2.3 From 92c6a707d82f0629debf1c21dd87717776d96af2 Mon Sep 17 00:00:00 2001 From: Bairavi Alagappan Date: Fri, 14 Mar 2025 15:09:31 +0000 Subject: crypto: qat - remove access to parity register for QAT GEN4 The firmware already handles parity errors reported by the accelerators by clearing them through the corresponding SSMSOFTERRORPARITY register. To ensure consistent behavior and prevent race conditions between the driver and firmware, remove the logic that checks the SSMSOFTERRORPARITY registers. Additionally, change the return type of the function adf_handle_rf_parr_err() to void, as it consistently returns false. Parity errors are recoverable and do not necessitate a device reset. 
Fixes: 895f7d532c84 ("crypto: qat - add handling of errors from ERRSOU2 for QAT GEN4") Signed-off-by: Bairavi Alagappan Reviewed-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 57 ++-------------------- 1 file changed, 5 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index bf0ea09faa65..0f7f00a19e7d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, return reset_required; } -static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, +static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); - u32 reg; - if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) - return false; - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); - reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); - reg &= err_mask->parerr_ath_cph_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); - reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); - reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); - reg &= err_mask->parerr_pke_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); - } - - if (err_mask->parerr_wat_wcp_mask) { - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); - reg &= err_mask->parerr_wat_wcp_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, - reg); - } - } + return; + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); - return false; + return; } static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, @@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); - reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + adf_handle_rf_parr_err(accel_dev, csr, iastatssm); ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); -- cgit v1.2.3 From edc8e80bf862a7282da017e7f16e63b52585c793 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 14 Mar 2025 17:05:32 +0100 Subject: crypto: lib/Kconfig - hide 
library options Any driver that needs these library functions should already be selecting the corresponding Kconfig symbols, so there is no real point in making these visible. The original patch that made these user selectable described problems with drivers failing to select the code they use, but for consistency it's better to always use 'select' on a symbol than to mix it with 'depends on'. Fixes: e56e18985596 ("lib/crypto: add prompts back to crypto libraries") Signed-off-by: Arnd Bergmann Acked-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- drivers/crypto/marvell/Kconfig | 4 ++-- lib/crypto/Kconfig | 8 ++++---- security/keys/Kconfig | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig index 4c25a78ab3ed..aa269abb0499 100644 --- a/drivers/crypto/marvell/Kconfig +++ b/drivers/crypto/marvell/Kconfig @@ -24,7 +24,7 @@ config CRYPTO_DEV_OCTEONTX_CPT tristate "Support for Marvell OcteonTX CPT driver" depends on ARCH_THUNDER || COMPILE_TEST depends on PCI_MSI && 64BIT - depends on CRYPTO_LIB_AES + select CRYPTO_LIB_AES select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD @@ -41,10 +41,10 @@ config CRYPTO_DEV_OCTEONTX2_CPT tristate "Marvell OcteonTX2 CPT driver" depends on ARCH_THUNDER2 || COMPILE_TEST depends on PCI_MSI && 64BIT - depends on CRYPTO_LIB_AES depends on NET_VENDOR_MARVELL select OCTEONTX2_MBOX select CRYPTO_DEV_MARVELL + select CRYPTO_LIB_AES select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 17322f871586..798972b29b68 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -63,7 +63,7 @@ config CRYPTO_LIB_CHACHA_INTERNAL select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n config CRYPTO_LIB_CHACHA - tristate "ChaCha library interface" + tristate select CRYPTO select CRYPTO_LIB_CHACHA_INTERNAL help @@ -93,7 +93,7 @@ config CRYPTO_LIB_CURVE25519_INTERNAL select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n config CRYPTO_LIB_CURVE25519 - tristate "Curve25519 scalar multiplication library" + tristate select CRYPTO select CRYPTO_LIB_CURVE25519_INTERNAL help @@ -132,7 +132,7 @@ config CRYPTO_LIB_POLY1305_INTERNAL select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n config CRYPTO_LIB_POLY1305 - tristate "Poly1305 library interface" + tristate select CRYPTO select CRYPTO_LIB_POLY1305_INTERNAL help @@ -141,7 +141,7 @@ config CRYPTO_LIB_POLY1305 is available and enabled. config CRYPTO_LIB_CHACHA20POLY1305 - tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" + tristate select CRYPTO_LIB_CHACHA select CRYPTO_LIB_POLY1305 select CRYPTO_LIB_UTILS diff --git a/security/keys/Kconfig b/security/keys/Kconfig index abb03a1b2a5c..d4f5fc1e7263 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -60,7 +60,7 @@ config BIG_KEYS bool "Large payload keys" depends on KEYS depends on TMPFS - depends on CRYPTO_LIB_CHACHA20POLY1305 = y + select CRYPTO_LIB_CHACHA20POLY1305 help This option provides support for holding large keys within the kernel (for example Kerberos ticket caches). The data may be stored out to -- cgit v1.2.3 From 9cf792844d5da59403c1b36e68d65291676d935b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 16:20:16 +0800 Subject: crypto: padlock - Use zero page instead of stack buffer Use desc instead of a stack buffer in the final function. This fixes a compiler warning about buf being uninitialised. 
Signed-off-by: Herbert Xu --- drivers/crypto/padlock-sha.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 6865c7f1fc1a..db9e84c0c9fb 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -125,7 +125,7 @@ out: static int padlock_sha1_final(struct shash_desc *desc, u8 *out) { - u8 buf[4]; + const u8 *buf = (void *)desc; return padlock_sha1_finup(desc, buf, 0, out); } @@ -186,7 +186,7 @@ out: static int padlock_sha256_final(struct shash_desc *desc, u8 *out) { - u8 buf[4]; + const u8 *buf = (void *)desc; return padlock_sha256_finup(desc, buf, 0, out); } -- cgit v1.2.3 From 9b00eb923f3e60ca76cbc8b31123716f3a87ac6a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 16:50:42 +0800 Subject: crypto: nx - Fix uninitialised hv_nxc on error The compiler correctly warns that hv_nxc may be used uninitialised as that will occur when NX-GZIP is unavailable. Fix it by rearranging the code and delay setting caps_feat until the final query succeeds. Fixes: b4ba22114c78 ("crypto/nx: Get NX capabilities for GZIP coprocessor type") Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-common-pseries.c | 37 ++++++++++++++++------------------- 1 file changed, 17 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 1660c5cf3641..56129bdf53ab 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1145,6 +1145,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc; hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1155,27 +1156,26 @@ static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return; - caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc)); if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1185,13 +1185,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; } kfree(hv_nxc); -out: - kfree(hv_caps); } static const struct vio_device_id nx842_vio_driver_ids[] = { -- cgit v1.2.3 From 02c974294c740bfb747ec64933e12148eb3d99e1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:24 +0800 Subject: crypto: iaa - Remove dst_null support Remove the unused dst_null support. 
Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 136 ++--------------------------- 1 file changed, 6 insertions(+), 130 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 990ea46955bb..50cb100bf1c8 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1136,8 +1136,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc, - bool disable_async) + u32 *compression_crc) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1180,7 +1179,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1193,7 +1192,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) + } else if (ctx->async_mode) req->base.data = idxd_desc; dev_dbg(dev, "%s: compression mode %s," @@ -1214,7 +1213,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_comp_calls(); update_wq_comp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1234,7 +1233,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1500,13 +1499,11 @@ static int iaa_comp_acompress(struct acomp_req *req) struct iaa_compression_ctx *compression_ctx; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; - bool disable_async = false; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; u32 compression_crc; struct idxd_wq *wq; struct device *dev; - int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1536,21 +1533,6 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); - if (!req->dst) { - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; - - /* incompressible data will always be < 2 * slen */ - req->dlen = 2 * req->slen; - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - disable_async = true; - } - dev = &wq->idxd->pdev->dev; nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); @@ -1580,7 +1562,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc, disable_async); + &req->dlen, &compression_crc); if (ret == -EINPROGRESS) return ret; @@ -1611,100 +1593,6 @@ err_map_dst: out: iaa_wq_put(wq); - if (order >= 0) - sgl_free_order(req->dst, order); - - return ret; -} - -static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) -{ - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - GFP_KERNEL : GFP_ATOMIC; - struct crypto_tfm *tfm = req->base.tfm; - dma_addr_t src_addr, dst_addr; - int nr_sgs, cpu, ret = 0; - struct iaa_wq *iaa_wq; - struct device *dev; - struct idxd_wq *wq; - int order = -1; - - cpu = get_cpu(); - wq = wq_table_next_wq(cpu); - put_cpu(); - if (!wq) { - pr_debug("no wq configured for cpu=%d\n", cpu); - return -ENODEV; - } - - ret = iaa_wq_get(wq); - if (ret) { - pr_debug("no wq available for cpu=%d\n", cpu); - return -ENODEV; - } - - iaa_wq = idxd_wq_get_private(wq); - - dev = &wq->idxd->pdev->dev; - - nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map src sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto out; - } - src_addr = sg_dma_address(req->src); - dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," - " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, - req->src, req->slen, sg_dma_len(req->src)); - - req->dlen = 4 * req->slen; /* start with ~avg comp rato */ -alloc_dest: - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - - nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map dst sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto err_map_dst; - } - - dst_addr = sg_dma_address(req->dst); - dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," - " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, - req->dst, req->dlen, sg_dma_len(req->dst)); - ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, true); - if (ret == -EOVERFLOW) { - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - req->dlen *= 2; - if (req->dlen > CRYPTO_ACOMP_DST_MAX) - goto err_map_dst; - goto alloc_dest; - } - - if (ret != 0) - dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); - - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); -err_map_dst: - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); -out: - iaa_wq_put(wq); - - if (order >= 0) - sgl_free_order(req->dst, order); - return ret; } @@ -1727,9 +1615,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) return -EINVAL; } - if (!req->dst) - return 
iaa_comp_adecompress_alloc_dest(req); - cpu = get_cpu(); wq = wq_table_next_wq(cpu); put_cpu(); @@ -1810,19 +1695,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) return 0; } -static void dst_free(struct scatterlist *sgl) -{ - /* - * Called for req->dst = NULL cases but we free elsewhere - * using sgl_free_order(). - */ -} - static struct acomp_alg iaa_acomp_fixed_deflate = { .init = iaa_comp_init_fixed, .compress = iaa_comp_acompress, .decompress = iaa_comp_adecompress, - .dst_free = dst_free, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", -- cgit v1.2.3 From c964444fa7ac0aabf1773a7bbcfeb7457f853d99 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:27 +0800 Subject: crypto: qat - Remove dst_null support Remove the unused dst_null support. Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_bl.c | 159 --------------------- drivers/crypto/intel/qat/qat_common/qat_bl.h | 6 - .../crypto/intel/qat/qat_common/qat_comp_algs.c | 85 +---------- drivers/crypto/intel/qat/qat_common/qat_comp_req.h | 10 -- 4 files changed, 1 insertion(+), 259 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 338acf29c487..5e4dad4693ca 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -251,162 +251,3 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, extra_dst_buff, sz_extra_dst_buff, sskip, dskip, flags); } - -static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, - struct qat_alg_buf_list *bl) -{ - struct device *dev = &GET_DEV(accel_dev); - int n = bl->num_bufs; - int i; - - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bl->buffers[i].addr)) - dma_unmap_single(dev, bl->buffers[i].addr, - bl->buffers[i].len, DMA_FROM_DEVICE); -} - -static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list **bl) -{ - struct device *dev = &GET_DEV(accel_dev); - struct qat_alg_buf_list *bufl; - int node = dev_to_node(dev); - struct scatterlist *sg; - int n, i, sg_nctr; - size_t sz; - - n = sg_nents(sgl); - sz = struct_size(bufl, buffers, n); - bufl = kzalloc_node(sz, GFP_KERNEL, node); - if (unlikely(!bufl)) - return -ENOMEM; - - for (i = 0; i < n; i++) - bufl->buffers[i].addr = DMA_MAPPING_ERROR; - - sg_nctr = 0; - for_each_sg(sgl, sg, n, i) { - int y = sg_nctr; - - if (!sg->length) - continue; - - bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - bufl->buffers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr))) - goto err_map; - sg_nctr++; - } - bufl->num_bufs = sg_nctr; - bufl->num_mapped_bufs = sg_nctr; - - *bl = bufl; - - return 0; - -err_map: - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->buffers[i].addr)) - dma_unmap_single(dev, bufl->buffers[i].addr, - bufl->buffers[i].len, - DMA_FROM_DEVICE); - kfree(bufl); - *bl = NULL; - - return -ENOMEM; -} - -static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list *bl, - bool free_bl) -{ - if (bl) { - qat_bl_sgl_unmap(accel_dev, bl); - - if (free_bl) - kfree(bl); - } - if (sgl) - sgl_free(sgl); -} - -static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev, - struct scatterlist **sgl, - struct qat_alg_buf_list **bl, - unsigned int dlen, - gfp_t gfp) -{ - struct scatterlist *dst; - int ret; - - dst = sgl_alloc(dlen, gfp, 
NULL); - if (!dst) { - dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n"); - return -ENOMEM; - } - - ret = qat_bl_sgl_map(accel_dev, dst, bl); - if (ret) - goto err; - - *sgl = dst; - - return 0; - -err: - sgl_free(dst); - *sgl = NULL; - return ret; -} - -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **sg, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp) -{ - struct device *dev = &GET_DEV(accel_dev); - dma_addr_t new_blp = DMA_MAPPING_ERROR; - struct qat_alg_buf_list *new_bl; - struct scatterlist *new_sg; - size_t new_bl_size; - int ret; - - ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp); - if (ret) - return ret; - - new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs); - - /* Map new firmware SGL descriptor */ - new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, new_blp))) - goto err; - - /* Unmap old firmware SGL descriptor */ - dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE); - - /* Free and unmap old scatterlist */ - qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout, - !qat_bufs->sgl_dst_valid); - - qat_bufs->sgl_dst_valid = false; - qat_bufs->blout = new_bl; - qat_bufs->bloutp = new_blp; - qat_bufs->sz_out = new_bl_size; - - *sg = new_sg; - - return 0; -err: - qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true); - - if (!dma_mapping_error(dev, new_blp)) - dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE); - - return -ENOMEM; -} diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index 3f5b79015400..2827d5055d3c 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -65,10 +65,4 @@ static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; } -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **newd, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp); - #endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index 2ba4aa22e092..a6e02405d402 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -29,11 +29,6 @@ struct qat_compression_ctx { int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp); }; -struct qat_dst { - bool is_null; - int resubmitted; -}; - struct qat_compression_req { u8 req[QAT_COMP_REQ_SIZE]; struct qat_compression_ctx *qat_compression_ctx; @@ -42,8 +37,6 @@ struct qat_compression_req { enum direction dir; int actual_dlen; struct qat_alg_req alg_req; - struct work_struct resubmit; - struct qat_dst dst; }; static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, @@ -60,46 +53,6 @@ static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, return qat_alg_send_message(alg_req); } -static void qat_comp_resubmit(struct work_struct *work) -{ - struct qat_compression_req *qat_req = - container_of(work, struct qat_compression_req, resubmit); - struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx; - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - struct qat_request_buffs *qat_bufs = &qat_req->buf; - struct qat_compression_instance *inst = ctx->inst; - struct acomp_req *areq = qat_req->acompress_req; - struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); - unsigned int dlen = CRYPTO_ACOMP_DST_MAX; - u8 *req = qat_req->req; - dma_addr_t dfbuf; - int ret; - - areq->dlen = dlen; - - dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n", - crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), - qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen); - - ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs, - qat_algs_alloc_flags(&areq->base)); - if (ret) - goto err; - - qat_req->dst.resubmitted = true; - - dfbuf = qat_req->buf.bloutp; - qat_comp_override_dst(req, dfbuf, dlen); - - ret = qat_alg_send_dc_message(qat_req, inst, &areq->base); - if (ret != -ENOSPC) - return; - -err: - qat_bl_free_bufl(accel_dev, qat_bufs); - acomp_request_complete(areq, ret); -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -131,21 +84,6 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, areq->dlen = 0; - if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) { - if (cmp_err == ERR_CODE_OVERFLOW_ERROR) { - if (qat_req->dst.resubmitted) { - dev_dbg(&GET_DEV(accel_dev), - "Output does not fit destination buffer\n"); - res = -EOVERFLOW; - goto end; - } - - INIT_WORK(&qat_req->resubmit, qat_comp_resubmit); - adf_misc_wq_queue_work(&qat_req->resubmit); - return; - } - } - if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) goto end; @@ -245,29 +183,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi if (!areq->src || !slen) return -EINVAL; - if (areq->dst && !dlen) + if (!areq->dst || !dlen) return -EINVAL; - qat_req->dst.is_null = false; - - /* Handle acomp requests that require the allocation of a destination - * buffer. The size of the destination buffer is double the source - * buffer (rounded up to the size of a page) to fit the decompressed - * output or an expansion on the data for compression. 
- */ - if (!areq->dst) { - qat_req->dst.is_null = true; - - dlen = round_up(2 * slen, PAGE_SIZE); - areq->dst = sgl_alloc(dlen, f, NULL); - if (!areq->dst) - return -ENOMEM; - - dlen -= dhdr + dftr; - areq->dlen = dlen; - qat_req->dst.resubmitted = false; - } - if (dir == COMPRESSION) { params.extra_dst_buff = inst->dc_data->ovf_buff_p; ovf_buff_sz = inst->dc_data->ovf_buff_sz; @@ -329,7 +247,6 @@ static struct acomp_alg qat_acomp[] = { { .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, - .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), }}; diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h index 404e32c5e778..18a1f33a6db9 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h @@ -25,16 +25,6 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen, req_pars->out_buffer_sz = dlen; } -static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen) -{ - struct icp_qat_fw_comp_req *fw_req = req; - struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars; - - fw_req->comn_mid.dest_data_addr = dst; - fw_req->comn_mid.dst_length = dlen; - req_pars->out_buffer_sz = dlen; -} - static inline void qat_comp_create_compression_req(void *ctx, void *req, u64 src, u32 slen, u64 dst, u32 dlen, -- cgit v1.2.3 From dfd28c89fa91d92b7790ec4d1e8d8d5b4e8f1b19 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:36 +0800 Subject: crypto: iaa - Use acomp stack fallback Use ACOMP_REQUEST_ON_STACK instead of allocating legacy fallback compression transform. Signed-off-by: Herbert Xu --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 50cb100bf1c8..09d9589f2d68 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -33,8 +33,6 @@ static unsigned int nr_cpus_per_node; /* Number of physical cpus sharing each iaa instance */ static unsigned int cpus_per_iaa; -static struct crypto_comp *deflate_generic_tfm; - /* Per-cpu lookup table for balanced wqs */ static struct wq_table_entry __percpu *wq_table; @@ -1001,17 +999,14 @@ out: static int deflate_generic_decompress(struct acomp_req *req) { - void *src, *dst; + ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); int ret; - src = kmap_local_page(sg_page(req->src)) + req->src->offset; - dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset; - - ret = crypto_comp_decompress(deflate_generic_tfm, - src, req->slen, dst, &req->dlen); - - kunmap_local(src); - kunmap_local(dst); + acomp_request_set_callback(fbreq, 0, NULL, NULL); + acomp_request_set_params(fbreq, req->src, req->dst, req->slen, + req->dlen); + ret = crypto_acomp_decompress(fbreq); + req->dlen = fbreq->dlen; update_total_sw_decomp_calls(); @@ -1898,15 +1893,6 @@ static int __init iaa_crypto_init_module(void) } nr_cpus_per_node = nr_cpus / nr_nodes; - if (crypto_has_comp("deflate-generic", 0, 0)) - deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0); - - if (IS_ERR_OR_NULL(deflate_generic_tfm)) { - pr_err("IAA could not alloc %s tfm: errcode = %ld\n", - "deflate-generic", PTR_ERR(deflate_generic_tfm)); - return -ENOMEM; - } - ret = iaa_aecs_init_fixed(); if (ret < 0) { 
pr_debug("IAA fixed compression mode init failed\n"); @@ -1948,7 +1934,6 @@ err_verify_attr_create: err_driver_reg: iaa_aecs_cleanup_fixed(); err_aecs_init: - crypto_free_comp(deflate_generic_tfm); goto out; } @@ -1965,7 +1950,6 @@ static void __exit iaa_crypto_cleanup_module(void) &driver_attr_verify_compress); idxd_driver_unregister(&iaa_crypto_driver); iaa_aecs_cleanup_fixed(); - crypto_free_comp(deflate_generic_tfm); pr_debug("cleaned up\n"); } -- cgit v1.2.3 From 980b5705f4e73f567e405cd18337cc32fd51cf79 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sun, 16 Mar 2025 09:21:04 +0800 Subject: crypto: nx - Migrate to scomp API The only remaining user of 842 compression has been migrated to the acomp compression API, and so the NX hardware driver has to follow suit, given that no users of the obsolete 'comp' API remain, and it is going to be removed. So migrate the NX driver code to scomp. These will be wrapped and exposed as acomp implementation via the crypto subsystem's acomp-to-scomp adaptation layer. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-842.c | 33 +++++++++++++++++++-------------- drivers/crypto/nx/nx-842.h | 15 ++++++++------- drivers/crypto/nx/nx-common-powernv.c | 31 +++++++++++++++---------------- drivers/crypto/nx/nx-common-pseries.c | 33 ++++++++++++++++----------------- 4 files changed, 58 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 82214cde2bcd..b950fcce8a9b 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -101,9 +101,13 @@ static int update_param(struct nx842_crypto_param *p, return 0; } -int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) +void *nx842_crypto_alloc_ctx(struct nx842_driver *driver) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); spin_lock_init(&ctx->lock); ctx->driver = driver; @@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); - return -ENOMEM; + kfree(ctx); + return ERR_PTR(-ENOMEM); } - return 0; + return ctx; } -EXPORT_SYMBOL_GPL(nx842_crypto_init); +EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx); -void nx842_crypto_exit(struct crypto_tfm *tfm) +void nx842_crypto_free_ctx(void *p) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = p; kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); } -EXPORT_SYMBOL_GPL(nx842_crypto_exit); +EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); static void check_constraints(struct nx842_constraints *c) { @@ -246,11 +251,11 @@ nospc: return update_param(p, slen, dskip + dlen); } -int nx842_crypto_compress(struct crypto_tfm *tfm, +int nx842_crypto_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) + u8 *dst, unsigned int *dlen, void *pctx) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = pctx; struct nx842_crypto_header *hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr); @@ -431,11 +436,11 @@ usesw: return update_param(p, slen + padding, dlen); } -int nx842_crypto_decompress(struct crypto_tfm *tfm, +int nx842_crypto_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int 
*dlen) + u8 *dst, unsigned int *dlen, void *pctx) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = pctx; struct nx842_crypto_header *hdr; struct nx842_crypto_param p; struct nx842_constraints c = *ctx->driver->constraints; diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 887d4ce3cb49..f5e2c82ba876 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -3,7 +3,6 @@ #ifndef __NX_842_H__ #define __NX_842_H__ -#include #include #include #include @@ -101,6 +100,8 @@ #define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) #define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) +struct crypto_scomp; + static inline unsigned long nx842_get_pa(void *addr) { if (!is_vmalloc_addr(addr)) @@ -182,13 +183,13 @@ struct nx842_crypto_ctx { struct nx842_driver *driver; }; -int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver); -void nx842_crypto_exit(struct crypto_tfm *tfm); -int nx842_crypto_compress(struct crypto_tfm *tfm, +void *nx842_crypto_alloc_ctx(struct nx842_driver *driver); +void nx842_crypto_free_ctx(void *ctx); +int nx842_crypto_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); -int nx842_crypto_decompress(struct crypto_tfm *tfm, + u8 *dst, unsigned int *dlen, void *ctx); +int nx842_crypto_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); + u8 *dst, unsigned int *dlen, void *ctx); #endif /* __NX_842_H__ */ diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 8c859872c183..fd0a98b2fb1b 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -9,6 +9,7 @@ #include "nx-842.h" +#include #include #include @@ -1031,23 +1032,21 @@ static struct nx842_driver nx842_powernv_driver = { .decompress = nx842_powernv_decompress, }; -static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) +static void *nx842_powernv_crypto_alloc_ctx(void) { - return nx842_crypto_init(tfm, &nx842_powernv_driver); + return nx842_crypto_alloc_ctx(&nx842_powernv_driver); } -static struct crypto_alg nx842_powernv_alg = { - .cra_name = "842", - .cra_driver_name = "842-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct nx842_crypto_ctx), - .cra_module = THIS_MODULE, - .cra_init = nx842_powernv_crypto_init, - .cra_exit = nx842_crypto_exit, - .cra_u = { .compress = { - .coa_compress = nx842_crypto_compress, - .coa_decompress = nx842_crypto_decompress } } +static struct scomp_alg nx842_powernv_alg = { + .base.cra_name = "842", + .base.cra_driver_name = "842-nx", + .base.cra_priority = 300, + .base.cra_module = THIS_MODULE, + + .alloc_ctx = nx842_powernv_crypto_alloc_ctx, + .free_ctx = nx842_crypto_free_ctx, + .compress = nx842_crypto_compress, + .decompress = nx842_crypto_decompress, }; static __init int nx_compress_powernv_init(void) @@ -1107,7 +1106,7 @@ static __init int nx_compress_powernv_init(void) nx842_powernv_exec = nx842_exec_vas; } - ret = crypto_register_alg(&nx842_powernv_alg); + ret = crypto_register_scomp(&nx842_powernv_alg); if (ret) { nx_delete_coprocs(); return ret; @@ -1128,7 +1127,7 @@ static void __exit nx_compress_powernv_exit(void) if (!nx842_ct) vas_unregister_api_powernv(); - crypto_unregister_alg(&nx842_powernv_alg); + crypto_unregister_scomp(&nx842_powernv_alg); nx_delete_coprocs(); } diff --git a/drivers/crypto/nx/nx-common-pseries.c 
b/drivers/crypto/nx/nx-common-pseries.c index 56129bdf53ab..f528e072494a 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "nx-842.h" #include "nx_csbcpb.h" /* struct nx_csbcpb */ @@ -1008,23 +1009,21 @@ static struct nx842_driver nx842_pseries_driver = { .decompress = nx842_pseries_decompress, }; -static int nx842_pseries_crypto_init(struct crypto_tfm *tfm) +static void *nx842_pseries_crypto_alloc_ctx(void) { - return nx842_crypto_init(tfm, &nx842_pseries_driver); + return nx842_crypto_alloc_ctx(&nx842_pseries_driver); } -static struct crypto_alg nx842_pseries_alg = { - .cra_name = "842", - .cra_driver_name = "842-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct nx842_crypto_ctx), - .cra_module = THIS_MODULE, - .cra_init = nx842_pseries_crypto_init, - .cra_exit = nx842_crypto_exit, - .cra_u = { .compress = { - .coa_compress = nx842_crypto_compress, - .coa_decompress = nx842_crypto_decompress } } +static struct scomp_alg nx842_pseries_alg = { + .base.cra_name = "842", + .base.cra_driver_name = "842-nx", + .base.cra_priority = 300, + .base.cra_module = THIS_MODULE, + + .alloc_ctx = nx842_pseries_crypto_alloc_ctx, + .free_ctx = nx842_crypto_free_ctx, + .compress = nx842_crypto_compress, + .decompress = nx842_crypto_decompress, }; static int nx842_probe(struct vio_dev *viodev, @@ -1072,7 +1071,7 @@ static int nx842_probe(struct vio_dev *viodev, if (ret) goto error; - ret = crypto_register_alg(&nx842_pseries_alg); + ret = crypto_register_scomp(&nx842_pseries_alg); if (ret) { dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); goto error; @@ -1120,7 +1119,7 @@ static void nx842_remove(struct vio_dev *viodev) if (caps_feat) sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group); - crypto_unregister_alg(&nx842_pseries_alg); + crypto_unregister_scomp(&nx842_pseries_alg); of_reconfig_notifier_unregister(&nx842_of_nb); @@ -1253,7 +1252,7 @@ static void __exit nx842_pseries_exit(void) vas_unregister_api_pseries(); - crypto_unregister_alg(&nx842_pseries_alg); + crypto_unregister_scomp(&nx842_pseries_alg); spin_lock_irqsave(&devdata_spinlock, flags); old_devdata = rcu_dereference_check(devdata, -- cgit v1.2.3 From bd40bf1ad26dc42ead655a7cccabf7e9a0c10769 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sun, 16 Mar 2025 09:21:22 +0800 Subject: crypto: cavium/zip - drop obsolete 'comp' implementation The 'comp' API is obsolete and will be removed, so remove this comp implementation. 
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_crypto.c | 40 --------------------------- drivers/crypto/cavium/zip/zip_crypto.h | 11 -------- drivers/crypto/cavium/zip/zip_main.c | 50 +--------------------------------- 3 files changed, 1 insertion(+), 100 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index a9c3efce8f2d..02e87f2d50db 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -195,46 +195,6 @@ static int zip_decompress(const u8 *src, unsigned int slen, return ret; } -/* Legacy Compress framework start */ -int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_ctx_init(zip_ctx, 0); -} - -int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_ctx_init(zip_ctx, 1); -} - -void zip_free_comp_ctx(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - zip_ctx_exit(zip_ctx); -} - -int zip_comp_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_compress(src, slen, dst, dlen, zip_ctx); -} - -int zip_comp_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_decompress(src, slen, dst, dlen, zip_ctx); -} /* Legacy compress framework end */ - /* SCOMP framework start */ void *zip_alloc_scomp_ctx_deflate(void) { diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h index dbe20bfeb3e9..10899ece2d1f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.h +++ b/drivers/crypto/cavium/zip/zip_crypto.h @@ -46,7 +46,6 @@ #ifndef __ZIP_CRYPTO_H__ #define __ZIP_CRYPTO_H__ -#include #include #include "common.h" #include "zip_deflate.h" @@ -57,16 +56,6 @@ struct zip_kernel_ctx { struct zip_operation zip_decomp; }; -int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm); -int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm); -void zip_free_comp_ctx(struct crypto_tfm *tfm); -int zip_comp_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); -int zip_comp_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - void *zip_alloc_scomp_ctx_deflate(void); void *zip_alloc_scomp_ctx_lzs(void); void zip_free_scomp_ctx(void *zip_ctx); diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index dc5b7bf7e1fd..abd58de4343d 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -371,36 +371,6 @@ static struct pci_driver zip_driver = { /* Kernel Crypto Subsystem Interface */ -static struct crypto_alg zip_comp_deflate = { - .cra_name = "deflate", - .cra_driver_name = "deflate-cavium", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zip_kernel_ctx), - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_init = zip_alloc_comp_ctx_deflate, - .cra_exit = zip_free_comp_ctx, - .cra_u = { .compress = { - .coa_compress = zip_comp_compress, - .coa_decompress = zip_comp_decompress - } } -}; - -static struct crypto_alg zip_comp_lzs = { - .cra_name = "lzs", - .cra_driver_name = "lzs-cavium", - .cra_flags = 
CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zip_kernel_ctx), - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_init = zip_alloc_comp_ctx_lzs, - .cra_exit = zip_free_comp_ctx, - .cra_u = { .compress = { - .coa_compress = zip_comp_compress, - .coa_decompress = zip_comp_decompress - } } -}; - static struct scomp_alg zip_scomp_deflate = { .alloc_ctx = zip_alloc_scomp_ctx_deflate, .free_ctx = zip_free_scomp_ctx, @@ -431,22 +401,10 @@ static int zip_register_compression_device(void) { int ret; - ret = crypto_register_alg(&zip_comp_deflate); - if (ret < 0) { - zip_err("Deflate algorithm registration failed\n"); - return ret; - } - - ret = crypto_register_alg(&zip_comp_lzs); - if (ret < 0) { - zip_err("LZS algorithm registration failed\n"); - goto err_unregister_alg_deflate; - } - ret = crypto_register_scomp(&zip_scomp_deflate); if (ret < 0) { zip_err("Deflate scomp algorithm registration failed\n"); - goto err_unregister_alg_lzs; + return ret; } ret = crypto_register_scomp(&zip_scomp_lzs); @@ -459,18 +417,12 @@ static int zip_register_compression_device(void) err_unregister_scomp_deflate: crypto_unregister_scomp(&zip_scomp_deflate); -err_unregister_alg_lzs: - crypto_unregister_alg(&zip_comp_lzs); -err_unregister_alg_deflate: - crypto_unregister_alg(&zip_comp_deflate); return ret; } static void zip_unregister_compression_device(void) { - crypto_unregister_alg(&zip_comp_deflate); - crypto_unregister_alg(&zip_comp_lzs); crypto_unregister_scomp(&zip_scomp_deflate); crypto_unregister_scomp(&zip_scomp_lzs); } -- cgit v1.2.3
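The nx-842 and cavium/zip conversions above follow the same pattern: the legacy CRYPTO_ALG_TYPE_COMPRESS crypto_alg, whose callbacks pulled their state out of crypto_tfm_ctx(), is replaced by a struct scomp_alg whose compress/decompress hooks receive an explicit per-context pointer, and registration switches from crypto_register_alg() to crypto_register_scomp(). Below is a minimal sketch of that pattern; every "example_" name is hypothetical, and only the scomp_alg field layout, the callback signatures and the register/unregister calls are taken from the diffs above.

/*
 * Sketch of an scomp conversion, assuming the callback signatures shown in
 * the nx-842 and cavium/zip diffs above.  Not a real driver: the compress
 * and decompress bodies are stubs standing in for hardware submission.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <crypto/internal/scomp.h>

struct example_ctx {
	void *wmem;		/* per-context scratch owned by the driver */
};

static void *example_alloc_ctx(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	/* scomp reports allocation failure via ERR_PTR, as in nx842_crypto_alloc_ctx() */
	return ctx ?: ERR_PTR(-ENOMEM);
}

static void example_free_ctx(void *ctx)
{
	kfree(ctx);
}

/* scomp hooks get the context passed in explicitly instead of via crypto_tfm_ctx() */
static int example_compress(struct crypto_scomp *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen, void *ctx)
{
	return -EOPNOTSUPP;	/* hardware/firmware submission would go here */
}

static int example_decompress(struct crypto_scomp *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen, void *ctx)
{
	return -EOPNOTSUPP;
}

static struct scomp_alg example_scomp = {
	.base.cra_name		= "deflate",
	.base.cra_driver_name	= "deflate-example",
	.base.cra_priority	= 300,
	.base.cra_module	= THIS_MODULE,

	.alloc_ctx		= example_alloc_ctx,
	.free_ctx		= example_free_ctx,
	.compress		= example_compress,
	.decompress		= example_decompress,
};

static int __init example_init(void)
{
	return crypto_register_scomp(&example_scomp);
}

static void __exit example_exit(void)
{
	crypto_unregister_scomp(&example_scomp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

As the nx commit message notes, an algorithm registered this way remains reachable by acomp users, since the crypto core wraps scomp implementations through its acomp-to-scomp adaptation layer.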