author     Linus Torvalds <torvalds@linux-foundation.org>    2009-06-13 13:08:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-13 13:08:01 -0700
commit     32f44d62e463f66307513e90c09b4ceeac13cc22
tree       a0e0c33b3c9c4474787ebb007ff15d6600fb053f
parent     f3ad116588151b3371ae4e092290e4f48e62b8bb
parent     08ced854fc4a979d9e59ba01000bf96e7057cfbc
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (35 commits)
  hwrng: timeriomem - Fix potential oops (request_mem_region/__devinit)
  crypto: api - Use formatting of module name
  crypto: testmgr - Allow hash test vectors longer than a page
  crypto: testmgr - Check all test vector lengths
  crypto: hifn_795x - fix __dev{init,exit} markings
  crypto: tcrypt - Do not exit on success in fips mode
  crypto: compress - Return produced bytes in crypto_{,de}compress_{update,final}
  hwrng: via_rng - Support VIA Nano hardware RNG on X86_64 builds
  hwrng: via_rng - Support VIA Nano hardware RNG
  hwrng: via_rng - The VIA Hardware RNG driver is for the CPU, not Chipset
  crypto: testmgr - Skip algs not flagged fips_allowed in fips mode
  crypto: testmgr - Mark algs allowed in fips mode
  crypto: testmgr - Add ctr(aes) test vectors
  crypto: testmgr - Dynamically allocate xbuf and axbuf
  crypto: testmgr - Print self-test pass notices in fips mode
  crypto: testmgr - Catch base cipher self-test failures in fips mode
  crypto: testmgr - Add ansi_cprng test vectors
  crypto: testmgr - Add infrastructure for ansi_cprng self-tests
  crypto: testmgr - Add self-tests for rfc4309(ccm(aes))
  crypto: testmgr - Handle AEAD test vectors expected to fail verification
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/crypto/Makefile              2
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  267
-rw-r--r--  arch/x86/crypto/fpu.c               166
3 files changed, 434 insertions(+), 1 deletion(-)
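
After this merge, kernel code picks up the accelerated modes transparently: "ctr-aes-aesni" and friends register at priority 400, so an ordinary crypto API lookup of "ctr(aes)" prefers them over the generic implementation on AES-NI hardware. A minimal sketch of such a lookup follows; the function name is hypothetical, but the allocation calls are the standard 2.6.30-era ablkcipher API, not code from this commit:

	#include <linux/crypto.h>
	#include <linux/err.h>

	static int example_get_ctr_aes(void)
	{
		struct crypto_ablkcipher *tfm;

		/* Resolves to ctr-aes-aesni when AES-NI is available,
		 * falling back to the generic ctr(aes) template otherwise. */
		tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* ... crypto_ablkcipher_setkey(), submit requests, ... */

		crypto_free_ablkcipher(tfm);
		return 0;
	}
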
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index ebe7deedd5b..cfb0010fa94 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
+obj-$(CONFIG_CRYPTO_FPU) += fpu.o
+
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 02af0af6549..4e663398f77 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -21,6 +21,22 @@
#include <asm/i387.h>
#include <asm/aes.h>
+#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
+#define HAS_CTR
+#endif
+
+#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
+#define HAS_LRW
+#endif
+
+#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
+#define HAS_PCBC
+#endif
+
+#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
+#define HAS_XTS
+#endif
+
struct async_aes_ctx {
struct cryptd_ablkcipher *cryptd_tfm;
};
@@ -137,6 +153,41 @@ static struct crypto_alg aesni_alg = {
}
};
+static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+ aesni_enc(ctx, dst, src);
+}
+
+static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+ aesni_dec(ctx, dst, src);
+}
+
+static struct crypto_alg __aesni_alg = {
+ .cra_name = "__aes-aesni",
+ .cra_driver_name = "__driver-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_set_key,
+ .cia_encrypt = __aes_encrypt,
+ .cia_decrypt = __aes_decrypt
+ }
+ }
+};
+
static int ecb_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
@@ -277,8 +328,16 @@ static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
+ int err;
- return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
+ crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ablkcipher_setkey(child, key, key_len);
+ crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
+ & CRYPTO_TFM_RES_MASK);
+ return err;
}
static int ablk_encrypt(struct ablkcipher_request *req)
@@ -411,6 +470,163 @@ static struct crypto_alg ablk_cbc_alg = {
},
};
+#ifdef HAS_CTR
+static int ablk_ctr_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
+ 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_ctr_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
+ .cra_init = ablk_ctr_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ .geniv = "chainiv",
+ },
+ },
+};
+#endif
+
+#ifdef HAS_LRW
+static int ablk_lrw_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
+ 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_lrw_alg = {
+ .cra_name = "lrw(aes)",
+ .cra_driver_name = "lrw-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
+ .cra_init = ablk_lrw_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+};
+#endif
+
+#ifdef HAS_PCBC
+static int ablk_pcbc_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
+ 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_pcbc_alg = {
+ .cra_name = "pcbc(aes)",
+ .cra_driver_name = "pcbc-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
+ .cra_init = ablk_pcbc_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+};
+#endif
+
+#ifdef HAS_XTS
+static int ablk_xts_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
+ 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
+ .cra_init = ablk_xts_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+};
+#endif
+
static int __init aesni_init(void)
{
int err;
@@ -421,6 +637,8 @@ static int __init aesni_init(void)
}
if ((err = crypto_register_alg(&aesni_alg)))
goto aes_err;
+ if ((err = crypto_register_alg(&__aesni_alg)))
+ goto __aes_err;
if ((err = crypto_register_alg(&blk_ecb_alg)))
goto blk_ecb_err;
if ((err = crypto_register_alg(&blk_cbc_alg)))
@@ -429,9 +647,41 @@ static int __init aesni_init(void)
goto ablk_ecb_err;
if ((err = crypto_register_alg(&ablk_cbc_alg)))
goto ablk_cbc_err;
+#ifdef HAS_CTR
+ if ((err = crypto_register_alg(&ablk_ctr_alg)))
+ goto ablk_ctr_err;
+#endif
+#ifdef HAS_LRW
+ if ((err = crypto_register_alg(&ablk_lrw_alg)))
+ goto ablk_lrw_err;
+#endif
+#ifdef HAS_PCBC
+ if ((err = crypto_register_alg(&ablk_pcbc_alg)))
+ goto ablk_pcbc_err;
+#endif
+#ifdef HAS_XTS
+ if ((err = crypto_register_alg(&ablk_xts_alg)))
+ goto ablk_xts_err;
+#endif
return err;
+#ifdef HAS_XTS
+ablk_xts_err:
+#endif
+#ifdef HAS_PCBC
+ crypto_unregister_alg(&ablk_pcbc_alg);
+ablk_pcbc_err:
+#endif
+#ifdef HAS_LRW
+ crypto_unregister_alg(&ablk_lrw_alg);
+ablk_lrw_err:
+#endif
+#ifdef HAS_CTR
+ crypto_unregister_alg(&ablk_ctr_alg);
+ablk_ctr_err:
+#endif
+ crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
@@ -439,6 +689,8 @@ ablk_ecb_err:
blk_cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
+ crypto_unregister_alg(&__aesni_alg);
+__aes_err:
crypto_unregister_alg(&aesni_alg);
aes_err:
return err;
@@ -446,10 +698,23 @@ aes_err:
static void __exit aesni_exit(void)
{
+#ifdef HAS_XTS
+ crypto_unregister_alg(&ablk_xts_alg);
+#endif
+#ifdef HAS_PCBC
+ crypto_unregister_alg(&ablk_pcbc_alg);
+#endif
+#ifdef HAS_LRW
+ crypto_unregister_alg(&ablk_lrw_alg);
+#endif
+#ifdef HAS_CTR
+ crypto_unregister_alg(&ablk_ctr_alg);
+#endif
crypto_unregister_alg(&ablk_cbc_alg);
crypto_unregister_alg(&ablk_ecb_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
+ crypto_unregister_alg(&__aesni_alg);
crypto_unregister_alg(&aesni_alg);
}
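
The init path above extends this file's layered goto unwind: each crypto_register_alg() failure jumps to a label that unregisters everything registered before it, and the #ifdef'd labels keep the chain intact when optional modes are compiled out. Reduced to two algorithms, the idiom looks like the sketch below (first_alg and second_alg are illustrative placeholders, not names from the commit):

	#include <linux/crypto.h>
	#include <linux/init.h>

	static struct crypto_alg first_alg, second_alg;	/* assumed defined */

	static int __init example_init(void)
	{
		int err;

		if ((err = crypto_register_alg(&first_alg)))
			goto first_err;
		if ((err = crypto_register_alg(&second_alg)))
			goto second_err;
		return 0;

	second_err:
		crypto_unregister_alg(&first_alg);
	first_err:
		return err;
	}
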
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
new file mode 100644
index 00000000000..5f9781a3815
--- /dev/null
+++ b/arch/x86/crypto/fpu.c
@@ -0,0 +1,166 @@
+/*
+ * FPU: Wrapper for blkcipher touching fpu
+ *
+ * Copyright (c) Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/i387.h>
+
+struct crypto_fpu_ctx {
+ struct crypto_blkcipher *child;
+};
+
+static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_blkcipher *child = ctx->child;
+ int err;
+
+ crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_blkcipher_setkey(child, key, keylen);
+ crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ int err;
+ struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
+ struct crypto_blkcipher *child = ctx->child;
+ struct blkcipher_desc desc = {
+ .tfm = child,
+ .info = desc_in->info,
+ .flags = desc_in->flags,
+ };
+
+ kernel_fpu_begin();
+ err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
+ kernel_fpu_end();
+ return err;
+}
+
+static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ int err;
+ struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
+ struct crypto_blkcipher *child = ctx->child;
+ struct blkcipher_desc desc = {
+ .tfm = child,
+ .info = desc_in->info,
+ .flags = desc_in->flags,
+ };
+
+ kernel_fpu_begin();
+ err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
+ kernel_fpu_end();
+ return err;
+}
+
+static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_blkcipher *cipher;
+
+ cipher = crypto_spawn_blkcipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_blkcipher(ctx->child);
+}
+
+static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
+{
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ if (err)
+ return ERR_PTR(err);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ inst = crypto_alloc_instance("fpu", alg);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ inst->alg.cra_flags = alg->cra_flags;
+ inst->alg.cra_priority = alg->cra_priority;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+ inst->alg.cra_type = alg->cra_type;
+ inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
+ inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
+ inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
+ inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
+ inst->alg.cra_init = crypto_fpu_init_tfm;
+ inst->alg.cra_exit = crypto_fpu_exit_tfm;
+ inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
+ inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
+ inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return inst;
+}
+
+static void crypto_fpu_free(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(inst);
+}
+
+static struct crypto_template crypto_fpu_tmpl = {
+ .name = "fpu",
+ .alloc = crypto_fpu_alloc,
+ .free = crypto_fpu_free,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_fpu_module_init(void)
+{
+ return crypto_register_template(&crypto_fpu_tmpl);
+}
+
+static void __exit crypto_fpu_module_exit(void)
+{
+ crypto_unregister_template(&crypto_fpu_tmpl);
+}
+
+module_init(crypto_fpu_module_init);
+module_exit(crypto_fpu_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FPU block cipher wrapper");
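
The point of this template is the kernel_fpu_begin()/kernel_fpu_end() bracket in the encrypt and decrypt paths: SSE and AES-NI instructions clobber FPU state that normally belongs to user space, so kernel code must save and restore it around their use. Wrapping the inner blkcipher lets one bracket cover the walk over a whole request instead of paying the state save per block. The bare idiom, as a sketch of the general pattern rather than code from this commit:

	#include <asm/i387.h>

	static void example_sse_section(void)
	{
		kernel_fpu_begin();	/* save FPU/SSE state, disable preemption */
		/* ... SSE/AES-NI instructions are safe to execute here ... */
		kernel_fpu_end();	/* restore state, re-enable preemption */
	}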