From: Linus Torvalds
Date: Wed, 4 Nov 2015 17:11:12 +0000 (-0800)
Subject: Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
X-Git-Tag: v4.4-rc1~142
X-Git-Url: https://git.kernelconcepts.de/?p=karo-tx-linux.git;a=commitdiff_plain;h=ccc9d4a6d640cbde05d519edeb727881646cf71b

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Add support for cipher output IVs in testmgr
   - Add missing crypto_ahash_blocksize helper
   - Mark authenc and des ciphers as not allowed under FIPS.

  Algorithms:
   - Add CRC support to 842 compression
   - Add keywrap algorithm
   - A number of changes to the akcipher interface:
      + Separate functions for setting public/private keys.
      + Use SG lists.

  Drivers:
   - Add Intel SHA Extension optimised SHA1 and SHA256
   - Use dma_map_sg instead of custom functions in crypto drivers
   - Add support for STM32 RNG
   - Add support for ST RNG
   - Add Device Tree support to exynos RNG driver
   - Add support for mxs-dcp crypto device on MX6SL
   - Add xts(aes) support to caam
   - Add ctr(aes) and xts(aes) support to qat
   - A large set of fixes from Russell King for the marvell/cesa driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (115 commits)
  crypto: asymmetric_keys - Fix unaligned access in x509_get_sig_params()
  crypto: akcipher - Don't #include crypto/public_key.h as the contents aren't used
  hwrng: exynos - Add Device Tree support
  hwrng: exynos - Fix missing configuration after suspend to RAM
  hwrng: exynos - Add timeout for waiting on init done
  dt-bindings: rng: Describe Exynos4 PRNG bindings
  crypto: marvell/cesa - use __le32 for hardware descriptors
  crypto: marvell/cesa - fix missing cpu_to_le32() in mv_cesa_dma_add_op()
  crypto: marvell/cesa - use memcpy_fromio()/memcpy_toio()
  crypto: marvell/cesa - use gfp_t for gfp flags
  crypto: marvell/cesa - use dma_addr_t for cur_dma
  crypto: marvell/cesa - use readl_relaxed()/writel_relaxed()
  crypto: caam - fix indentation of close braces
  crypto: caam - only export the state we really need to export
  crypto: caam - fix non-block aligned hash calculation
  crypto: caam - avoid needlessly saving and restoring caam_hash_ctx
  crypto: caam - print errno code when hash registration fails
  crypto: marvell/cesa - fix memory leak
  crypto: marvell/cesa - fix first-fragment handling in mv_cesa_ahash_dma_last_req()
  crypto: marvell/cesa - rearrange handling for sw padded hashes
  ...
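Note on the "Add missing crypto_ahash_blocksize helper" item above: a minimal sketch of how a caller might use that accessor next to the existing crypto_ahash_digestsize(); the function name query_sha256_sizes() and the printout are illustrative only, not part of this merge.

#include <linux/kernel.h>
#include <linux/err.h>
#include <crypto/hash.h>

/* Illustrative only: query the block and digest sizes of a sha256 ahash. */
static int query_sha256_sizes(void)
{
	struct crypto_ahash *tfm;
	unsigned int bs, ds;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	bs = crypto_ahash_blocksize(tfm);	/* new helper: 64 for sha256 */
	ds = crypto_ahash_digestsize(tfm);	/* existing helper: 32 for sha256 */
	pr_info("sha256: blocksize=%u digestsize=%u\n", bs, ds);

	crypto_free_ahash(tfm);
	return 0;
}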
---

ccc9d4a6d640cbde05d519edeb727881646cf71b
diff --cc arch/x86/Makefile
index 2dfaa72260b4,a8009c77918a..4086abca0b32
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@@ -171,9 -165,11 +171,11 @@@ asinstr += $(call as-instr,pshufb %xmm0
  asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
  avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
  avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
+ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
+ sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
  
- KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
- KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
 -KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
 -KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
++KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
++KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
  
  LDFLAGS := -m elf_$(UTS_MACHINE)
diff --cc arch/x86/crypto/sha1_ssse3_glue.c
index 00212c32d4db,c934197fe84a..dd14616b7739
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@@ -118,10 -110,62 +110,62 @@@ static struct shash_alg sha1_ssse3_alg
  	}
  };
  
+ static int register_sha1_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		return crypto_register_shash(&sha1_ssse3_alg);
+ 	return 0;
+ }
+ 
+ static void unregister_sha1_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		crypto_unregister_shash(&sha1_ssse3_alg);
+ }
+ 
  #ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
+ 				   unsigned int rounds);
+ 
+ static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
+ 			   unsigned int len)
+ {
+ 	return sha1_update(desc, data, len,
+ 			   (sha1_transform_fn *) sha1_transform_avx);
+ }
+ 
+ static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
+ 			  unsigned int len, u8 *out)
+ {
+ 	return sha1_finup(desc, data, len, out,
+ 			  (sha1_transform_fn *) sha1_transform_avx);
+ }
+ 
+ static int sha1_avx_final(struct shash_desc *desc, u8 *out)
+ {
+ 	return sha1_avx_finup(desc, NULL, 0, out);
+ }
+ 
+ static struct shash_alg sha1_avx_alg = {
+ 	.digestsize	= SHA1_DIGEST_SIZE,
+ 	.init		= sha1_base_init,
+ 	.update		= sha1_avx_update,
+ 	.final		= sha1_avx_final,
+ 	.finup		= sha1_avx_finup,
+ 	.descsize	= sizeof(struct sha1_state),
+ 	.base		= {
+ 		.cra_name	= "sha1",
+ 		.cra_driver_name = "sha1-avx",
+ 		.cra_priority	= 160,
+ 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
+ 		.cra_blocksize	= SHA1_BLOCK_SIZE,
+ 		.cra_module	= THIS_MODULE,
+ 	}
+ };
+ 
+ static bool avx_usable(void)
  {
- 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
  		if (cpu_has_avx)
  			pr_info("AVX detected but unusable.\n");
  		return false;
diff --cc arch/x86/crypto/sha256_ssse3_glue.c
index 0e0e85aea634,863e2f6aad13..5f4d6086dc59
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@@ -127,10 -130,77 +130,77 @@@ static struct shash_alg sha256_ssse3_al
  	}
  } };
  
+ static int register_sha256_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		return crypto_register_shashes(sha256_ssse3_algs,
+ 				ARRAY_SIZE(sha256_ssse3_algs));
+ 	return 0;
+ }
+ 
+ static void unregister_sha256_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		crypto_unregister_shashes(sha256_ssse3_algs,
+ 				ARRAY_SIZE(sha256_ssse3_algs));
+ }
+ 
  #ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
+ 				     u64 rounds);
+ 
+ static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
+ 			     unsigned int len)
+ {
+ 	return sha256_update(desc, data, len, sha256_transform_avx);
+ }
+ 
+ static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
+ 			    unsigned int len, u8 *out)
+ {
+ 	return sha256_finup(desc, data, len, out, sha256_transform_avx);
+ }
+ 
+ static int sha256_avx_final(struct shash_desc *desc, u8 *out)
+ {
+ 	return sha256_avx_finup(desc, NULL, 0, out);
+ }
+ 
+ static struct shash_alg sha256_avx_algs[] = { {
+ 	.digestsize	= SHA256_DIGEST_SIZE,
+ 	.init		= sha256_base_init,
+ 	.update		= sha256_avx_update,
+ 	.final		= sha256_avx_final,
+ 	.finup		= sha256_avx_finup,
+ 	.descsize	= sizeof(struct sha256_state),
+ 	.base		= {
+ 		.cra_name	= "sha256",
+ 		.cra_driver_name = "sha256-avx",
+ 		.cra_priority	= 160,
+ 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
+ 		.cra_blocksize	= SHA256_BLOCK_SIZE,
+ 		.cra_module	= THIS_MODULE,
+ 	}
+ }, {
+ 	.digestsize	= SHA224_DIGEST_SIZE,
+ 	.init		= sha224_base_init,
+ 	.update		= sha256_avx_update,
+ 	.final		= sha256_avx_final,
+ 	.finup		= sha256_avx_finup,
+ 	.descsize	= sizeof(struct sha256_state),
+ 	.base		= {
+ 		.cra_name	= "sha224",
+ 		.cra_driver_name = "sha224-avx",
+ 		.cra_priority	= 160,
+ 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
+ 		.cra_blocksize	= SHA224_BLOCK_SIZE,
+ 		.cra_module	= THIS_MODULE,
+ 	}
+ } };
+ 
+ static bool avx_usable(void)
  {
- 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
  		if (cpu_has_avx)
  			pr_info("AVX detected but unusable.\n");
  		return false;
diff --cc arch/x86/crypto/sha512_ssse3_glue.c
index 0c8c38c101ac,0dfe9a2ba64b..34e5083d6f36
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@@ -126,10 -130,27 +130,27 @@@ static struct shash_alg sha512_ssse3_al
  	}
  } };
  
+ static int register_sha512_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		return crypto_register_shashes(sha512_ssse3_algs,
+ 			ARRAY_SIZE(sha512_ssse3_algs));
+ 	return 0;
+ }
+ 
+ static void unregister_sha512_ssse3(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_SSSE3))
+ 		crypto_unregister_shashes(sha512_ssse3_algs,
+ 			ARRAY_SIZE(sha512_ssse3_algs));
+ }
+ 
  #ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
+ 				     u64 rounds);
+ static bool avx_usable(void)
  {
- 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
  		if (cpu_has_avx)
  			pr_info("AVX detected but unusable.\n");
  		return false;
diff --cc drivers/crypto/marvell/cesa.h
index bc2a55bc35e4,186868e54ebc..bd985e72520b
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@@ -685,35 -688,14 +688,41 @@@ static inline u32 mv_cesa_get_int_mask(
  	return engine->int_mask;
  }
  
+ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
+ {
+ 	return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
+ 		CESA_SA_DESC_CFG_FIRST_FRAG;
+ }
+ 
  int mv_cesa_queue_req(struct crypto_async_request *req);
  
 +/*
 + * Helper function that indicates whether a crypto request needs to be
 + * cleaned up or not after being enqueued using mv_cesa_queue_req().
 + */
 +static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
 +					    int ret)
 +{
 +	/*
 +	 * The queue still had some space, the request was queued
 +	 * normally, so there's no need to clean it up.
 +	 */
 +	if (ret == -EINPROGRESS)
 +		return false;
 +
 +	/*
 +	 * The queue had not space left, but since the request is
 +	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
 +	 * the backlog and will be processed later. There's no need to
 +	 * clean it up.
 +	 */
 +	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
 +		return false;
 +
 +	/* Request wasn't queued, we need to clean it up */
 +	return true;
 +}
 +
  /* TDMA functions */
  
  static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,