From 04cadb4fe0341304741ef60a297366b553f0ce36 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Oct 2025 17:10:47 -0700 Subject: lib/crypto: Add FIPS self-tests for SHA-1 and SHA-2 Add FIPS cryptographic algorithm self-tests for all SHA-1 and SHA-2 algorithms. Following the "Implementation Guidance for FIPS 140-3" document, to achieve this it's sufficient to just test a single test vector for each of HMAC-SHA1, HMAC-SHA256, and HMAC-SHA512. Just run these tests in the initcalls, following the example of e.g. crypto/kdf_sp800108.c. Note that this should meet the FIPS self-test requirement even in the built-in case, given that the initcalls run before userspace, storage, network, etc. are accessible. This does not fix a regression, seeing as lib/ has had SHA-1 support since 2005 and SHA-256 support since 2018. Neither ever had FIPS self-tests. Moreover, fips=1 support has always been an unfinished feature upstream. However, with lib/ now being used more widely, it's now seeing more scrutiny and people seem to want these now [1][2]. [1] https://lore.kernel.org/r/3226361.1758126043@warthog.procyon.org.uk/ [2] https://lore.kernel.org/r/f31dbb22-0add-481c-aee0-e337a7731f8e@oracle.com/ Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251011001047.51886-1-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/fips.h | 38 ++++++++++++++++++++++++++++++++++++++ lib/crypto/sha1.c | 19 ++++++++++++++++++- lib/crypto/sha256.c | 26 ++++++++++++++++++++++---- lib/crypto/sha512.c | 19 ++++++++++++++++++- 4 files changed, 96 insertions(+), 6 deletions(-) create mode 100644 lib/crypto/fips.h (limited to 'lib/crypto') diff --git a/lib/crypto/fips.h b/lib/crypto/fips.h new file mode 100644 index 000000000000..78a1bdd33a15 --- /dev/null +++ b/lib/crypto/fips.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* This file was generated by: gen-fips-testvecs.py */ + +#include + +static const u8 fips_test_data[] __initconst __maybe_unused = { + 0x66, 0x69, 0x70, 0x73, 0x20, 0x74, 0x65, 0x73, + 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, +}; + +static const u8 fips_test_key[] __initconst __maybe_unused = { + 0x66, 0x69, 0x70, 0x73, 0x20, 0x74, 0x65, 0x73, + 0x74, 0x20, 0x6b, 0x65, 0x79, 0x00, 0x00, 0x00, +}; + +static const u8 fips_test_hmac_sha1_value[] __initconst __maybe_unused = { + 0x29, 0xa9, 0x88, 0xb8, 0x5c, 0xb4, 0xaf, 0x4b, + 0x97, 0x2a, 0xee, 0x87, 0x5b, 0x0a, 0x02, 0x55, + 0x99, 0xbf, 0x86, 0x78, +}; + +static const u8 fips_test_hmac_sha256_value[] __initconst __maybe_unused = { + 0x59, 0x25, 0x85, 0xcc, 0x40, 0xe9, 0x64, 0x2f, + 0xe9, 0xbf, 0x82, 0xb7, 0xd3, 0x15, 0x3d, 0x43, + 0x22, 0x0b, 0x4c, 0x00, 0x90, 0x14, 0x25, 0xcf, + 0x9e, 0x13, 0x2b, 0xc2, 0x30, 0xe6, 0xe8, 0x93, +}; + +static const u8 fips_test_hmac_sha512_value[] __initconst __maybe_unused = { + 0x6b, 0xea, 0x5d, 0x27, 0x49, 0x5b, 0x3f, 0xea, + 0xde, 0x2d, 0xfa, 0x32, 0x75, 0xdb, 0x77, 0xc8, + 0x26, 0xe9, 0x4e, 0x95, 0x4d, 0xad, 0x88, 0x02, + 0x87, 0xf9, 0x52, 0x0a, 0xd1, 0x92, 0x80, 0x1d, + 0x92, 0x7e, 0x3c, 0xbd, 0xb1, 0x3c, 0x49, 0x98, + 0x44, 0x9c, 0x8f, 0xee, 0x3f, 0x02, 0x71, 0x51, + 0x57, 0x0b, 0x15, 0x38, 0x95, 0xd8, 0xa3, 0x81, + 0xba, 0xb3, 0x15, 0x37, 0x5c, 0x6d, 0x57, 0x2b, +}; diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c index 5904e4ae85d2..52788278cd17 100644 --- a/lib/crypto/sha1.c +++ b/lib/crypto/sha1.c @@ -12,6 +12,7 @@ #include #include #include +#include "fips.h" static const struct sha1_block_state sha1_iv = { .h = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 
}, @@ -330,10 +331,26 @@ void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len, } EXPORT_SYMBOL_GPL(hmac_sha1_usingrawkey); -#ifdef sha1_mod_init_arch +#if defined(sha1_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS) static int __init sha1_mod_init(void) { +#ifdef sha1_mod_init_arch sha1_mod_init_arch(); +#endif + if (fips_enabled) { + /* + * FIPS cryptographic algorithm self-test. As per the FIPS + * Implementation Guidance, testing HMAC-SHA1 satisfies the test + * requirement for SHA-1 too. + */ + u8 mac[SHA1_DIGEST_SIZE]; + + hmac_sha1_usingrawkey(fips_test_key, sizeof(fips_test_key), + fips_test_data, sizeof(fips_test_data), + mac); + if (memcmp(fips_test_hmac_sha1_value, mac, sizeof(mac)) != 0) + panic("sha1: FIPS self-test failed\n"); + } return 0; } subsys_initcall(sha1_mod_init); diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 881b935418ce..5d6b77e7e141 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -17,6 +17,7 @@ #include #include #include +#include "fips.h" static const struct sha256_block_state sha224_iv = { .h = { @@ -269,8 +270,8 @@ void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]) EXPORT_SYMBOL(sha256); /* - * Pre-boot environment (as indicated by __DISABLE_EXPORTS being defined) - * doesn't need either HMAC support or interleaved hashing support + * Pre-boot environments (as indicated by __DISABLE_EXPORTS being defined) just + * need the generic SHA-256 code. Omit all other features from them. */ #ifndef __DISABLE_EXPORTS @@ -477,12 +478,27 @@ void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len, hmac_sha256_final(&ctx, out); } EXPORT_SYMBOL_GPL(hmac_sha256_usingrawkey); -#endif /* !__DISABLE_EXPORTS */ -#ifdef sha256_mod_init_arch +#if defined(sha256_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS) static int __init sha256_mod_init(void) { +#ifdef sha256_mod_init_arch sha256_mod_init_arch(); +#endif + if (fips_enabled) { + /* + * FIPS cryptographic algorithm self-test. As per the FIPS + * Implementation Guidance, testing HMAC-SHA256 satisfies the + * test requirement for SHA-224, SHA-256, and HMAC-SHA224 too. + */ + u8 mac[SHA256_DIGEST_SIZE]; + + hmac_sha256_usingrawkey(fips_test_key, sizeof(fips_test_key), + fips_test_data, sizeof(fips_test_data), + mac); + if (memcmp(fips_test_hmac_sha256_value, mac, sizeof(mac)) != 0) + panic("sha256: FIPS self-test failed\n"); + } return 0; } subsys_initcall(sha256_mod_init); @@ -493,5 +509,7 @@ static void __exit sha256_mod_exit(void) module_exit(sha256_mod_exit); #endif +#endif /* !__DISABLE_EXPORTS */ + MODULE_DESCRIPTION("SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions"); MODULE_LICENSE("GPL"); diff --git a/lib/crypto/sha512.c b/lib/crypto/sha512.c index d8062188be98..605eab51aabd 100644 --- a/lib/crypto/sha512.c +++ b/lib/crypto/sha512.c @@ -17,6 +17,7 @@ #include #include #include +#include "fips.h" static const struct sha512_block_state sha384_iv = { .h = { @@ -405,10 +406,26 @@ void hmac_sha512_usingrawkey(const u8 *raw_key, size_t raw_key_len, } EXPORT_SYMBOL_GPL(hmac_sha512_usingrawkey); -#ifdef sha512_mod_init_arch +#if defined(sha512_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS) static int __init sha512_mod_init(void) { +#ifdef sha512_mod_init_arch sha512_mod_init_arch(); +#endif + if (fips_enabled) { + /* + * FIPS cryptographic algorithm self-test. As per the FIPS + * Implementation Guidance, testing HMAC-SHA512 satisfies the + * test requirement for SHA-384, SHA-512, and HMAC-SHA384 too. 
+ */ + u8 mac[SHA512_DIGEST_SIZE]; + + hmac_sha512_usingrawkey(fips_test_key, sizeof(fips_test_key), + fips_test_data, sizeof(fips_test_data), + mac); + if (memcmp(fips_test_hmac_sha512_value, mac, sizeof(mac)) != 0) + panic("sha512: FIPS self-test failed\n"); + } return 0; } subsys_initcall(sha512_mod_init); -- cgit v1.2.3 From 50b8e36994a042103ea92b6d9f6d7de725f9ac5f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Oct 2025 21:30:57 -0700 Subject: lib/crypto: blake2s: Adjust parameter order of blake2s() Reorder the parameters of blake2s() from (out, in, key, outlen, inlen, keylen) to (key, keylen, in, inlen, out, outlen). This aligns BLAKE2s with the common conventions of pairing buffers and their lengths, and having outputs follow inputs. This is widely used elsewhere in lib/crypto/ and crypto/, and even elsewhere in the BLAKE2s code itself such as blake2s_init_key() and blake2s_final(). So blake2s() was a bit of an exception. Notably, this results in the same order as hmac_*_usingrawkey(). Note that since the type signature changed, it's not possible for a blake2s() call site to be silently missed. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251018043106.375964-2-ebiggers@kernel.org Signed-off-by: Eric Biggers --- drivers/char/random.c | 4 ++-- drivers/net/wireguard/cookie.c | 4 ++-- drivers/net/wireguard/noise.c | 4 ++-- include/crypto/blake2s.h | 6 +++--- lib/crypto/tests/blake2s_kunit.c | 16 ++++++++-------- 5 files changed, 17 insertions(+), 17 deletions(-) (limited to 'lib/crypto') diff --git a/drivers/char/random.c b/drivers/char/random.c index b8b24b6ed3fe..422c5c76571b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -701,7 +701,7 @@ static void extract_entropy(void *buf, size_t len) /* next_key = HASHPRF(seed, RDSEED || 0) */ block.counter = 0; - blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); + blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key)); blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); spin_unlock_irqrestore(&input_pool.lock, flags); @@ -711,7 +711,7 @@ static void extract_entropy(void *buf, size_t len) i = min_t(size_t, len, BLAKE2S_HASH_SIZE); /* output = HASHPRF(seed, RDSEED || ++counter) */ ++block.counter; - blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); + blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i); len -= i; buf += i; } diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c index 94d0a7206084..be1b83aae03b 100644 --- a/drivers/net/wireguard/cookie.c +++ b/drivers/net/wireguard/cookie.c @@ -77,7 +77,7 @@ static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, { len = len - sizeof(struct message_macs) + offsetof(struct message_macs, mac1); - blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); + blake2s(key, NOISE_SYMMETRIC_KEY_LEN, message, len, mac1, COOKIE_LEN); } static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, @@ -85,7 +85,7 @@ static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, { len = len - sizeof(struct message_macs) + offsetof(struct message_macs, mac2); - blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); + blake2s(cookie, COOKIE_LEN, message, len, mac2, COOKIE_LEN); } static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 
7eb9a23a3d4d..306abb876c80 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -35,8 +35,8 @@ void __init wg_noise_init(void) { struct blake2s_state blake; - blake2s(handshake_init_chaining_key, handshake_name, NULL, - NOISE_HASH_LEN, sizeof(handshake_name), 0); + blake2s(NULL, 0, handshake_name, sizeof(handshake_name), + handshake_init_chaining_key, NOISE_HASH_LEN); blake2s_init(&blake, NOISE_HASH_LEN); blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); blake2s_update(&blake, identifier_name, sizeof(identifier_name)); diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index f9ffd39194eb..a7dd678725b2 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -86,9 +86,9 @@ static inline void blake2s_init_key(struct blake2s_state *state, void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); void blake2s_final(struct blake2s_state *state, u8 *out); -static inline void blake2s(u8 *out, const u8 *in, const u8 *key, - const size_t outlen, const size_t inlen, - const size_t keylen) +static inline void blake2s(const u8 *key, const size_t keylen, + const u8 *in, const size_t inlen, + u8 *out, const size_t outlen) { struct blake2s_state state; diff --git a/lib/crypto/tests/blake2s_kunit.c b/lib/crypto/tests/blake2s_kunit.c index 057c40132246..247bbdf7dc86 100644 --- a/lib/crypto/tests/blake2s_kunit.c +++ b/lib/crypto/tests/blake2s_kunit.c @@ -14,7 +14,7 @@ static void blake2s_default(const u8 *data, size_t len, u8 out[BLAKE2S_HASH_SIZE]) { - blake2s(out, data, NULL, BLAKE2S_HASH_SIZE, len, 0); + blake2s(NULL, 0, data, len, out, BLAKE2S_HASH_SIZE); } static void blake2s_init_default(struct blake2s_state *state) @@ -52,7 +52,7 @@ static void test_blake2s_all_key_and_hash_lens(struct kunit *test) for (int key_len = 0; key_len <= BLAKE2S_KEY_SIZE; key_len++) { rand_bytes_seeded_from_len(key, key_len); for (int out_len = 1; out_len <= BLAKE2S_HASH_SIZE; out_len++) { - blake2s(hash, data, key, out_len, data_len, key_len); + blake2s(key, key_len, data, data_len, hash, out_len); blake2s_update(&main_state, hash, out_len); } } @@ -80,10 +80,10 @@ static void test_blake2s_with_guarded_key_buf(struct kunit *test) rand_bytes(key, key_len); memcpy(guarded_key, key, key_len); - blake2s(hash1, test_buf, key, - BLAKE2S_HASH_SIZE, data_len, key_len); - blake2s(hash2, test_buf, guarded_key, - BLAKE2S_HASH_SIZE, data_len, key_len); + blake2s(key, key_len, test_buf, data_len, + hash1, BLAKE2S_HASH_SIZE); + blake2s(guarded_key, key_len, test_buf, data_len, + hash2, BLAKE2S_HASH_SIZE); KUNIT_ASSERT_MEMEQ(test, hash1, hash2, BLAKE2S_HASH_SIZE); blake2s_init_key(&state, BLAKE2S_HASH_SIZE, @@ -107,8 +107,8 @@ static void test_blake2s_with_guarded_out_buf(struct kunit *test) u8 hash[BLAKE2S_HASH_SIZE]; u8 *guarded_hash = &test_buf[TEST_BUF_LEN - out_len]; - blake2s(hash, test_buf, NULL, out_len, data_len, 0); - blake2s(guarded_hash, test_buf, NULL, out_len, data_len, 0); + blake2s(NULL, 0, test_buf, data_len, hash, out_len); + blake2s(NULL, 0, test_buf, data_len, guarded_hash, out_len); KUNIT_ASSERT_MEMEQ(test, hash, guarded_hash, out_len); } } -- cgit v1.2.3 From 5e0ec8e46d4d6488242bb39a4ce5c0276afa5f32 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Oct 2025 21:30:58 -0700 Subject: lib/crypto: blake2s: Rename blake2s_state to blake2s_ctx For consistency with the SHA-1, SHA-2, SHA-3 (in development), and MD5 library APIs, rename blake2s_state to blake2s_ctx. As a refresher, the ctx name: - Is a bit shorter. 
- Avoids confusion with the compression function state, which is also often called the state (but is just part of the full context). - Is consistent with OpenSSL. Not a big deal, of course. But consistency is nice. With a BLAKE2b library API about to be added, this is a convenient time to update this. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251018043106.375964-3-ebiggers@kernel.org Signed-off-by: Eric Biggers --- drivers/char/random.c | 2 +- drivers/net/wireguard/cookie.c | 14 +++++----- drivers/net/wireguard/noise.c | 28 +++++++++---------- include/crypto/blake2s.h | 59 ++++++++++++++++++++-------------------- lib/crypto/arm/blake2s-core.S | 10 +++---- lib/crypto/arm/blake2s.h | 4 +-- lib/crypto/blake2s.c | 58 +++++++++++++++++++-------------------- lib/crypto/tests/blake2s_kunit.c | 23 ++++++++-------- lib/crypto/x86/blake2s.h | 12 ++++---- 9 files changed, 104 insertions(+), 106 deletions(-) (limited to 'lib/crypto') diff --git a/drivers/char/random.c b/drivers/char/random.c index 422c5c76571b..7e0486d8c51d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -636,7 +636,7 @@ enum { }; static struct { - struct blake2s_state hash; + struct blake2s_ctx hash; spinlock_t lock; unsigned int init_bits; } input_pool = { diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c index be1b83aae03b..08731b3fa32b 100644 --- a/drivers/net/wireguard/cookie.c +++ b/drivers/net/wireguard/cookie.c @@ -33,7 +33,7 @@ static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], const u8 pubkey[NOISE_PUBLIC_KEY_LEN], const u8 label[COOKIE_KEY_LABEL_LEN]) { - struct blake2s_state blake; + struct blake2s_ctx blake; blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); @@ -91,7 +91,7 @@ static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, struct cookie_checker *checker) { - struct blake2s_state state; + struct blake2s_ctx blake; if (wg_birthdate_has_expired(checker->secret_birthdate, COOKIE_SECRET_MAX_AGE)) { @@ -103,15 +103,15 @@ static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, down_read(&checker->secret_lock); - blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); + blake2s_init_key(&blake, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); if (skb->protocol == htons(ETH_P_IP)) - blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, + blake2s_update(&blake, (u8 *)&ip_hdr(skb)->saddr, sizeof(struct in_addr)); else if (skb->protocol == htons(ETH_P_IPV6)) - blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, + blake2s_update(&blake, (u8 *)&ipv6_hdr(skb)->saddr, sizeof(struct in6_addr)); - blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); - blake2s_final(&state, cookie); + blake2s_update(&blake, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); + blake2s_final(&blake, cookie); up_read(&checker->secret_lock); } diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 306abb876c80..1fe8468f0bef 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -33,7 +33,7 @@ static atomic64_t keypair_counter = ATOMIC64_INIT(0); void __init wg_noise_init(void) { - struct blake2s_state blake; + struct blake2s_ctx blake; blake2s(NULL, 0, handshake_name, sizeof(handshake_name), handshake_init_chaining_key, NOISE_HASH_LEN); @@ -304,33 +304,33 @@ void wg_noise_set_static_identity_private_key( static void hmac(u8 *out, const u8 *in, const u8 *key, 
const size_t inlen, const size_t keylen) { - struct blake2s_state state; + struct blake2s_ctx blake; u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); int i; if (keylen > BLAKE2S_BLOCK_SIZE) { - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, key, keylen); - blake2s_final(&state, x_key); + blake2s_init(&blake, BLAKE2S_HASH_SIZE); + blake2s_update(&blake, key, keylen); + blake2s_final(&blake, x_key); } else memcpy(x_key, key, keylen); for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) x_key[i] ^= 0x36; - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, in, inlen); - blake2s_final(&state, i_hash); + blake2s_init(&blake, BLAKE2S_HASH_SIZE); + blake2s_update(&blake, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&blake, in, inlen); + blake2s_final(&blake, i_hash); for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) x_key[i] ^= 0x5c ^ 0x36; - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); - blake2s_final(&state, i_hash); + blake2s_init(&blake, BLAKE2S_HASH_SIZE); + blake2s_update(&blake, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&blake, i_hash, BLAKE2S_HASH_SIZE); + blake2s_final(&blake, i_hash); memcpy(out, i_hash, BLAKE2S_HASH_SIZE); memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); @@ -431,7 +431,7 @@ static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) { - struct blake2s_state blake; + struct blake2s_ctx blake; blake2s_init(&blake, NOISE_HASH_LEN); blake2s_update(&blake, hash, NOISE_HASH_LEN); diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index a7dd678725b2..4c8d532ee97b 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -22,7 +22,7 @@ enum blake2s_lengths { BLAKE2S_256_HASH_SIZE = 32, }; -struct blake2s_state { +struct blake2s_ctx { /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ u32 h[8]; u32 t[2]; @@ -43,62 +43,61 @@ enum blake2s_iv { BLAKE2S_IV7 = 0x5BE0CD19UL, }; -static inline void __blake2s_init(struct blake2s_state *state, size_t outlen, +static inline void __blake2s_init(struct blake2s_ctx *ctx, size_t outlen, const void *key, size_t keylen) { - state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); - state->h[1] = BLAKE2S_IV1; - state->h[2] = BLAKE2S_IV2; - state->h[3] = BLAKE2S_IV3; - state->h[4] = BLAKE2S_IV4; - state->h[5] = BLAKE2S_IV5; - state->h[6] = BLAKE2S_IV6; - state->h[7] = BLAKE2S_IV7; - state->t[0] = 0; - state->t[1] = 0; - state->f[0] = 0; - state->f[1] = 0; - state->buflen = 0; - state->outlen = outlen; + ctx->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); + ctx->h[1] = BLAKE2S_IV1; + ctx->h[2] = BLAKE2S_IV2; + ctx->h[3] = BLAKE2S_IV3; + ctx->h[4] = BLAKE2S_IV4; + ctx->h[5] = BLAKE2S_IV5; + ctx->h[6] = BLAKE2S_IV6; + ctx->h[7] = BLAKE2S_IV7; + ctx->t[0] = 0; + ctx->t[1] = 0; + ctx->f[0] = 0; + ctx->f[1] = 0; + ctx->buflen = 0; + ctx->outlen = outlen; if (keylen) { - memcpy(state->buf, key, keylen); - memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); - state->buflen = BLAKE2S_BLOCK_SIZE; + memcpy(ctx->buf, key, keylen); + memset(&ctx->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); + ctx->buflen = BLAKE2S_BLOCK_SIZE; } } -static inline void blake2s_init(struct blake2s_state *state, - const size_t outlen) +static inline void blake2s_init(struct blake2s_ctx *ctx, const size_t outlen) { - __blake2s_init(state, outlen, NULL, 0); + __blake2s_init(ctx, outlen, NULL, 0); } -static inline void blake2s_init_key(struct blake2s_state *state, +static inline void blake2s_init_key(struct blake2s_ctx *ctx, const size_t outlen, const void *key, const size_t keylen) { WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); - __blake2s_init(state, outlen, key, keylen); + __blake2s_init(ctx, outlen, key, keylen); } -void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); -void blake2s_final(struct blake2s_state *state, u8 *out); +void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_ctx *ctx, u8 *out); static inline void blake2s(const u8 *key, const size_t keylen, const u8 *in, const size_t inlen, u8 *out, const size_t outlen) { - struct blake2s_state state; + struct blake2s_ctx ctx; WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || (!key && keylen))); - __blake2s_init(&state, outlen, key, keylen); - blake2s_update(&state, in, inlen); - blake2s_final(&state, out); + __blake2s_init(&ctx, outlen, key, keylen); + blake2s_update(&ctx, in, inlen); + blake2s_final(&ctx, out); } #endif /* _CRYPTO_BLAKE2S_H */ diff --git a/lib/crypto/arm/blake2s-core.S b/lib/crypto/arm/blake2s-core.S index 293f44fa8f31..78e758a7cb3e 100644 --- a/lib/crypto/arm/blake2s-core.S +++ b/lib/crypto/arm/blake2s-core.S @@ -170,10 +170,10 @@ .endm // -// void blake2s_compress(struct blake2s_state *state, +// void blake2s_compress(struct blake2s_ctx *ctx, // const u8 *block, size_t nblocks, u32 inc); // -// Only the first three fields of struct blake2s_state are used: +// Only the first three fields of struct blake2s_ctx are used: // u32 h[8]; (inout) // u32 t[2]; (inout) // u32 f[2]; (in) @@ -183,7 +183,7 @@ ENTRY(blake2s_compress) push {r0-r2,r4-r11,lr} // keep this an even number .Lnext_block: - // r0 is 'state' + // r0 is 'ctx' // r1 is 'block' 
// r3 is 'inc' @@ -211,7 +211,7 @@ ENTRY(blake2s_compress) // Calculate v[8..15]. Push v[9..15] onto the stack, and leave space // for spilling v[8..9]. Leave v[8..9] in r8-r9. - mov r14, r0 // r14 = state + mov r14, r0 // r14 = ctx adr r12, .Lblake2s_IV ldmia r12!, {r8-r9} // load IV[0..1] __ldrd r0, r1, r14, 40 // load f[0..1] @@ -275,7 +275,7 @@ ENTRY(blake2s_compress) // Advance to the next block, if there is one. Note that if there are // multiple blocks, then 'inc' (the counter increment amount) must be // 64. So we can simply set it to 64 without re-loading it. - ldm sp, {r0, r1, r2} // load (state, block, nblocks) + ldm sp, {r0, r1, r2} // load (ctx, block, nblocks) mov r3, #64 // set 'inc' subs r2, r2, #1 // nblocks-- str r2, [sp, #8] diff --git a/lib/crypto/arm/blake2s.h b/lib/crypto/arm/blake2s.h index aa7a97139ea7..ce009cd98de9 100644 --- a/lib/crypto/arm/blake2s.h +++ b/lib/crypto/arm/blake2s.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* defined in blake2s-core.S */ -void blake2s_compress(struct blake2s_state *state, const u8 *block, - size_t nblocks, u32 inc); +void blake2s_compress(struct blake2s_ctx *ctx, + const u8 *block, size_t nblocks, u32 inc); diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 5638ed9d882d..1ad36cb29835 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -29,15 +29,15 @@ static const u8 blake2s_sigma[10][16] = { { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, }; -static inline void blake2s_increment_counter(struct blake2s_state *state, +static inline void blake2s_increment_counter(struct blake2s_ctx *ctx, const u32 inc) { - state->t[0] += inc; - state->t[1] += (state->t[0] < inc); + ctx->t[0] += inc; + ctx->t[1] += (ctx->t[0] < inc); } static void __maybe_unused -blake2s_compress_generic(struct blake2s_state *state, const u8 *block, +blake2s_compress_generic(struct blake2s_ctx *ctx, const u8 *block, size_t nblocks, const u32 inc) { u32 m[16]; @@ -48,18 +48,18 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block, (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); while (nblocks > 0) { - blake2s_increment_counter(state, inc); + blake2s_increment_counter(ctx, inc); memcpy(m, block, BLAKE2S_BLOCK_SIZE); le32_to_cpu_array(m, ARRAY_SIZE(m)); - memcpy(v, state->h, 32); + memcpy(v, ctx->h, 32); v[ 8] = BLAKE2S_IV0; v[ 9] = BLAKE2S_IV1; v[10] = BLAKE2S_IV2; v[11] = BLAKE2S_IV3; - v[12] = BLAKE2S_IV4 ^ state->t[0]; - v[13] = BLAKE2S_IV5 ^ state->t[1]; - v[14] = BLAKE2S_IV6 ^ state->f[0]; - v[15] = BLAKE2S_IV7 ^ state->f[1]; + v[12] = BLAKE2S_IV4 ^ ctx->t[0]; + v[13] = BLAKE2S_IV5 ^ ctx->t[1]; + v[14] = BLAKE2S_IV6 ^ ctx->f[0]; + v[15] = BLAKE2S_IV7 ^ ctx->f[1]; #define G(r, i, a, b, c, d) do { \ a += b + m[blake2s_sigma[r][2 * i + 0]]; \ @@ -97,7 +97,7 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block, #undef ROUND for (i = 0; i < 8; ++i) - state->h[i] ^= v[i] ^ v[i + 8]; + ctx->h[i] ^= v[i] ^ v[i + 8]; block += BLAKE2S_BLOCK_SIZE; --nblocks; @@ -110,45 +110,45 @@ blake2s_compress_generic(struct blake2s_state *state, const u8 *block, #define blake2s_compress blake2s_compress_generic #endif -static inline void blake2s_set_lastblock(struct blake2s_state *state) +static inline void blake2s_set_lastblock(struct blake2s_ctx *ctx) { - state->f[0] = -1; + ctx->f[0] = -1; } -void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) +void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen) { - const size_t fill = BLAKE2S_BLOCK_SIZE - 
state->buflen; + const size_t fill = BLAKE2S_BLOCK_SIZE - ctx->buflen; if (unlikely(!inlen)) return; if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); - state->buflen = 0; + memcpy(ctx->buf + ctx->buflen, in, fill); + blake2s_compress(ctx, ctx->buf, 1, BLAKE2S_BLOCK_SIZE); + ctx->buflen = 0; in += fill; inlen -= fill; } if (inlen > BLAKE2S_BLOCK_SIZE) { const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + blake2s_compress(ctx, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; + memcpy(ctx->buf + ctx->buflen, in, inlen); + ctx->buflen += inlen; } EXPORT_SYMBOL(blake2s_update); -void blake2s_final(struct blake2s_state *state, u8 *out) +void blake2s_final(struct blake2s_ctx *ctx, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - blake2s_compress(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); - memzero_explicit(state, sizeof(*state)); + blake2s_set_lastblock(ctx); + memset(ctx->buf + ctx->buflen, 0, + BLAKE2S_BLOCK_SIZE - ctx->buflen); /* Padding */ + blake2s_compress(ctx, ctx->buf, 1, ctx->buflen); + cpu_to_le32_array(ctx->h, ARRAY_SIZE(ctx->h)); + memcpy(out, ctx->h, ctx->outlen); + memzero_explicit(ctx, sizeof(*ctx)); } EXPORT_SYMBOL(blake2s_final); diff --git a/lib/crypto/tests/blake2s_kunit.c b/lib/crypto/tests/blake2s_kunit.c index 247bbdf7dc86..6832d9aa7b82 100644 --- a/lib/crypto/tests/blake2s_kunit.c +++ b/lib/crypto/tests/blake2s_kunit.c @@ -17,9 +17,9 @@ static void blake2s_default(const u8 *data, size_t len, blake2s(NULL, 0, data, len, out, BLAKE2S_HASH_SIZE); } -static void blake2s_init_default(struct blake2s_state *state) +static void blake2s_init_default(struct blake2s_ctx *ctx) { - blake2s_init(state, BLAKE2S_HASH_SIZE); + blake2s_init(ctx, BLAKE2S_HASH_SIZE); } /* @@ -27,7 +27,7 @@ static void blake2s_init_default(struct blake2s_state *state) * with a key length of 0 and a hash length of BLAKE2S_HASH_SIZE. 
*/ #define HASH blake2s_default -#define HASH_CTX blake2s_state +#define HASH_CTX blake2s_ctx #define HASH_SIZE BLAKE2S_HASH_SIZE #define HASH_INIT blake2s_init_default #define HASH_UPDATE blake2s_update @@ -44,19 +44,19 @@ static void test_blake2s_all_key_and_hash_lens(struct kunit *test) u8 *data = &test_buf[0]; u8 *key = data + data_len; u8 *hash = key + BLAKE2S_KEY_SIZE; - struct blake2s_state main_state; + struct blake2s_ctx main_ctx; u8 main_hash[BLAKE2S_HASH_SIZE]; rand_bytes_seeded_from_len(data, data_len); - blake2s_init(&main_state, BLAKE2S_HASH_SIZE); + blake2s_init(&main_ctx, BLAKE2S_HASH_SIZE); for (int key_len = 0; key_len <= BLAKE2S_KEY_SIZE; key_len++) { rand_bytes_seeded_from_len(key, key_len); for (int out_len = 1; out_len <= BLAKE2S_HASH_SIZE; out_len++) { blake2s(key, key_len, data, data_len, hash, out_len); - blake2s_update(&main_state, hash, out_len); + blake2s_update(&main_ctx, hash, out_len); } } - blake2s_final(&main_state, main_hash); + blake2s_final(&main_ctx, main_hash); KUNIT_ASSERT_MEMEQ(test, main_hash, blake2s_keyed_testvec_consolidated, BLAKE2S_HASH_SIZE); } @@ -75,7 +75,7 @@ static void test_blake2s_with_guarded_key_buf(struct kunit *test) u8 *guarded_key = &test_buf[TEST_BUF_LEN - key_len]; u8 hash1[BLAKE2S_HASH_SIZE]; u8 hash2[BLAKE2S_HASH_SIZE]; - struct blake2s_state state; + struct blake2s_ctx ctx; rand_bytes(key, key_len); memcpy(guarded_key, key, key_len); @@ -86,10 +86,9 @@ static void test_blake2s_with_guarded_key_buf(struct kunit *test) hash2, BLAKE2S_HASH_SIZE); KUNIT_ASSERT_MEMEQ(test, hash1, hash2, BLAKE2S_HASH_SIZE); - blake2s_init_key(&state, BLAKE2S_HASH_SIZE, - guarded_key, key_len); - blake2s_update(&state, test_buf, data_len); - blake2s_final(&state, hash2); + blake2s_init_key(&ctx, BLAKE2S_HASH_SIZE, guarded_key, key_len); + blake2s_update(&ctx, test_buf, data_len); + blake2s_final(&ctx, hash2); KUNIT_ASSERT_MEMEQ(test, hash1, hash2, BLAKE2S_HASH_SIZE); } } diff --git a/lib/crypto/x86/blake2s.h b/lib/crypto/x86/blake2s.h index b6d30d2fa045..de360935b820 100644 --- a/lib/crypto/x86/blake2s.h +++ b/lib/crypto/x86/blake2s.h @@ -11,24 +11,24 @@ #include #include -asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, +asmlinkage void blake2s_compress_ssse3(struct blake2s_ctx *ctx, const u8 *block, const size_t nblocks, const u32 inc); -asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, +asmlinkage void blake2s_compress_avx512(struct blake2s_ctx *ctx, const u8 *block, const size_t nblocks, const u32 inc); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); -static void blake2s_compress(struct blake2s_state *state, const u8 *block, +static void blake2s_compress(struct blake2s_ctx *ctx, const u8 *block, size_t nblocks, const u32 inc) { /* SIMD disables preemption, so relax after processing each page. 
*/ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) { - blake2s_compress_generic(state, block, nblocks, inc); + blake2s_compress_generic(ctx, block, nblocks, inc); return; } @@ -38,9 +38,9 @@ static void blake2s_compress(struct blake2s_state *state, const u8 *block, kernel_fpu_begin(); if (static_branch_likely(&blake2s_use_avx512)) - blake2s_compress_avx512(state, block, blocks, inc); + blake2s_compress_avx512(ctx, block, blocks, inc); else - blake2s_compress_ssse3(state, block, blocks, inc); + blake2s_compress_ssse3(ctx, block, blocks, inc); kernel_fpu_end(); nblocks -= blocks; -- cgit v1.2.3 From 5385bcbffe5a76a74d6bb135af1c88fb235f8134 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Oct 2025 21:30:59 -0700 Subject: lib/crypto: blake2s: Drop excessive const & rename block => data A couple more small cleanups to the BLAKE2s code before these things get propagated into the BLAKE2b code: - Drop 'const' from some non-pointer function parameters. It was a bit excessive and not conventional. - Rename 'block' argument of blake2s_compress*() to 'data'. This is for consistency with the SHA-* code, and also to avoid the implication that it points to a singular "block". No functional changes. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251018043106.375964-4-ebiggers@kernel.org Signed-off-by: Eric Biggers --- include/crypto/blake2s.h | 13 ++++++------- lib/crypto/arm/blake2s-core.S | 6 +++--- lib/crypto/arm/blake2s.h | 2 +- lib/crypto/blake2s.c | 12 ++++++------ lib/crypto/x86/blake2s.h | 18 ++++++++---------- 5 files changed, 24 insertions(+), 27 deletions(-) (limited to 'lib/crypto') diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index 4c8d532ee97b..33893057eb41 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -67,14 +67,13 @@ static inline void __blake2s_init(struct blake2s_ctx *ctx, size_t outlen, } } -static inline void blake2s_init(struct blake2s_ctx *ctx, const size_t outlen) +static inline void blake2s_init(struct blake2s_ctx *ctx, size_t outlen) { __blake2s_init(ctx, outlen, NULL, 0); } -static inline void blake2s_init_key(struct blake2s_ctx *ctx, - const size_t outlen, const void *key, - const size_t keylen) +static inline void blake2s_init_key(struct blake2s_ctx *ctx, size_t outlen, + const void *key, size_t keylen) { WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); @@ -85,9 +84,9 @@ static inline void blake2s_init_key(struct blake2s_ctx *ctx, void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen); void blake2s_final(struct blake2s_ctx *ctx, u8 *out); -static inline void blake2s(const u8 *key, const size_t keylen, - const u8 *in, const size_t inlen, - u8 *out, const size_t outlen) +static inline void blake2s(const u8 *key, size_t keylen, + const u8 *in, size_t inlen, + u8 *out, size_t outlen) { struct blake2s_ctx ctx; diff --git a/lib/crypto/arm/blake2s-core.S b/lib/crypto/arm/blake2s-core.S index 78e758a7cb3e..14eb7c18a836 100644 --- a/lib/crypto/arm/blake2s-core.S +++ b/lib/crypto/arm/blake2s-core.S @@ -171,7 +171,7 @@ // // void blake2s_compress(struct blake2s_ctx *ctx, -// const u8 *block, size_t nblocks, u32 inc); +// const u8 *data, size_t nblocks, u32 inc); // // Only the first three fields of struct blake2s_ctx are used: // u32 h[8]; (inout) @@ -184,7 +184,7 @@ ENTRY(blake2s_compress) .Lnext_block: // r0 is 'ctx' - // r1 is 'block' + // r1 is 'data' // r3 is 'inc' 
// Load and increment the counter t[0..1]. @@ -275,7 +275,7 @@ ENTRY(blake2s_compress) // Advance to the next block, if there is one. Note that if there are // multiple blocks, then 'inc' (the counter increment amount) must be // 64. So we can simply set it to 64 without re-loading it. - ldm sp, {r0, r1, r2} // load (ctx, block, nblocks) + ldm sp, {r0, r1, r2} // load (ctx, data, nblocks) mov r3, #64 // set 'inc' subs r2, r2, #1 // nblocks-- str r2, [sp, #8] diff --git a/lib/crypto/arm/blake2s.h b/lib/crypto/arm/blake2s.h index ce009cd98de9..42c04440c191 100644 --- a/lib/crypto/arm/blake2s.h +++ b/lib/crypto/arm/blake2s.h @@ -2,4 +2,4 @@ /* defined in blake2s-core.S */ void blake2s_compress(struct blake2s_ctx *ctx, - const u8 *block, size_t nblocks, u32 inc); + const u8 *data, size_t nblocks, u32 inc); diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 1ad36cb29835..6182c21ed943 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -29,16 +29,15 @@ static const u8 blake2s_sigma[10][16] = { { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, }; -static inline void blake2s_increment_counter(struct blake2s_ctx *ctx, - const u32 inc) +static inline void blake2s_increment_counter(struct blake2s_ctx *ctx, u32 inc) { ctx->t[0] += inc; ctx->t[1] += (ctx->t[0] < inc); } static void __maybe_unused -blake2s_compress_generic(struct blake2s_ctx *ctx, const u8 *block, - size_t nblocks, const u32 inc) +blake2s_compress_generic(struct blake2s_ctx *ctx, + const u8 *data, size_t nblocks, u32 inc) { u32 m[16]; u32 v[16]; @@ -49,7 +48,7 @@ blake2s_compress_generic(struct blake2s_ctx *ctx, const u8 *block, while (nblocks > 0) { blake2s_increment_counter(ctx, inc); - memcpy(m, block, BLAKE2S_BLOCK_SIZE); + memcpy(m, data, BLAKE2S_BLOCK_SIZE); le32_to_cpu_array(m, ARRAY_SIZE(m)); memcpy(v, ctx->h, 32); v[ 8] = BLAKE2S_IV0; @@ -99,7 +98,7 @@ blake2s_compress_generic(struct blake2s_ctx *ctx, const u8 *block, for (i = 0; i < 8; ++i) ctx->h[i] ^= v[i] ^ v[i + 8]; - block += BLAKE2S_BLOCK_SIZE; + data += BLAKE2S_BLOCK_SIZE; --nblocks; } } @@ -130,6 +129,7 @@ void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen) } if (inlen > BLAKE2S_BLOCK_SIZE) { const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + blake2s_compress(ctx, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); diff --git a/lib/crypto/x86/blake2s.h b/lib/crypto/x86/blake2s.h index de360935b820..f8eed6cb042e 100644 --- a/lib/crypto/x86/blake2s.h +++ b/lib/crypto/x86/blake2s.h @@ -12,23 +12,21 @@ #include asmlinkage void blake2s_compress_ssse3(struct blake2s_ctx *ctx, - const u8 *block, const size_t nblocks, - const u32 inc); + const u8 *data, size_t nblocks, u32 inc); asmlinkage void blake2s_compress_avx512(struct blake2s_ctx *ctx, - const u8 *block, const size_t nblocks, - const u32 inc); + const u8 *data, size_t nblocks, u32 inc); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); -static void blake2s_compress(struct blake2s_ctx *ctx, const u8 *block, - size_t nblocks, const u32 inc) +static void blake2s_compress(struct blake2s_ctx *ctx, + const u8 *data, size_t nblocks, u32 inc) { /* SIMD disables preemption, so relax after processing each page. 
*/ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) { - blake2s_compress_generic(ctx, block, nblocks, inc); + blake2s_compress_generic(ctx, data, nblocks, inc); return; } @@ -38,13 +36,13 @@ static void blake2s_compress(struct blake2s_ctx *ctx, const u8 *block, kernel_fpu_begin(); if (static_branch_likely(&blake2s_use_avx512)) - blake2s_compress_avx512(ctx, block, blocks, inc); + blake2s_compress_avx512(ctx, data, blocks, inc); else - blake2s_compress_ssse3(ctx, block, blocks, inc); + blake2s_compress_ssse3(ctx, data, blocks, inc); kernel_fpu_end(); + data += blocks * BLAKE2S_BLOCK_SIZE; nblocks -= blocks; - block += blocks * BLAKE2S_BLOCK_SIZE; } while (nblocks); } -- cgit v1.2.3 From 23a16c9533ed92cc639c8f5bd9eb104809fe2919 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Oct 2025 21:31:02 -0700 Subject: lib/crypto: blake2b: Add BLAKE2b library functions Add a library API for BLAKE2b, closely modeled after the BLAKE2s API. This will allow in-kernel users such as btrfs to use BLAKE2b without going through the generic crypto layer. In addition, as usual the BLAKE2b crypto_shash algorithms will be reimplemented on top of this. Note: to create lib/crypto/blake2b.c I made a copy of lib/crypto/blake2s.c and made the updates from BLAKE2s => BLAKE2b. This way, the BLAKE2s and BLAKE2b code is kept consistent. Therefore, it borrows the SPDX-License-Identifier and Copyright from lib/crypto/blake2s.c rather than crypto/blake2b_generic.c. The library API uses 'struct blake2b_ctx', consistent with other lib/crypto/ APIs. The existing 'struct blake2b_state' will be removed once the blake2b crypto_shash algorithms are updated to stop using it. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251018043106.375964-7-ebiggers@kernel.org Signed-off-by: Eric Biggers --- include/crypto/blake2b.h | 133 ++++++++++++++++++++++++++--- include/crypto/internal/blake2b.h | 17 +++- lib/crypto/Kconfig | 10 +++ lib/crypto/Makefile | 9 ++ lib/crypto/blake2b.c | 174 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 330 insertions(+), 13 deletions(-) create mode 100644 lib/crypto/blake2b.c (limited to 'lib/crypto') diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h index dd7694477e50..4879e2ec2686 100644 --- a/include/crypto/blake2b.h +++ b/include/crypto/blake2b.h @@ -28,6 +28,25 @@ enum blake2b_lengths { BLAKE2B_512_HASH_SIZE = 64, }; +/** + * struct blake2b_ctx - Context for hashing a message with BLAKE2b + * @h: compression function state + * @t: block counter + * @f: finalization indicator + * @buf: partial block buffer; 'buflen' bytes are valid + * @buflen: number of bytes buffered in @buf + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + */ +struct blake2b_ctx { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[BLAKE2B_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + enum blake2b_iv { BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, @@ -39,19 +58,109 @@ enum blake2b_iv { BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL, }; -static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, - size_t keylen) +static inline void __blake2b_init(struct blake2b_ctx *ctx, size_t outlen, + const void *key, size_t keylen) +{ + ctx->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + ctx->h[1] = BLAKE2B_IV1; + ctx->h[2] = BLAKE2B_IV2; + ctx->h[3] = BLAKE2B_IV3; + ctx->h[4] = BLAKE2B_IV4; + ctx->h[5] = BLAKE2B_IV5; + ctx->h[6] = BLAKE2B_IV6; + ctx->h[7] = BLAKE2B_IV7; + ctx->t[0] = 0; + ctx->t[1] = 0; + ctx->f[0] = 0; + ctx->f[1] = 0; + ctx->buflen = 0; + ctx->outlen = outlen; + if (keylen) { + memcpy(ctx->buf, key, keylen); + memset(&ctx->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); + ctx->buflen = BLAKE2B_BLOCK_SIZE; + } +} + +/** + * blake2b_init() - Initialize a BLAKE2b context for a new message (unkeyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * + * Context: Any context. + */ +static inline void blake2b_init(struct blake2b_ctx *ctx, size_t outlen) +{ + __blake2b_init(ctx, outlen, NULL, 0); +} + +/** + * blake2b_init_key() - Initialize a BLAKE2b context for a new message (keyed) + * @ctx: the context to initialize + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * @key: the key + * @keylen: the key length in bytes, at most BLAKE2B_KEY_SIZE + * + * Context: Any context. + */ +static inline void blake2b_init_key(struct blake2b_ctx *ctx, size_t outlen, + const void *key, size_t keylen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2B_HASH_SIZE || + !key || !keylen || keylen > BLAKE2B_KEY_SIZE)); + + __blake2b_init(ctx, outlen, key, keylen); +} + +/** + * blake2b_update() - Update a BLAKE2b context with message data + * @ctx: the context to update; must have been initialized + * @in: the message data + * @inlen: the data length in bytes + * + * This can be called any number of times. + * + * Context: Any context. + */ +void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen); + +/** + * blake2b_final() - Finish computing a BLAKE2b hash + * @ctx: the context to finalize; must have been initialized + * @out: (output) the resulting BLAKE2b hash. Its length will be equal to the + * @outlen that was passed to blake2b_init() or blake2b_init_key(). + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void blake2b_final(struct blake2b_ctx *ctx, u8 *out); + +/** + * blake2b() - Compute BLAKE2b hash in one shot + * @key: the key, or NULL for an unkeyed hash + * @keylen: the key length in bytes (at most BLAKE2B_KEY_SIZE), or 0 for an + * unkeyed hash + * @in: the message data + * @inlen: the data length in bytes + * @out: (output) the resulting BLAKE2b hash, with length @outlen + * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE + * + * Context: Any context. 
+ */ +static inline void blake2b(const u8 *key, size_t keylen, + const u8 *in, size_t inlen, + u8 *out, size_t outlen) { - state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); - state->h[1] = BLAKE2B_IV1; - state->h[2] = BLAKE2B_IV2; - state->h[3] = BLAKE2B_IV3; - state->h[4] = BLAKE2B_IV4; - state->h[5] = BLAKE2B_IV5; - state->h[6] = BLAKE2B_IV6; - state->h[7] = BLAKE2B_IV7; - state->t[0] = 0; - state->t[1] = 0; + struct blake2b_ctx ctx; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2B_HASH_SIZE || keylen > BLAKE2B_KEY_SIZE || + (!key && keylen))); + + __blake2b_init(&ctx, outlen, key, keylen); + blake2b_update(&ctx, in, inlen); + blake2b_final(&ctx, out); } #endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h index 3e09e2485306..3712df69def1 100644 --- a/include/crypto/internal/blake2b.h +++ b/include/crypto/internal/blake2b.h @@ -57,13 +57,28 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, return 0; } +static inline void __crypto_blake2b_init(struct blake2b_state *state, + size_t outlen, size_t keylen) +{ + state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2B_IV1; + state->h[2] = BLAKE2B_IV2; + state->h[3] = BLAKE2B_IV3; + state->h[4] = BLAKE2B_IV4; + state->h[5] = BLAKE2B_IV5; + state->h[6] = BLAKE2B_IV6; + state->h[7] = BLAKE2B_IV7; + state->t[0] = 0; + state->t[1] = 0; +} + static inline int crypto_blake2b_init(struct shash_desc *desc) { const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct blake2b_state *state = shash_desc_ctx(desc); unsigned int outlen = crypto_shash_digestsize(desc->tfm); - __blake2b_init(state, outlen, tctx->keylen); + __crypto_blake2b_init(state, outlen, tctx->keylen); return tctx->keylen ? crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0; } diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 8886055e938f..918378b7e833 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -28,6 +28,16 @@ config CRYPTO_LIB_ARC4 config CRYPTO_LIB_GF128MUL tristate +config CRYPTO_LIB_BLAKE2B + tristate + help + The BLAKE2b library functions. Select this if your module uses any of + the functions from . + +config CRYPTO_LIB_BLAKE2B_ARCH + bool + depends on CRYPTO_LIB_BLAKE2B && !UML + # BLAKE2s support is always built-in, so there's no CRYPTO_LIB_BLAKE2S option. config CRYPTO_LIB_BLAKE2S_ARCH diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index bded351aeace..f863417b1681 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -31,6 +31,15 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o ################################################################################ +obj-$(CONFIG_CRYPTO_LIB_BLAKE2B) += libblake2b.o +libblake2b-y := blake2b.o +CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930 +ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y) +CFLAGS_blake2b.o += -I$(src)/$(SRCARCH) +endif # CONFIG_CRYPTO_LIB_BLAKE2B_ARCH + +################################################################################ + # blake2s is used by the /dev/random driver which is always builtin obj-y += blake2s.o ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2S_ARCH),y) diff --git a/lib/crypto/blake2b.c b/lib/crypto/blake2b.c new file mode 100644 index 000000000000..09c6d65d8a6e --- /dev/null +++ b/lib/crypto/blake2b.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . 
All Rights Reserved. + * Copyright 2025 Google LLC + * + * This is an implementation of the BLAKE2b hash and PRF functions. + * + * Information: https://blake2.net/ + */ + +#include +#include +#include +#include +#include +#include +#include + +static const u8 blake2b_sigma[12][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } +}; + +static inline void blake2b_increment_counter(struct blake2b_ctx *ctx, u32 inc) +{ + ctx->t[0] += inc; + ctx->t[1] += (ctx->t[0] < inc); +} + +static void __maybe_unused +blake2b_compress_generic(struct blake2b_ctx *ctx, + const u8 *data, size_t nblocks, u32 inc) +{ + u64 m[16]; + u64 v[16]; + int i; + + WARN_ON(IS_ENABLED(DEBUG) && + (nblocks > 1 && inc != BLAKE2B_BLOCK_SIZE)); + + while (nblocks > 0) { + blake2b_increment_counter(ctx, inc); + memcpy(m, data, BLAKE2B_BLOCK_SIZE); + le64_to_cpu_array(m, ARRAY_SIZE(m)); + memcpy(v, ctx->h, 64); + v[ 8] = BLAKE2B_IV0; + v[ 9] = BLAKE2B_IV1; + v[10] = BLAKE2B_IV2; + v[11] = BLAKE2B_IV3; + v[12] = BLAKE2B_IV4 ^ ctx->t[0]; + v[13] = BLAKE2B_IV5 ^ ctx->t[1]; + v[14] = BLAKE2B_IV6 ^ ctx->f[0]; + v[15] = BLAKE2B_IV7 ^ ctx->f[1]; + +#define G(r, i, a, b, c, d) do { \ + a += b + m[blake2b_sigma[r][2 * i + 0]]; \ + d = ror64(d ^ a, 32); \ + c += d; \ + b = ror64(b ^ c, 24); \ + a += b + m[blake2b_sigma[r][2 * i + 1]]; \ + d = ror64(d ^ a, 16); \ + c += d; \ + b = ror64(b ^ c, 63); \ +} while (0) + +#define ROUND(r) do { \ + G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ + G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ + G(r, 2, v[2], v[ 6], v[10], v[14]); \ + G(r, 3, v[3], v[ 7], v[11], v[15]); \ + G(r, 4, v[0], v[ 5], v[10], v[15]); \ + G(r, 5, v[1], v[ 6], v[11], v[12]); \ + G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ + G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ +} while (0) + ROUND(0); + ROUND(1); + ROUND(2); + ROUND(3); + ROUND(4); + ROUND(5); + ROUND(6); + ROUND(7); + ROUND(8); + ROUND(9); + ROUND(10); + ROUND(11); + +#undef G +#undef ROUND + + for (i = 0; i < 8; ++i) + ctx->h[i] ^= v[i] ^ v[i + 8]; + + data += BLAKE2B_BLOCK_SIZE; + --nblocks; + } +} + +#ifdef CONFIG_CRYPTO_LIB_BLAKE2B_ARCH +#include "blake2b.h" /* $(SRCARCH)/blake2b.h */ +#else +#define blake2b_compress blake2b_compress_generic +#endif + +static inline void blake2b_set_lastblock(struct blake2b_ctx *ctx) +{ + ctx->f[0] = -1; +} + +void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen) +{ + const size_t fill = BLAKE2B_BLOCK_SIZE - ctx->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(ctx->buf + ctx->buflen, in, fill); + blake2b_compress(ctx, ctx->buf, 1, BLAKE2B_BLOCK_SIZE); + ctx->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2B_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); + + blake2b_compress(ctx, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); + in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2B_BLOCK_SIZE * 
(nblocks - 1); + } + memcpy(ctx->buf + ctx->buflen, in, inlen); + ctx->buflen += inlen; +} +EXPORT_SYMBOL(blake2b_update); + +void blake2b_final(struct blake2b_ctx *ctx, u8 *out) +{ + WARN_ON(IS_ENABLED(DEBUG) && !out); + blake2b_set_lastblock(ctx); + memset(ctx->buf + ctx->buflen, 0, + BLAKE2B_BLOCK_SIZE - ctx->buflen); /* Padding */ + blake2b_compress(ctx, ctx->buf, 1, ctx->buflen); + cpu_to_le64_array(ctx->h, ARRAY_SIZE(ctx->h)); + memcpy(out, ctx->h, ctx->outlen); + memzero_explicit(ctx, sizeof(*ctx)); +} +EXPORT_SYMBOL(blake2b_final); + +#ifdef blake2b_mod_init_arch +static int __init blake2b_mod_init(void) +{ + blake2b_mod_init_arch(); + return 0; +} +subsys_initcall(blake2b_mod_init); + +static void __exit blake2b_mod_exit(void) +{ +} +module_exit(blake2b_mod_exit); +#endif + +MODULE_DESCRIPTION("BLAKE2b hash function"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From ba6617bd47c2263bd2ead34e1b31d90c66af5dea Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 17 Oct 2025 21:31:03 -0700 Subject: lib/crypto: arm/blake2b: Migrate optimized code into library Migrate the arm-optimized BLAKE2b code from arch/arm/crypto/ to lib/crypto/arm/. This makes the BLAKE2b library able to use it, and it also simplifies the code because it's easier to integrate with the library than crypto_shash. This temporarily makes the arm-optimized BLAKE2b code unavailable via crypto_shash. A later commit reimplements the blake2b-* crypto_shash algorithms on top of the BLAKE2b library API, making it available again. Note that as per the lib/crypto/ convention, the optimized code is now enabled by default. So, this also fixes the longstanding issue where the optimized BLAKE2b code was not enabled by default. To see the diff from arch/arm/crypto/blake2b-neon-glue.c to lib/crypto/arm/blake2b.h, view this commit with 'git show -M10'. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251018043106.375964-8-ebiggers@kernel.org Signed-off-by: Eric Biggers --- arch/arm/crypto/Kconfig | 16 -- arch/arm/crypto/Makefile | 2 - arch/arm/crypto/blake2b-neon-core.S | 347 ----------------------------------- arch/arm/crypto/blake2b-neon-glue.c | 104 ----------- lib/crypto/Kconfig | 1 + lib/crypto/Makefile | 1 + lib/crypto/arm/blake2b-neon-core.S | 350 ++++++++++++++++++++++++++++++++++++ lib/crypto/arm/blake2b.h | 41 +++++ 8 files changed, 393 insertions(+), 469 deletions(-) delete mode 100644 arch/arm/crypto/blake2b-neon-core.S delete mode 100644 arch/arm/crypto/blake2b-neon-glue.c create mode 100644 lib/crypto/arm/blake2b-neon-core.S create mode 100644 lib/crypto/arm/blake2b.h (limited to 'lib/crypto') diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index c436eec22d86..f30d743df264 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -33,22 +33,6 @@ config CRYPTO_NHPOLY1305_NEON Architecture: arm using: - NEON (Advanced SIMD) extensions -config CRYPTO_BLAKE2B_NEON - tristate "Hash functions: BLAKE2b (NEON)" - depends on KERNEL_MODE_NEON - select CRYPTO_BLAKE2B - help - BLAKE2b cryptographic hash function (RFC 7693) - - Architecture: arm using - - NEON (Advanced SIMD) extensions - - BLAKE2b digest algorithm optimized with ARM NEON instructions. - On ARM processors that have NEON support but not the ARMv8 - Crypto Extensions, typically this BLAKE2b implementation is - much faster than the SHA-2 family and slightly faster than - SHA-1. 
- config CRYPTO_AES_ARM tristate "Ciphers: AES" select CRYPTO_ALGAPI diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 6346a73effc0..86dd43313dbf 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o -obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o @@ -13,7 +12,6 @@ obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o aes-arm-y := aes-cipher-core.o aes-cipher-glue.o aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o -blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o diff --git a/arch/arm/crypto/blake2b-neon-core.S b/arch/arm/crypto/blake2b-neon-core.S deleted file mode 100644 index 0406a186377f..000000000000 --- a/arch/arm/crypto/blake2b-neon-core.S +++ /dev/null @@ -1,347 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * BLAKE2b digest algorithm, NEON accelerated - * - * Copyright 2020 Google LLC - * - * Author: Eric Biggers - */ - -#include - - .text - .fpu neon - - // The arguments to blake2b_compress_neon() - STATE .req r0 - BLOCK .req r1 - NBLOCKS .req r2 - INC .req r3 - - // Pointers to the rotation tables - ROR24_TABLE .req r4 - ROR16_TABLE .req r5 - - // The original stack pointer - ORIG_SP .req r6 - - // NEON registers which contain the message words of the current block. - // M_0-M_3 are occasionally used for other purposes too. - M_0 .req d16 - M_1 .req d17 - M_2 .req d18 - M_3 .req d19 - M_4 .req d20 - M_5 .req d21 - M_6 .req d22 - M_7 .req d23 - M_8 .req d24 - M_9 .req d25 - M_10 .req d26 - M_11 .req d27 - M_12 .req d28 - M_13 .req d29 - M_14 .req d30 - M_15 .req d31 - - .align 4 - // Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8 - // instruction. This is the most efficient way to implement these - // rotation amounts with NEON. (On Cortex-A53 it's the same speed as - // vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.) -.Lror24_table: - .byte 3, 4, 5, 6, 7, 0, 1, 2 -.Lror16_table: - .byte 2, 3, 4, 5, 6, 7, 0, 1 - // The BLAKE2b initialization vector -.Lblake2b_IV: - .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b - .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 - .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f - .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 - -// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the -// NEON registers q0-q7. The message block is in q8..q15 (M_0-M_15). The stack -// pointer points to a 32-byte aligned buffer containing a copy of q8 and q9 -// (M_0-M_3), so that they can be reloaded if they are used as temporary -// registers. The macro arguments s0-s15 give the order in which the message -// words are used in this round. 'final' is 1 if this is the final round. -.macro _blake2b_round s0, s1, s2, s3, s4, s5, s6, s7, \ - s8, s9, s10, s11, s12, s13, s14, s15, final=0 - - // Mix the columns: - // (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]), - // (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]). 
- - // a += b + m[blake2b_sigma[r][2*i + 0]]; - vadd.u64 q0, q0, q2 - vadd.u64 q1, q1, q3 - vadd.u64 d0, d0, M_\s0 - vadd.u64 d1, d1, M_\s2 - vadd.u64 d2, d2, M_\s4 - vadd.u64 d3, d3, M_\s6 - - // d = ror64(d ^ a, 32); - veor q6, q6, q0 - veor q7, q7, q1 - vrev64.32 q6, q6 - vrev64.32 q7, q7 - - // c += d; - vadd.u64 q4, q4, q6 - vadd.u64 q5, q5, q7 - - // b = ror64(b ^ c, 24); - vld1.8 {M_0}, [ROR24_TABLE, :64] - veor q2, q2, q4 - veor q3, q3, q5 - vtbl.8 d4, {d4}, M_0 - vtbl.8 d5, {d5}, M_0 - vtbl.8 d6, {d6}, M_0 - vtbl.8 d7, {d7}, M_0 - - // a += b + m[blake2b_sigma[r][2*i + 1]]; - // - // M_0 got clobbered above, so we have to reload it if any of the four - // message words this step needs happens to be M_0. Otherwise we don't - // need to reload it here, as it will just get clobbered again below. -.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0 - vld1.8 {M_0}, [sp, :64] -.endif - vadd.u64 q0, q0, q2 - vadd.u64 q1, q1, q3 - vadd.u64 d0, d0, M_\s1 - vadd.u64 d1, d1, M_\s3 - vadd.u64 d2, d2, M_\s5 - vadd.u64 d3, d3, M_\s7 - - // d = ror64(d ^ a, 16); - vld1.8 {M_0}, [ROR16_TABLE, :64] - veor q6, q6, q0 - veor q7, q7, q1 - vtbl.8 d12, {d12}, M_0 - vtbl.8 d13, {d13}, M_0 - vtbl.8 d14, {d14}, M_0 - vtbl.8 d15, {d15}, M_0 - - // c += d; - vadd.u64 q4, q4, q6 - vadd.u64 q5, q5, q7 - - // b = ror64(b ^ c, 63); - // - // This rotation amount isn't a multiple of 8, so it has to be - // implemented using a pair of shifts, which requires temporary - // registers. Use q8-q9 (M_0-M_3) for this, and reload them afterwards. - veor q8, q2, q4 - veor q9, q3, q5 - vshr.u64 q2, q8, #63 - vshr.u64 q3, q9, #63 - vsli.u64 q2, q8, #1 - vsli.u64 q3, q9, #1 - vld1.8 {q8-q9}, [sp, :256] - - // Mix the diagonals: - // (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]), - // (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]). - // - // There are two possible ways to do this: use 'vext' instructions to - // shift the rows of the matrix so that the diagonals become columns, - // and undo it afterwards; or just use 64-bit operations on 'd' - // registers instead of 128-bit operations on 'q' registers. We use the - // latter approach, as it performs much better on Cortex-A7. 
- - // a += b + m[blake2b_sigma[r][2*i + 0]]; - vadd.u64 d0, d0, d5 - vadd.u64 d1, d1, d6 - vadd.u64 d2, d2, d7 - vadd.u64 d3, d3, d4 - vadd.u64 d0, d0, M_\s8 - vadd.u64 d1, d1, M_\s10 - vadd.u64 d2, d2, M_\s12 - vadd.u64 d3, d3, M_\s14 - - // d = ror64(d ^ a, 32); - veor d15, d15, d0 - veor d12, d12, d1 - veor d13, d13, d2 - veor d14, d14, d3 - vrev64.32 d15, d15 - vrev64.32 d12, d12 - vrev64.32 d13, d13 - vrev64.32 d14, d14 - - // c += d; - vadd.u64 d10, d10, d15 - vadd.u64 d11, d11, d12 - vadd.u64 d8, d8, d13 - vadd.u64 d9, d9, d14 - - // b = ror64(b ^ c, 24); - vld1.8 {M_0}, [ROR24_TABLE, :64] - veor d5, d5, d10 - veor d6, d6, d11 - veor d7, d7, d8 - veor d4, d4, d9 - vtbl.8 d5, {d5}, M_0 - vtbl.8 d6, {d6}, M_0 - vtbl.8 d7, {d7}, M_0 - vtbl.8 d4, {d4}, M_0 - - // a += b + m[blake2b_sigma[r][2*i + 1]]; -.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0 - vld1.8 {M_0}, [sp, :64] -.endif - vadd.u64 d0, d0, d5 - vadd.u64 d1, d1, d6 - vadd.u64 d2, d2, d7 - vadd.u64 d3, d3, d4 - vadd.u64 d0, d0, M_\s9 - vadd.u64 d1, d1, M_\s11 - vadd.u64 d2, d2, M_\s13 - vadd.u64 d3, d3, M_\s15 - - // d = ror64(d ^ a, 16); - vld1.8 {M_0}, [ROR16_TABLE, :64] - veor d15, d15, d0 - veor d12, d12, d1 - veor d13, d13, d2 - veor d14, d14, d3 - vtbl.8 d12, {d12}, M_0 - vtbl.8 d13, {d13}, M_0 - vtbl.8 d14, {d14}, M_0 - vtbl.8 d15, {d15}, M_0 - - // c += d; - vadd.u64 d10, d10, d15 - vadd.u64 d11, d11, d12 - vadd.u64 d8, d8, d13 - vadd.u64 d9, d9, d14 - - // b = ror64(b ^ c, 63); - veor d16, d4, d9 - veor d17, d5, d10 - veor d18, d6, d11 - veor d19, d7, d8 - vshr.u64 q2, q8, #63 - vshr.u64 q3, q9, #63 - vsli.u64 q2, q8, #1 - vsli.u64 q3, q9, #1 - // Reloading q8-q9 can be skipped on the final round. -.if ! \final - vld1.8 {q8-q9}, [sp, :256] -.endif -.endm - -// -// void blake2b_compress_neon(struct blake2b_state *state, -// const u8 *block, size_t nblocks, u32 inc); -// -// Only the first three fields of struct blake2b_state are used: -// u64 h[8]; (inout) -// u64 t[2]; (inout) -// u64 f[2]; (in) -// - .align 5 -ENTRY(blake2b_compress_neon) - push {r4-r10} - - // Allocate a 32-byte stack buffer that is 32-byte aligned. - mov ORIG_SP, sp - sub ip, sp, #32 - bic ip, ip, #31 - mov sp, ip - - adr ROR24_TABLE, .Lror24_table - adr ROR16_TABLE, .Lror16_table - - mov ip, STATE - vld1.64 {q0-q1}, [ip]! // Load h[0..3] - vld1.64 {q2-q3}, [ip]! // Load h[4..7] -.Lnext_block: - adr r10, .Lblake2b_IV - vld1.64 {q14-q15}, [ip] // Load t[0..1] and f[0..1] - vld1.64 {q4-q5}, [r10]! // Load IV[0..3] - vmov r7, r8, d28 // Copy t[0] to (r7, r8) - vld1.64 {q6-q7}, [r10] // Load IV[4..7] - adds r7, r7, INC // Increment counter - bcs .Lslow_inc_ctr - vmov.i32 d28[0], r7 - vst1.64 {d28}, [ip] // Update t[0] -.Linc_ctr_done: - - // Load the next message block and finish initializing the state matrix - // 'v'. Fortunately, there are exactly enough NEON registers to fit the - // entire state matrix in q0-q7 and the entire message block in q8-15. - // - // However, _blake2b_round also needs some extra registers for rotates, - // so we have to spill some registers. It's better to spill the message - // registers than the state registers, as the message doesn't change. - // Therefore we store a copy of the first 32 bytes of the message block - // (q8-q9) in an aligned buffer on the stack so that they can be - // reloaded when needed. (We could just reload directly from the - // message buffer, but it's faster to use aligned loads.) - vld1.8 {q8-q9}, [BLOCK]! - veor q6, q6, q14 // v[12..13] = IV[4..5] ^ t[0..1] - vld1.8 {q10-q11}, [BLOCK]! 
- veor q7, q7, q15 // v[14..15] = IV[6..7] ^ f[0..1] - vld1.8 {q12-q13}, [BLOCK]! - vst1.8 {q8-q9}, [sp, :256] - mov ip, STATE - vld1.8 {q14-q15}, [BLOCK]! - - // Execute the rounds. Each round is provided the order in which it - // needs to use the message words. - _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 - _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 - _blake2b_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 - _blake2b_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 - _blake2b_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 - _blake2b_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 - _blake2b_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 - _blake2b_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 - _blake2b_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 - _blake2b_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 - _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 - _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \ - final=1 - - // Fold the final state matrix into the hash chaining value: - // - // for (i = 0; i < 8; i++) - // h[i] ^= v[i] ^ v[i + 8]; - // - vld1.64 {q8-q9}, [ip]! // Load old h[0..3] - veor q0, q0, q4 // v[0..1] ^= v[8..9] - veor q1, q1, q5 // v[2..3] ^= v[10..11] - vld1.64 {q10-q11}, [ip] // Load old h[4..7] - veor q2, q2, q6 // v[4..5] ^= v[12..13] - veor q3, q3, q7 // v[6..7] ^= v[14..15] - veor q0, q0, q8 // v[0..1] ^= h[0..1] - veor q1, q1, q9 // v[2..3] ^= h[2..3] - mov ip, STATE - subs NBLOCKS, NBLOCKS, #1 // nblocks-- - vst1.64 {q0-q1}, [ip]! // Store new h[0..3] - veor q2, q2, q10 // v[4..5] ^= h[4..5] - veor q3, q3, q11 // v[6..7] ^= h[6..7] - vst1.64 {q2-q3}, [ip]! // Store new h[4..7] - - // Advance to the next block, if there is one. - bne .Lnext_block // nblocks != 0? - - mov sp, ORIG_SP - pop {r4-r10} - mov pc, lr - -.Lslow_inc_ctr: - // Handle the case where the counter overflowed its low 32 bits, by - // carrying the overflow bit into the full 128-bit counter. 
- vmov r9, r10, d29 - adcs r8, r8, #0 - adcs r9, r9, #0 - adc r10, r10, #0 - vmov d28, r7, r8 - vmov d29, r9, r10 - vst1.64 {q14}, [ip] // Update t[0] and t[1] - b .Linc_ctr_done -ENDPROC(blake2b_compress_neon) diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c deleted file mode 100644 index 2ff443a91724..000000000000 --- a/arch/arm/crypto/blake2b-neon-glue.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * BLAKE2b digest algorithm, NEON accelerated - * - * Copyright 2020 Google LLC - */ - -#include -#include - -#include -#include - -#include -#include - -asmlinkage void blake2b_compress_neon(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); - -static void blake2b_compress_arch(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc) -{ - do { - const size_t blocks = min_t(size_t, nblocks, - SZ_4K / BLAKE2B_BLOCK_SIZE); - - kernel_neon_begin(); - blake2b_compress_neon(state, block, blocks, inc); - kernel_neon_end(); - - nblocks -= blocks; - block += blocks * BLAKE2B_BLOCK_SIZE; - } while (nblocks); -} - -static int crypto_blake2b_update_neon(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2b_update_bo(desc, in, inlen, blake2b_compress_arch); -} - -static int crypto_blake2b_finup_neon(struct shash_desc *desc, const u8 *in, - unsigned int inlen, u8 *out) -{ - return crypto_blake2b_finup(desc, in, inlen, out, - blake2b_compress_arch); -} - -#define BLAKE2B_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \ - CRYPTO_AHASH_ALG_BLOCK_ONLY | \ - CRYPTO_AHASH_ALG_FINAL_NONZERO, \ - .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2b_setkey, \ - .init = crypto_blake2b_init, \ - .update = crypto_blake2b_update_neon, \ - .finup = crypto_blake2b_finup_neon, \ - .descsize = sizeof(struct blake2b_state), \ - .statesize = BLAKE2B_STATE_SIZE, \ - } - -static struct shash_alg blake2b_neon_algs[] = { - BLAKE2B_ALG("blake2b-160", "blake2b-160-neon", BLAKE2B_160_HASH_SIZE), - BLAKE2B_ALG("blake2b-256", "blake2b-256-neon", BLAKE2B_256_HASH_SIZE), - BLAKE2B_ALG("blake2b-384", "blake2b-384-neon", BLAKE2B_384_HASH_SIZE), - BLAKE2B_ALG("blake2b-512", "blake2b-512-neon", BLAKE2B_512_HASH_SIZE), -}; - -static int __init blake2b_neon_mod_init(void) -{ - if (!(elf_hwcap & HWCAP_NEON)) - return -ENODEV; - - return crypto_register_shashes(blake2b_neon_algs, - ARRAY_SIZE(blake2b_neon_algs)); -} - -static void __exit blake2b_neon_mod_exit(void) -{ - crypto_unregister_shashes(blake2b_neon_algs, - ARRAY_SIZE(blake2b_neon_algs)); -} - -module_init(blake2b_neon_mod_init); -module_exit(blake2b_neon_mod_exit); - -MODULE_DESCRIPTION("BLAKE2b digest algorithm, NEON accelerated"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("blake2b-160"); -MODULE_ALIAS_CRYPTO("blake2b-160-neon"); -MODULE_ALIAS_CRYPTO("blake2b-256"); -MODULE_ALIAS_CRYPTO("blake2b-256-neon"); -MODULE_ALIAS_CRYPTO("blake2b-384"); -MODULE_ALIAS_CRYPTO("blake2b-384-neon"); -MODULE_ALIAS_CRYPTO("blake2b-512"); -MODULE_ALIAS_CRYPTO("blake2b-512-neon"); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 918378b7e833..280b888153bf 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -37,6 +37,7 @@ 
config CRYPTO_LIB_BLAKE2B config CRYPTO_LIB_BLAKE2B_ARCH bool depends on CRYPTO_LIB_BLAKE2B && !UML + default y if ARM && KERNEL_MODE_NEON # BLAKE2s support is always built-in, so there's no CRYPTO_LIB_BLAKE2S option. diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index f863417b1681..bc26777d08e9 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -36,6 +36,7 @@ libblake2b-y := blake2b.o CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930 ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y) CFLAGS_blake2b.o += -I$(src)/$(SRCARCH) +libblake2b-$(CONFIG_ARM) += arm/blake2b-neon-core.o endif # CONFIG_CRYPTO_LIB_BLAKE2B_ARCH ################################################################################ diff --git a/lib/crypto/arm/blake2b-neon-core.S b/lib/crypto/arm/blake2b-neon-core.S new file mode 100644 index 000000000000..b55c37f0b88f --- /dev/null +++ b/lib/crypto/arm/blake2b-neon-core.S @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * BLAKE2b digest algorithm optimized with ARM NEON instructions. On ARM + * processors that have NEON support but not the ARMv8 Crypto Extensions, + * typically this BLAKE2b implementation is much faster than the SHA-2 family + * and slightly faster than SHA-1. + * + * Copyright 2020 Google LLC + * + * Author: Eric Biggers + */ + +#include + + .text + .fpu neon + + // The arguments to blake2b_compress_neon() + CTX .req r0 + DATA .req r1 + NBLOCKS .req r2 + INC .req r3 + + // Pointers to the rotation tables + ROR24_TABLE .req r4 + ROR16_TABLE .req r5 + + // The original stack pointer + ORIG_SP .req r6 + + // NEON registers which contain the message words of the current block. + // M_0-M_3 are occasionally used for other purposes too. + M_0 .req d16 + M_1 .req d17 + M_2 .req d18 + M_3 .req d19 + M_4 .req d20 + M_5 .req d21 + M_6 .req d22 + M_7 .req d23 + M_8 .req d24 + M_9 .req d25 + M_10 .req d26 + M_11 .req d27 + M_12 .req d28 + M_13 .req d29 + M_14 .req d30 + M_15 .req d31 + + .align 4 + // Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8 + // instruction. This is the most efficient way to implement these + // rotation amounts with NEON. (On Cortex-A53 it's the same speed as + // vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.) +.Lror24_table: + .byte 3, 4, 5, 6, 7, 0, 1, 2 +.Lror16_table: + .byte 2, 3, 4, 5, 6, 7, 0, 1 + // The BLAKE2b initialization vector +.Lblake2b_IV: + .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b + .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 + .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f + .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 + +// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the +// NEON registers q0-q7. The message block is in q8..q15 (M_0-M_15). The stack +// pointer points to a 32-byte aligned buffer containing a copy of q8 and q9 +// (M_0-M_3), so that they can be reloaded if they are used as temporary +// registers. The macro arguments s0-s15 give the order in which the message +// words are used in this round. 'final' is 1 if this is the final round. +.macro _blake2b_round s0, s1, s2, s3, s4, s5, s6, s7, \ + s8, s9, s10, s11, s12, s13, s14, s15, final=0 + + // Mix the columns: + // (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]), + // (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]). 
+ + // a += b + m[blake2b_sigma[r][2*i + 0]]; + vadd.u64 q0, q0, q2 + vadd.u64 q1, q1, q3 + vadd.u64 d0, d0, M_\s0 + vadd.u64 d1, d1, M_\s2 + vadd.u64 d2, d2, M_\s4 + vadd.u64 d3, d3, M_\s6 + + // d = ror64(d ^ a, 32); + veor q6, q6, q0 + veor q7, q7, q1 + vrev64.32 q6, q6 + vrev64.32 q7, q7 + + // c += d; + vadd.u64 q4, q4, q6 + vadd.u64 q5, q5, q7 + + // b = ror64(b ^ c, 24); + vld1.8 {M_0}, [ROR24_TABLE, :64] + veor q2, q2, q4 + veor q3, q3, q5 + vtbl.8 d4, {d4}, M_0 + vtbl.8 d5, {d5}, M_0 + vtbl.8 d6, {d6}, M_0 + vtbl.8 d7, {d7}, M_0 + + // a += b + m[blake2b_sigma[r][2*i + 1]]; + // + // M_0 got clobbered above, so we have to reload it if any of the four + // message words this step needs happens to be M_0. Otherwise we don't + // need to reload it here, as it will just get clobbered again below. +.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0 + vld1.8 {M_0}, [sp, :64] +.endif + vadd.u64 q0, q0, q2 + vadd.u64 q1, q1, q3 + vadd.u64 d0, d0, M_\s1 + vadd.u64 d1, d1, M_\s3 + vadd.u64 d2, d2, M_\s5 + vadd.u64 d3, d3, M_\s7 + + // d = ror64(d ^ a, 16); + vld1.8 {M_0}, [ROR16_TABLE, :64] + veor q6, q6, q0 + veor q7, q7, q1 + vtbl.8 d12, {d12}, M_0 + vtbl.8 d13, {d13}, M_0 + vtbl.8 d14, {d14}, M_0 + vtbl.8 d15, {d15}, M_0 + + // c += d; + vadd.u64 q4, q4, q6 + vadd.u64 q5, q5, q7 + + // b = ror64(b ^ c, 63); + // + // This rotation amount isn't a multiple of 8, so it has to be + // implemented using a pair of shifts, which requires temporary + // registers. Use q8-q9 (M_0-M_3) for this, and reload them afterwards. + veor q8, q2, q4 + veor q9, q3, q5 + vshr.u64 q2, q8, #63 + vshr.u64 q3, q9, #63 + vsli.u64 q2, q8, #1 + vsli.u64 q3, q9, #1 + vld1.8 {q8-q9}, [sp, :256] + + // Mix the diagonals: + // (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]), + // (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]). + // + // There are two possible ways to do this: use 'vext' instructions to + // shift the rows of the matrix so that the diagonals become columns, + // and undo it afterwards; or just use 64-bit operations on 'd' + // registers instead of 128-bit operations on 'q' registers. We use the + // latter approach, as it performs much better on Cortex-A7. 
+ + // a += b + m[blake2b_sigma[r][2*i + 0]]; + vadd.u64 d0, d0, d5 + vadd.u64 d1, d1, d6 + vadd.u64 d2, d2, d7 + vadd.u64 d3, d3, d4 + vadd.u64 d0, d0, M_\s8 + vadd.u64 d1, d1, M_\s10 + vadd.u64 d2, d2, M_\s12 + vadd.u64 d3, d3, M_\s14 + + // d = ror64(d ^ a, 32); + veor d15, d15, d0 + veor d12, d12, d1 + veor d13, d13, d2 + veor d14, d14, d3 + vrev64.32 d15, d15 + vrev64.32 d12, d12 + vrev64.32 d13, d13 + vrev64.32 d14, d14 + + // c += d; + vadd.u64 d10, d10, d15 + vadd.u64 d11, d11, d12 + vadd.u64 d8, d8, d13 + vadd.u64 d9, d9, d14 + + // b = ror64(b ^ c, 24); + vld1.8 {M_0}, [ROR24_TABLE, :64] + veor d5, d5, d10 + veor d6, d6, d11 + veor d7, d7, d8 + veor d4, d4, d9 + vtbl.8 d5, {d5}, M_0 + vtbl.8 d6, {d6}, M_0 + vtbl.8 d7, {d7}, M_0 + vtbl.8 d4, {d4}, M_0 + + // a += b + m[blake2b_sigma[r][2*i + 1]]; +.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0 + vld1.8 {M_0}, [sp, :64] +.endif + vadd.u64 d0, d0, d5 + vadd.u64 d1, d1, d6 + vadd.u64 d2, d2, d7 + vadd.u64 d3, d3, d4 + vadd.u64 d0, d0, M_\s9 + vadd.u64 d1, d1, M_\s11 + vadd.u64 d2, d2, M_\s13 + vadd.u64 d3, d3, M_\s15 + + // d = ror64(d ^ a, 16); + vld1.8 {M_0}, [ROR16_TABLE, :64] + veor d15, d15, d0 + veor d12, d12, d1 + veor d13, d13, d2 + veor d14, d14, d3 + vtbl.8 d12, {d12}, M_0 + vtbl.8 d13, {d13}, M_0 + vtbl.8 d14, {d14}, M_0 + vtbl.8 d15, {d15}, M_0 + + // c += d; + vadd.u64 d10, d10, d15 + vadd.u64 d11, d11, d12 + vadd.u64 d8, d8, d13 + vadd.u64 d9, d9, d14 + + // b = ror64(b ^ c, 63); + veor d16, d4, d9 + veor d17, d5, d10 + veor d18, d6, d11 + veor d19, d7, d8 + vshr.u64 q2, q8, #63 + vshr.u64 q3, q9, #63 + vsli.u64 q2, q8, #1 + vsli.u64 q3, q9, #1 + // Reloading q8-q9 can be skipped on the final round. +.if ! \final + vld1.8 {q8-q9}, [sp, :256] +.endif +.endm + +// +// void blake2b_compress_neon(struct blake2b_ctx *ctx, +// const u8 *data, size_t nblocks, u32 inc); +// +// Only the first three fields of struct blake2b_ctx are used: +// u64 h[8]; (inout) +// u64 t[2]; (inout) +// u64 f[2]; (in) +// + .align 5 +ENTRY(blake2b_compress_neon) + push {r4-r10} + + // Allocate a 32-byte stack buffer that is 32-byte aligned. + mov ORIG_SP, sp + sub ip, sp, #32 + bic ip, ip, #31 + mov sp, ip + + adr ROR24_TABLE, .Lror24_table + adr ROR16_TABLE, .Lror16_table + + mov ip, CTX + vld1.64 {q0-q1}, [ip]! // Load h[0..3] + vld1.64 {q2-q3}, [ip]! // Load h[4..7] +.Lnext_block: + adr r10, .Lblake2b_IV + vld1.64 {q14-q15}, [ip] // Load t[0..1] and f[0..1] + vld1.64 {q4-q5}, [r10]! // Load IV[0..3] + vmov r7, r8, d28 // Copy t[0] to (r7, r8) + vld1.64 {q6-q7}, [r10] // Load IV[4..7] + adds r7, r7, INC // Increment counter + bcs .Lslow_inc_ctr + vmov.i32 d28[0], r7 + vst1.64 {d28}, [ip] // Update t[0] +.Linc_ctr_done: + + // Load the next message block and finish initializing the state matrix + // 'v'. Fortunately, there are exactly enough NEON registers to fit the + // entire state matrix in q0-q7 and the entire message block in q8-15. + // + // However, _blake2b_round also needs some extra registers for rotates, + // so we have to spill some registers. It's better to spill the message + // registers than the state registers, as the message doesn't change. + // Therefore we store a copy of the first 32 bytes of the message block + // (q8-q9) in an aligned buffer on the stack so that they can be + // reloaded when needed. (We could just reload directly from the + // message buffer, but it's faster to use aligned loads.) + vld1.8 {q8-q9}, [DATA]! + veor q6, q6, q14 // v[12..13] = IV[4..5] ^ t[0..1] + vld1.8 {q10-q11}, [DATA]! 
+ veor q7, q7, q15 // v[14..15] = IV[6..7] ^ f[0..1] + vld1.8 {q12-q13}, [DATA]! + vst1.8 {q8-q9}, [sp, :256] + mov ip, CTX + vld1.8 {q14-q15}, [DATA]! + + // Execute the rounds. Each round is provided the order in which it + // needs to use the message words. + _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 + _blake2b_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 + _blake2b_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 + _blake2b_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 + _blake2b_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 + _blake2b_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 + _blake2b_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 + _blake2b_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 + _blake2b_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 + _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \ + final=1 + + // Fold the final state matrix into the hash chaining value: + // + // for (i = 0; i < 8; i++) + // h[i] ^= v[i] ^ v[i + 8]; + // + vld1.64 {q8-q9}, [ip]! // Load old h[0..3] + veor q0, q0, q4 // v[0..1] ^= v[8..9] + veor q1, q1, q5 // v[2..3] ^= v[10..11] + vld1.64 {q10-q11}, [ip] // Load old h[4..7] + veor q2, q2, q6 // v[4..5] ^= v[12..13] + veor q3, q3, q7 // v[6..7] ^= v[14..15] + veor q0, q0, q8 // v[0..1] ^= h[0..1] + veor q1, q1, q9 // v[2..3] ^= h[2..3] + mov ip, CTX + subs NBLOCKS, NBLOCKS, #1 // nblocks-- + vst1.64 {q0-q1}, [ip]! // Store new h[0..3] + veor q2, q2, q10 // v[4..5] ^= h[4..5] + veor q3, q3, q11 // v[6..7] ^= h[6..7] + vst1.64 {q2-q3}, [ip]! // Store new h[4..7] + + // Advance to the next block, if there is one. + bne .Lnext_block // nblocks != 0? + + mov sp, ORIG_SP + pop {r4-r10} + mov pc, lr + +.Lslow_inc_ctr: + // Handle the case where the counter overflowed its low 32 bits, by + // carrying the overflow bit into the full 128-bit counter. 
+ vmov r9, r10, d29 + adcs r8, r8, #0 + adcs r9, r9, #0 + adc r10, r10, #0 + vmov d28, r7, r8 + vmov d29, r9, r10 + vst1.64 {q14}, [ip] // Update t[0] and t[1] + b .Linc_ctr_done +ENDPROC(blake2b_compress_neon) diff --git a/lib/crypto/arm/blake2b.h b/lib/crypto/arm/blake2b.h new file mode 100644 index 000000000000..1b9154d119db --- /dev/null +++ b/lib/crypto/arm/blake2b.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * BLAKE2b digest algorithm, NEON accelerated + * + * Copyright 2020 Google LLC + */ + +#include +#include + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); + +asmlinkage void blake2b_compress_neon(struct blake2b_ctx *ctx, + const u8 *data, size_t nblocks, u32 inc); + +static void blake2b_compress(struct blake2b_ctx *ctx, + const u8 *data, size_t nblocks, u32 inc) +{ + if (!static_branch_likely(&have_neon) || !may_use_simd()) { + blake2b_compress_generic(ctx, data, nblocks, inc); + return; + } + do { + const size_t blocks = min_t(size_t, nblocks, + SZ_4K / BLAKE2B_BLOCK_SIZE); + + kernel_neon_begin(); + blake2b_compress_neon(ctx, data, blocks, inc); + kernel_neon_end(); + + data += blocks * BLAKE2B_BLOCK_SIZE; + nblocks -= blocks; + } while (nblocks); +} + +#define blake2b_mod_init_arch blake2b_mod_init_arch +static void blake2b_mod_init_arch(void) +{ + if (elf_hwcap & HWCAP_NEON) + static_branch_enable(&have_neon); +} -- cgit v1.2.3 From 0593447248044ab609b43b947d0e198c887ac281 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sat, 25 Oct 2025 22:50:20 -0700 Subject: lib/crypto: sha3: Add SHA-3 support Add SHA-3 support to lib/crypto/. All six algorithms in the SHA-3 family are supported: four digests (SHA3-224, SHA3-256, SHA3-384, and SHA3-512) and two extendable-output functions (SHAKE128 and SHAKE256). The SHAKE algorithms will be required for ML-DSA. [EB: simplified the API to use fewer types and functions, fixed bug that sometimes caused incorrect SHAKE output, cleaned up the documentation, dropped an ad-hoc test that was inconsistent with the rest of lib/crypto/, and many other cleanups] Signed-off-by: David Howells Co-developed-by: Eric Biggers Tested-by: Harald Freudenberger Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251026055032.1413733-4-ebiggers@kernel.org Signed-off-by: Eric Biggers --- Documentation/crypto/index.rst | 1 + Documentation/crypto/sha3.rst | 119 ++++++++++++++ include/crypto/sha3.h | 322 +++++++++++++++++++++++++++++++++++- lib/crypto/Kconfig | 7 + lib/crypto/Makefile | 5 + lib/crypto/sha3.c | 359 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 810 insertions(+), 3 deletions(-) create mode 100644 Documentation/crypto/sha3.rst create mode 100644 lib/crypto/sha3.c (limited to 'lib/crypto') diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst index 100b47d049c0..4ee667c446f9 100644 --- a/Documentation/crypto/index.rst +++ b/Documentation/crypto/index.rst @@ -27,3 +27,4 @@ for cryptographic use cases, as well as programming examples. descore-readme device_drivers/index krb5 + sha3 diff --git a/Documentation/crypto/sha3.rst b/Documentation/crypto/sha3.rst new file mode 100644 index 000000000000..b705e70691d7 --- /dev/null +++ b/Documentation/crypto/sha3.rst @@ -0,0 +1,119 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +========================== +SHA-3 Algorithm Collection +========================== + +.. 
contents:: + +Overview +======== + +The SHA-3 family of algorithms, as specified in NIST FIPS-202 [1]_, contains six +algorithms based on the Keccak sponge function. The differences between them +are: the "rate" (how much of the state buffer gets updated with new data between +invocations of the Keccak function and analogous to the "block size"), what +domain separation suffix gets appended to the input data, and how much output +data is extracted at the end. The Keccak sponge function is designed such that +arbitrary amounts of output can be obtained for certain algorithms. + +Four digest algorithms are provided: + + - SHA3-224 + - SHA3-256 + - SHA3-384 + - SHA3-512 + +Additionally, two Extendable-Output Functions (XOFs) are provided: + + - SHAKE128 + - SHAKE256 + +The SHA-3 library API supports all six of these algorithms. The four digest +algorithms are also supported by the crypto_shash and crypto_ahash APIs. + +This document describes the SHA-3 library API. + + +Digests +======= + +The following functions compute SHA-3 digests:: + + void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]); + void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]); + void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]); + void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]); + +For users that need to pass in data incrementally, an incremental API is also +provided. The incremental API uses the following struct:: + + struct sha3_ctx { ... }; + +Initialization is done with one of:: + + void sha3_224_init(struct sha3_ctx *ctx); + void sha3_256_init(struct sha3_ctx *ctx); + void sha3_384_init(struct sha3_ctx *ctx); + void sha3_512_init(struct sha3_ctx *ctx); + +Input data is then added with any number of calls to:: + + void sha3_update(struct sha3_ctx *ctx, const u8 *in, size_t in_len); + +Finally, the digest is generated using:: + + void sha3_final(struct sha3_ctx *ctx, u8 *out); + +which also zeroizes the context. The length of the digest is determined by the +initialization function that was called. + + +Extendable-Output Functions +=========================== + +The following functions compute the SHA-3 extendable-output functions (XOFs):: + + void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len); + void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len); + +For users that need to provide the input data incrementally and/or receive the +output data incrementally, an incremental API is also provided. The incremental +API uses the following struct:: + + struct shake_ctx { ... }; + +Initialization is done with one of:: + + void shake128_init(struct shake_ctx *ctx); + void shake256_init(struct shake_ctx *ctx); + +Input data is then added with any number of calls to:: + + void shake_update(struct shake_ctx *ctx, const u8 *in, size_t in_len); + +Finally, the output data is extracted with any number of calls to:: + + void shake_squeeze(struct shake_ctx *ctx, u8 *out, size_t out_len); + +and telling it how much data should be extracted. Note that performing multiple +squeezes, with the output laid consecutively in a buffer, gets exactly the same +output as doing a single squeeze for the combined amount over the same buffer. + +More input data cannot be added after squeezing has started. + +Once all the desired output has been extracted, zeroize the context:: + + void shake_zeroize_ctx(struct shake_ctx *ctx); + + +References +========== + +.. 
[1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + + +API Function Reference +====================== + +.. kernel-doc:: include/crypto/sha3.h diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index 41e1b83a6d91..c0c468ee099e 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for SHA-3 algorithms + * + * See also Documentation/crypto/sha3.rst */ #ifndef __CRYPTO_SHA3_H__ #define __CRYPTO_SHA3_H__ #include +#include #define SHA3_224_DIGEST_SIZE (224 / 8) #define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) @@ -23,14 +26,327 @@ #define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) #define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1 +/* + * SHAKE128 and SHAKE256 actually have variable output size, but this is used to + * calculate the block size (rate) analogously to the above. + */ +#define SHAKE128_DEFAULT_SIZE (128 / 8) +#define SHAKE128_BLOCK_SIZE (200 - 2 * SHAKE128_DEFAULT_SIZE) +#define SHAKE256_DEFAULT_SIZE (256 / 8) +#define SHAKE256_BLOCK_SIZE (200 - 2 * SHAKE256_DEFAULT_SIZE) + #define SHA3_STATE_SIZE 200 struct shash_desc; +int crypto_sha3_init(struct shash_desc *desc); + +/* + * State for the Keccak-f[1600] permutation: 25 64-bit words. + * + * We usually keep the state words as little-endian, to make absorbing and + * squeezing easier. (It means that absorbing and squeezing can just treat the + * state as a byte array.) The state words are converted to native-endian only + * temporarily by implementations of the permutation that need native-endian + * words. Of course, that conversion is a no-op on little-endian machines. + */ struct sha3_state { - u64 st[SHA3_STATE_SIZE / 8]; + union { + u64 st[SHA3_STATE_SIZE / 8]; /* temporarily retained for compatibility purposes */ + + __le64 words[SHA3_STATE_SIZE / 8]; + u8 bytes[SHA3_STATE_SIZE]; + + u64 native_words[SHA3_STATE_SIZE / 8]; /* see comment above */ + }; }; -int crypto_sha3_init(struct shash_desc *desc); +/* Internal context, shared by the digests (SHA3-*) and the XOFs (SHAKE*) */ +struct __sha3_ctx { + struct sha3_state state; + u8 digest_size; /* Digests only: the digest size in bytes */ + u8 block_size; /* Block size in bytes */ + u8 absorb_offset; /* Index of next state byte to absorb into */ + u8 squeeze_offset; /* XOFs only: index of next state byte to extract */ +}; + +void __sha3_update(struct __sha3_ctx *ctx, const u8 *in, size_t in_len); + +/** + * struct sha3_ctx - Context for SHA3-224, SHA3-256, SHA3-384, or SHA3-512 + * @ctx: private + */ +struct sha3_ctx { + struct __sha3_ctx ctx; +}; + +/** + * sha3_zeroize_ctx() - Zeroize a SHA-3 context + * @ctx: The context to zeroize + * + * This is already called by sha3_final(). Call this explicitly when abandoning + * a context without calling sha3_final(). + */ +static inline void sha3_zeroize_ctx(struct sha3_ctx *ctx) +{ + memzero_explicit(ctx, sizeof(*ctx)); +} + +/** + * struct shake_ctx - Context for SHAKE128 or SHAKE256 + * @ctx: private + */ +struct shake_ctx { + struct __sha3_ctx ctx; +}; + +/** + * shake_zeroize_ctx() - Zeroize a SHAKE context + * @ctx: The context to zeroize + * + * Call this after the last squeeze. + */ +static inline void shake_zeroize_ctx(struct shake_ctx *ctx) +{ + memzero_explicit(ctx, sizeof(*ctx)); +} + +/** + * sha3_224_init() - Initialize a context for SHA3-224 + * @ctx: The context to initialize + * + * This begins a new SHA3-224 message digest computation. + * + * Context: Any context. 
+ */ +static inline void sha3_224_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_224_DIGEST_SIZE, + .ctx.block_size = SHA3_224_BLOCK_SIZE, + }; +} + +/** + * sha3_256_init() - Initialize a context for SHA3-256 + * @ctx: The context to initialize + * + * This begins a new SHA3-256 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_256_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_256_DIGEST_SIZE, + .ctx.block_size = SHA3_256_BLOCK_SIZE, + }; +} + +/** + * sha3_384_init() - Initialize a context for SHA3-384 + * @ctx: The context to initialize + * + * This begins a new SHA3-384 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_384_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_384_DIGEST_SIZE, + .ctx.block_size = SHA3_384_BLOCK_SIZE, + }; +} + +/** + * sha3_512_init() - Initialize a context for SHA3-512 + * @ctx: The context to initialize + * + * This begins a new SHA3-512 message digest computation. + * + * Context: Any context. + */ +static inline void sha3_512_init(struct sha3_ctx *ctx) +{ + *ctx = (struct sha3_ctx){ + .ctx.digest_size = SHA3_512_DIGEST_SIZE, + .ctx.block_size = SHA3_512_BLOCK_SIZE, + }; +} + +/** + * sha3_update() - Update a SHA-3 digest context with input data + * @ctx: The context to update; must have been initialized + * @in: The input data + * @in_len: Length of the input data in bytes + * + * This can be called any number of times to add data to a SHA3-224, SHA3-256, + * SHA3-384, or SHA3-512 digest (depending on which init function was called). + * + * Context: Any context. + */ +static inline void sha3_update(struct sha3_ctx *ctx, + const u8 *in, size_t in_len) +{ + __sha3_update(&ctx->ctx, in, in_len); +} + +/** + * sha3_final() - Finish computing a SHA-3 message digest + * @ctx: The context to finalize; must have been initialized + * @out: (output) The resulting SHA3-224, SHA3-256, SHA3-384, or SHA3-512 + * message digest, matching the init function that was called. Note that + * the size differs for each one; see SHA3_*_DIGEST_SIZE. + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void sha3_final(struct sha3_ctx *ctx, u8 *out); + +/** + * shake128_init() - Initialize a context for SHAKE128 + * @ctx: The context to initialize + * + * This begins a new SHAKE128 extendable-output function (XOF) computation. + * + * Context: Any context. + */ +static inline void shake128_init(struct shake_ctx *ctx) +{ + *ctx = (struct shake_ctx){ + .ctx.block_size = SHAKE128_BLOCK_SIZE, + }; +} + +/** + * shake256_init() - Initialize a context for SHAKE256 + * @ctx: The context to initialize + * + * This begins a new SHAKE256 extendable-output function (XOF) computation. + * + * Context: Any context. + */ +static inline void shake256_init(struct shake_ctx *ctx) +{ + *ctx = (struct shake_ctx){ + .ctx.block_size = SHAKE256_BLOCK_SIZE, + }; +} + +/** + * shake_update() - Update a SHAKE context with input data + * @ctx: The context to update; must have been initialized + * @in: The input data + * @in_len: Length of the input data in bytes + * + * This can be called any number of times to add more input data to SHAKE128 or + * SHAKE256. This cannot be called after squeezing has begun. + * + * Context: Any context. 
+ */ +static inline void shake_update(struct shake_ctx *ctx, + const u8 *in, size_t in_len) +{ + __sha3_update(&ctx->ctx, in, in_len); +} + +/** + * shake_squeeze() - Generate output from SHAKE128 or SHAKE256 + * @ctx: The context to squeeze; must have been initialized + * @out: Where to write the resulting output data + * @out_len: The amount of data to extract to @out in bytes + * + * This may be called multiple times. A number of consecutive squeezes laid + * end-to-end will yield the same output as one big squeeze generating the same + * total amount of output. More input cannot be provided after squeezing has + * begun. After the last squeeze, call shake_zeroize_ctx(). + * + * Context: Any context. + */ +void shake_squeeze(struct shake_ctx *ctx, u8 *out, size_t out_len); + +/** + * sha3_224() - Compute SHA3-224 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-224 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]); + +/** + * sha3_256() - Compute SHA3-256 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-256 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]); + +/** + * sha3_384() - Compute SHA3-384 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-384 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]); + +/** + * sha3_512() - Compute SHA3-512 digest in one shot + * @in: The input data to be digested + * @in_len: Length of the input data in bytes + * @out: The buffer into which the digest will be stored + * + * Convenience function that computes a SHA3-512 digest. Use this instead of + * the incremental API if you're able to provide all the input at once. + * + * Context: Any context. + */ +void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]); + +/** + * shake128() - Compute SHAKE128 in one shot + * @in: The input data to be used + * @in_len: Length of the input data in bytes + * @out: The buffer into which the output will be stored + * @out_len: Length of the output to produce in bytes + * + * Convenience function that computes SHAKE128 in one shot. Use this instead of + * the incremental API if you're able to provide all the input at once as well + * as receive all the output at once. All output lengths are supported. + * + * Context: Any context. 
+ */ +void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len); + +/** + * shake256() - Compute SHAKE256 in one shot + * @in: The input data to be used + * @in_len: Length of the input data in bytes + * @out: The buffer into which the output will be stored + * @out_len: Length of the output to produce in bytes + * + * Convenience function that computes SHAKE256 in one shot. Use this instead of + * the incremental API if you're able to provide all the input at once as well + * as receive all the output at once. All output lengths are supported. + * + * Context: Any context. + */ +void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len); -#endif +#endif /* __CRYPTO_SHA3_H__ */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 280b888153bf..a05f5a349cd8 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -195,6 +195,13 @@ config CRYPTO_LIB_SHA512_ARCH default y if SPARC64 default y if X86_64 +config CRYPTO_LIB_SHA3 + tristate + select CRYPTO_LIB_UTILS + help + The SHA3 library functions. Select this if your module uses any of + the functions from . + config CRYPTO_LIB_SM3 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index bc26777d08e9..0cfdb511f32b 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -278,6 +278,11 @@ endif # CONFIG_CRYPTO_LIB_SHA512_ARCH ################################################################################ +obj-$(CONFIG_CRYPTO_LIB_SHA3) += libsha3.o +libsha3-y := sha3.o + +################################################################################ + obj-$(CONFIG_MPILIB) += mpi/ obj-$(CONFIG_CRYPTO_SELFTESTS_FULL) += simd.o diff --git a/lib/crypto/sha3.c b/lib/crypto/sha3.c new file mode 100644 index 000000000000..56d8353f9c5b --- /dev/null +++ b/lib/crypto/sha3.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-3, as specified in + * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * + * SHA-3 code by Jeff Garzik + * Ard Biesheuvel + * David Howells + * + * See also Documentation/crypto/sha3.rst + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include + +/* + * On some 32-bit architectures, such as h8300, GCC ends up using over 1 KB of + * stack if the round calculation gets inlined into the loop in + * sha3_keccakf_generic(). On the other hand, on 64-bit architectures with + * plenty of [64-bit wide] general purpose registers, not inlining it severely + * hurts performance. So let's use 64-bitness as a heuristic to decide whether + * to inline or not. + */ +#ifdef CONFIG_64BIT +#define SHA3_INLINE inline +#else +#define SHA3_INLINE noinline +#endif + +#define SHA3_KECCAK_ROUNDS 24 + +static const u64 sha3_keccakf_rndc[SHA3_KECCAK_ROUNDS] = { + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, + 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, + 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL, + 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, + 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, + 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL, + 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL +}; + +/* + * Perform a single round of Keccak mixing. 
+ */ +static SHA3_INLINE void sha3_keccakf_one_round_generic(u64 st[25]) +{ + u64 t[5], tt, bc[5]; + + /* Theta */ + bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; + bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; + bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; + bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; + bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; + + t[0] = bc[4] ^ rol64(bc[1], 1); + t[1] = bc[0] ^ rol64(bc[2], 1); + t[2] = bc[1] ^ rol64(bc[3], 1); + t[3] = bc[2] ^ rol64(bc[4], 1); + t[4] = bc[3] ^ rol64(bc[0], 1); + + st[0] ^= t[0]; + + /* Rho Pi */ + tt = st[1]; + st[ 1] = rol64(st[ 6] ^ t[1], 44); + st[ 6] = rol64(st[ 9] ^ t[4], 20); + st[ 9] = rol64(st[22] ^ t[2], 61); + st[22] = rol64(st[14] ^ t[4], 39); + st[14] = rol64(st[20] ^ t[0], 18); + st[20] = rol64(st[ 2] ^ t[2], 62); + st[ 2] = rol64(st[12] ^ t[2], 43); + st[12] = rol64(st[13] ^ t[3], 25); + st[13] = rol64(st[19] ^ t[4], 8); + st[19] = rol64(st[23] ^ t[3], 56); + st[23] = rol64(st[15] ^ t[0], 41); + st[15] = rol64(st[ 4] ^ t[4], 27); + st[ 4] = rol64(st[24] ^ t[4], 14); + st[24] = rol64(st[21] ^ t[1], 2); + st[21] = rol64(st[ 8] ^ t[3], 55); + st[ 8] = rol64(st[16] ^ t[1], 45); + st[16] = rol64(st[ 5] ^ t[0], 36); + st[ 5] = rol64(st[ 3] ^ t[3], 28); + st[ 3] = rol64(st[18] ^ t[3], 21); + st[18] = rol64(st[17] ^ t[2], 15); + st[17] = rol64(st[11] ^ t[1], 10); + st[11] = rol64(st[ 7] ^ t[2], 6); + st[ 7] = rol64(st[10] ^ t[0], 3); + st[10] = rol64( tt ^ t[1], 1); + + /* Chi */ + bc[ 0] = ~st[ 1] & st[ 2]; + bc[ 1] = ~st[ 2] & st[ 3]; + bc[ 2] = ~st[ 3] & st[ 4]; + bc[ 3] = ~st[ 4] & st[ 0]; + bc[ 4] = ~st[ 0] & st[ 1]; + st[ 0] ^= bc[ 0]; + st[ 1] ^= bc[ 1]; + st[ 2] ^= bc[ 2]; + st[ 3] ^= bc[ 3]; + st[ 4] ^= bc[ 4]; + + bc[ 0] = ~st[ 6] & st[ 7]; + bc[ 1] = ~st[ 7] & st[ 8]; + bc[ 2] = ~st[ 8] & st[ 9]; + bc[ 3] = ~st[ 9] & st[ 5]; + bc[ 4] = ~st[ 5] & st[ 6]; + st[ 5] ^= bc[ 0]; + st[ 6] ^= bc[ 1]; + st[ 7] ^= bc[ 2]; + st[ 8] ^= bc[ 3]; + st[ 9] ^= bc[ 4]; + + bc[ 0] = ~st[11] & st[12]; + bc[ 1] = ~st[12] & st[13]; + bc[ 2] = ~st[13] & st[14]; + bc[ 3] = ~st[14] & st[10]; + bc[ 4] = ~st[10] & st[11]; + st[10] ^= bc[ 0]; + st[11] ^= bc[ 1]; + st[12] ^= bc[ 2]; + st[13] ^= bc[ 3]; + st[14] ^= bc[ 4]; + + bc[ 0] = ~st[16] & st[17]; + bc[ 1] = ~st[17] & st[18]; + bc[ 2] = ~st[18] & st[19]; + bc[ 3] = ~st[19] & st[15]; + bc[ 4] = ~st[15] & st[16]; + st[15] ^= bc[ 0]; + st[16] ^= bc[ 1]; + st[17] ^= bc[ 2]; + st[18] ^= bc[ 3]; + st[19] ^= bc[ 4]; + + bc[ 0] = ~st[21] & st[22]; + bc[ 1] = ~st[22] & st[23]; + bc[ 2] = ~st[23] & st[24]; + bc[ 3] = ~st[24] & st[20]; + bc[ 4] = ~st[20] & st[21]; + st[20] ^= bc[ 0]; + st[21] ^= bc[ 1]; + st[22] ^= bc[ 2]; + st[23] ^= bc[ 3]; + st[24] ^= bc[ 4]; +} + +/* Generic implementation of the Keccak-f[1600] permutation */ +static void sha3_keccakf_generic(struct sha3_state *state) +{ + /* + * Temporarily convert the state words from little-endian to native- + * endian so that they can be operated on. Note that on little-endian + * machines this conversion is a no-op and is optimized out. 
+ */ + + for (int i = 0; i < ARRAY_SIZE(state->words); i++) + state->native_words[i] = le64_to_cpu(state->words[i]); + + for (int round = 0; round < SHA3_KECCAK_ROUNDS; round++) { + sha3_keccakf_one_round_generic(state->native_words); + /* Iota */ + state->native_words[0] ^= sha3_keccakf_rndc[round]; + } + + for (int i = 0; i < ARRAY_SIZE(state->words); i++) + state->words[i] = cpu_to_le64(state->native_words[i]); +} + +/* + * Generic implementation of absorbing the given nonzero number of full blocks + * into the sponge function Keccak[r=8*block_size, c=1600-8*block_size]. + */ +static void __maybe_unused +sha3_absorb_blocks_generic(struct sha3_state *state, const u8 *data, + size_t nblocks, size_t block_size) +{ + do { + for (size_t i = 0; i < block_size; i += 8) + state->words[i / 8] ^= get_unaligned((__le64 *)&data[i]); + sha3_keccakf_generic(state); + data += block_size; + } while (--nblocks); +} + +#ifdef CONFIG_CRYPTO_LIB_SHA3_ARCH +#include "sha3.h" /* $(SRCARCH)/sha3.h */ +#else +#define sha3_keccakf sha3_keccakf_generic +#define sha3_absorb_blocks sha3_absorb_blocks_generic +#endif + +void __sha3_update(struct __sha3_ctx *ctx, const u8 *in, size_t in_len) +{ + const size_t block_size = ctx->block_size; + size_t absorb_offset = ctx->absorb_offset; + + /* Warn if squeezing has already begun. */ + WARN_ON_ONCE(absorb_offset >= block_size); + + if (absorb_offset && absorb_offset + in_len >= block_size) { + crypto_xor(&ctx->state.bytes[absorb_offset], in, + block_size - absorb_offset); + in += block_size - absorb_offset; + in_len -= block_size - absorb_offset; + sha3_keccakf(&ctx->state); + absorb_offset = 0; + } + + if (in_len >= block_size) { + size_t nblocks = in_len / block_size; + + sha3_absorb_blocks(&ctx->state, in, nblocks, block_size); + in += nblocks * block_size; + in_len -= nblocks * block_size; + } + + if (in_len) { + crypto_xor(&ctx->state.bytes[absorb_offset], in, in_len); + absorb_offset += in_len; + } + ctx->absorb_offset = absorb_offset; +} +EXPORT_SYMBOL_GPL(__sha3_update); + +void sha3_final(struct sha3_ctx *sha3_ctx, u8 *out) +{ + struct __sha3_ctx *ctx = &sha3_ctx->ctx; + + ctx->state.bytes[ctx->absorb_offset] ^= 0x06; + ctx->state.bytes[ctx->block_size - 1] ^= 0x80; + sha3_keccakf(&ctx->state); + memcpy(out, ctx->state.bytes, ctx->digest_size); + sha3_zeroize_ctx(sha3_ctx); +} +EXPORT_SYMBOL_GPL(sha3_final); + +void shake_squeeze(struct shake_ctx *shake_ctx, u8 *out, size_t out_len) +{ + struct __sha3_ctx *ctx = &shake_ctx->ctx; + const size_t block_size = ctx->block_size; + size_t squeeze_offset = ctx->squeeze_offset; + + if (ctx->absorb_offset < block_size) { + /* First squeeze: */ + + /* Add the domain separation suffix and padding. */ + ctx->state.bytes[ctx->absorb_offset] ^= 0x1f; + ctx->state.bytes[block_size - 1] ^= 0x80; + + /* Indicate that squeezing has begun. */ + ctx->absorb_offset = block_size; + + /* + * Indicate that no output is pending yet, i.e. sha3_keccakf() + * will need to be called before the first copy. 
+ */ + squeeze_offset = block_size; + } + while (out_len) { + if (squeeze_offset == block_size) { + sha3_keccakf(&ctx->state); + squeeze_offset = 0; + } + size_t copy = min(out_len, block_size - squeeze_offset); + + memcpy(out, &ctx->state.bytes[squeeze_offset], copy); + out += copy; + out_len -= copy; + squeeze_offset += copy; + } + ctx->squeeze_offset = squeeze_offset; +} +EXPORT_SYMBOL_GPL(shake_squeeze); + +void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]) +{ + struct sha3_ctx ctx; + + sha3_224_init(&ctx); + sha3_update(&ctx, in, in_len); + sha3_final(&ctx, out); +} +EXPORT_SYMBOL_GPL(sha3_224); + +void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]) +{ + struct sha3_ctx ctx; + + sha3_256_init(&ctx); + sha3_update(&ctx, in, in_len); + sha3_final(&ctx, out); +} +EXPORT_SYMBOL_GPL(sha3_256); + +void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]) +{ + struct sha3_ctx ctx; + + sha3_384_init(&ctx); + sha3_update(&ctx, in, in_len); + sha3_final(&ctx, out); +} +EXPORT_SYMBOL_GPL(sha3_384); + +void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]) +{ + struct sha3_ctx ctx; + + sha3_512_init(&ctx); + sha3_update(&ctx, in, in_len); + sha3_final(&ctx, out); +} +EXPORT_SYMBOL_GPL(sha3_512); + +void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len) +{ + struct shake_ctx ctx; + + shake128_init(&ctx); + shake_update(&ctx, in, in_len); + shake_squeeze(&ctx, out, out_len); + shake_zeroize_ctx(&ctx); +} +EXPORT_SYMBOL_GPL(shake128); + +void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len) +{ + struct shake_ctx ctx; + + shake256_init(&ctx); + shake_update(&ctx, in, in_len); + shake_squeeze(&ctx, out, out_len); + shake_zeroize_ctx(&ctx); +} +EXPORT_SYMBOL_GPL(shake256); + +#ifdef sha3_mod_init_arch +static int __init sha3_mod_init(void) +{ + sha3_mod_init_arch(); + return 0; +} +subsys_initcall(sha3_mod_init); + +static void __exit sha3_mod_exit(void) +{ +} +module_exit(sha3_mod_exit); +#endif + +MODULE_DESCRIPTION("SHA-3 library functions"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From c0db39e253ebca9dea57e8885450ff0a0a6d6155 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sat, 25 Oct 2025 22:50:21 -0700 Subject: lib/crypto: sha3: Move SHA3 Iota step mapping into round function In crypto/sha3_generic.c, the keccakf() function calls keccakf_round() to do four of Keccak-f's five step mappings. However, it does not do the Iota step mapping - presumably because that is dependent on round number, whereas Theta, Rho, Pi and Chi are not. Note that the keccakf_round() function needs to be explicitly non-inlined on certain architectures as gcc's produced output will (or used to) use over 1KiB of stack space if inlined. Now, this code was copied more or less verbatim into lib/crypto/sha3.c, so that has the same aesthetic issue. Fix this there by passing the round number into sha3_keccakf_one_round_generic() and doing the Iota step mapping there. crypto/sha3_generic.c is left untouched as that will be converted to use lib/crypto/sha3.c at some point. 
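In short, the Iota XOR moves out of the caller's loop and into the round
function, which now takes the round number. A condensed view of the change,
matching the hunks below:

	/* Before: the caller applied Iota after each round. */
	for (int round = 0; round < SHA3_KECCAK_ROUNDS; round++) {
		sha3_keccakf_one_round_generic(state->native_words);
		state->native_words[0] ^= sha3_keccakf_rndc[round]; /* Iota */
	}

	/* After: the round number is passed in and Iota is done inside. */
	for (int round = 0; round < SHA3_KECCAK_ROUNDS; round++)
		sha3_keccakf_one_round_generic(state->native_words, round);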
Suggested-by: Eric Biggers Signed-off-by: David Howells Tested-by: Harald Freudenberger Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251026055032.1413733-5-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/sha3.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/sha3.c b/lib/crypto/sha3.c index 56d8353f9c5b..2102c2ecac96 100644 --- a/lib/crypto/sha3.c +++ b/lib/crypto/sha3.c @@ -48,7 +48,7 @@ static const u64 sha3_keccakf_rndc[SHA3_KECCAK_ROUNDS] = { /* * Perform a single round of Keccak mixing. */ -static SHA3_INLINE void sha3_keccakf_one_round_generic(u64 st[25]) +static SHA3_INLINE void sha3_keccakf_one_round_generic(u64 st[25], int round) { u64 t[5], tt, bc[5]; @@ -149,6 +149,9 @@ static SHA3_INLINE void sha3_keccakf_one_round_generic(u64 st[25]) st[22] ^= bc[ 2]; st[23] ^= bc[ 3]; st[24] ^= bc[ 4]; + + /* Iota */ + st[0] ^= sha3_keccakf_rndc[round]; } /* Generic implementation of the Keccak-f[1600] permutation */ @@ -163,11 +166,8 @@ static void sha3_keccakf_generic(struct sha3_state *state) for (int i = 0; i < ARRAY_SIZE(state->words); i++) state->native_words[i] = le64_to_cpu(state->words[i]); - for (int round = 0; round < SHA3_KECCAK_ROUNDS; round++) { - sha3_keccakf_one_round_generic(state->native_words); - /* Iota */ - state->native_words[0] ^= sha3_keccakf_rndc[round]; - } + for (int round = 0; round < SHA3_KECCAK_ROUNDS; round++) + sha3_keccakf_one_round_generic(state->native_words, round); for (int i = 0; i < ARRAY_SIZE(state->words); i++) state->words[i] = cpu_to_le64(state->native_words[i]); -- cgit v1.2.3 From 6fa873641c0bdfa849130a81aa7339ccfd42b52a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 25 Oct 2025 22:50:24 -0700 Subject: lib/crypto: sha3: Add FIPS cryptographic algorithm self-test Since the SHA-3 algorithms are FIPS-approved, add the boot-time self-test which is apparently required. This closely follows the corresponding SHA-1, SHA-256, and SHA-512 tests. 
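Condensed, the added initcall logic has the following shape (the full hunks
are below). sha3_256() is the one-shot library function, and the expected
digest comes from the generated lib/crypto/fips.h:

	static int __init sha3_mod_init(void)
	{
		if (fips_enabled) {
			u8 hash[SHA3_256_DIGEST_SIZE];

			sha3_256(fips_test_data, sizeof(fips_test_data), hash);
			if (memcmp(fips_test_sha3_256_value, hash,
				   sizeof(hash)) != 0)
				panic("sha3: FIPS self-test failed\n");
		}
		return 0;
	}
	subsys_initcall(sha3_mod_init);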
Tested-by: Harald Freudenberger Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251026055032.1413733-8-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/fips.h | 7 +++++++ lib/crypto/sha3.c | 17 ++++++++++++++++- scripts/crypto/gen-fips-testvecs.py | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) (limited to 'lib/crypto') diff --git a/lib/crypto/fips.h b/lib/crypto/fips.h index 78a1bdd33a15..023410c2e0db 100644 --- a/lib/crypto/fips.h +++ b/lib/crypto/fips.h @@ -36,3 +36,10 @@ static const u8 fips_test_hmac_sha512_value[] __initconst __maybe_unused = { 0x57, 0x0b, 0x15, 0x38, 0x95, 0xd8, 0xa3, 0x81, 0xba, 0xb3, 0x15, 0x37, 0x5c, 0x6d, 0x57, 0x2b, }; + +static const u8 fips_test_sha3_256_value[] __initconst __maybe_unused = { + 0x77, 0xc4, 0x8b, 0x69, 0x70, 0x5f, 0x0a, 0xb1, + 0xb1, 0xa5, 0x82, 0x0a, 0x22, 0x2b, 0x49, 0x31, + 0xba, 0x9b, 0xb6, 0xaa, 0x32, 0xa7, 0x97, 0x00, + 0x98, 0xdb, 0xff, 0xe7, 0xc6, 0xde, 0xb5, 0x82, +}; diff --git a/lib/crypto/sha3.c b/lib/crypto/sha3.c index 2102c2ecac96..8434f9bff138 100644 --- a/lib/crypto/sha3.c +++ b/lib/crypto/sha3.c @@ -17,6 +17,7 @@ #include #include #include +#include "fips.h" /* * On some 32-bit architectures, such as h8300, GCC ends up using over 1 KB of @@ -341,10 +342,24 @@ void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len) } EXPORT_SYMBOL_GPL(shake256); -#ifdef sha3_mod_init_arch +#if defined(sha3_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS) static int __init sha3_mod_init(void) { +#ifdef sha3_mod_init_arch sha3_mod_init_arch(); +#endif + if (fips_enabled) { + /* + * FIPS cryptographic algorithm self-test. As per the FIPS + * Implementation Guidance, testing any SHA-3 algorithm + * satisfies the test requirement for all of them. + */ + u8 hash[SHA3_256_DIGEST_SIZE]; + + sha3_256(fips_test_data, sizeof(fips_test_data), hash); + if (memcmp(fips_test_sha3_256_value, hash, sizeof(hash)) != 0) + panic("sha3: FIPS self-test failed\n"); + } return 0; } subsys_initcall(sha3_mod_init); diff --git a/scripts/crypto/gen-fips-testvecs.py b/scripts/crypto/gen-fips-testvecs.py index 2956f88b764a..db873f88619a 100755 --- a/scripts/crypto/gen-fips-testvecs.py +++ b/scripts/crypto/gen-fips-testvecs.py @@ -5,6 +5,7 @@ # # Copyright 2025 Google LLC +import hashlib import hmac fips_test_data = b"fips test data\0\0" @@ -30,3 +31,6 @@ for alg in 'sha1', 'sha256', 'sha512': ctx = hmac.new(fips_test_key, digestmod=alg) ctx.update(fips_test_data) print_static_u8_array_definition(f'fips_test_hmac_{alg}_value', ctx.digest()) + +print_static_u8_array_definition(f'fips_test_sha3_256_value', + hashlib.sha3_256(fips_test_data).digest()) -- cgit v1.2.3 From 1e29a750572a25200fcea995d91e5f6448f340c0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 25 Oct 2025 22:50:26 -0700 Subject: lib/crypto: arm64/sha3: Migrate optimized code into library Instead of exposing the arm64-optimized SHA-3 code via arm64-specific crypto_shash algorithms, instead just implement the sha3_absorb_blocks() and sha3_keccakf() library functions. This is much simpler, it makes the SHA-3 library functions be arm64-optimized, and it fixes the longstanding issue where the arm64-optimized SHA-3 code was disabled by default. SHA-3 still remains available through crypto_shash, but individual architectures no longer need to handle it. Note: to see the diff from arch/arm64/crypto/sha3-ce-glue.c to lib/crypto/arm64/sha3.h, view this commit with 'git show -M10'. 
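For reference, the library hook ends up looking roughly like the ARM BLAKE2b
one earlier in this series. The following is an illustrative sketch of
lib/crypto/arm64/sha3.h, built from the sha3_ce_transform() prototype in the
assembly and the blake2b.h pattern; it is not a copy of the actual file, the
feature-detection call and return-value semantics here are assumptions, and
sha3_keccakf() is wired up similarly:

	static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3);

	asmlinkage size_t sha3_ce_transform(struct sha3_state *state,
					    const u8 *data, size_t nblocks,
					    size_t block_size);

	static void sha3_absorb_blocks(struct sha3_state *state, const u8 *data,
				       size_t nblocks, size_t block_size)
	{
		if (!static_branch_likely(&have_sha3) || !may_use_simd()) {
			sha3_absorb_blocks_generic(state, data, nblocks,
						   block_size);
			return;
		}
		do {
			size_t rem;

			kernel_neon_begin();
			/* Assumed to return the number of blocks left
			 * unprocessed, so long inputs can yield the NEON
			 * unit periodically. */
			rem = sha3_ce_transform(state, data, nblocks,
						block_size);
			kernel_neon_end();
			data += (nblocks - rem) * block_size;
			nblocks = rem;
		} while (nblocks);
	}

	#define sha3_mod_init_arch sha3_mod_init_arch
	static void sha3_mod_init_arch(void)
	{
		if (cpu_have_named_feature(SHA3))
			static_branch_enable(&have_sha3);
	}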
Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251026055032.1413733-10-ebiggers@kernel.org Signed-off-by: Eric Biggers --- arch/arm64/configs/defconfig | 2 +- arch/arm64/crypto/Kconfig | 11 -- arch/arm64/crypto/Makefile | 3 - arch/arm64/crypto/sha3-ce-core.S | 213 --------------------------------------- arch/arm64/crypto/sha3-ce-glue.c | 150 --------------------------- lib/crypto/Kconfig | 5 + lib/crypto/Makefile | 5 + lib/crypto/arm64/sha3-ce-core.S | 213 +++++++++++++++++++++++++++++++++++++++ lib/crypto/arm64/sha3.h | 62 ++++++++++++ 9 files changed, 286 insertions(+), 378 deletions(-) delete mode 100644 arch/arm64/crypto/sha3-ce-core.S delete mode 100644 arch/arm64/crypto/sha3-ce-glue.c create mode 100644 lib/crypto/arm64/sha3-ce-core.S create mode 100644 lib/crypto/arm64/sha3.h (limited to 'lib/crypto') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index e3a2d37bd104..20dd3a39faea 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1783,10 +1783,10 @@ CONFIG_CRYPTO_CHACHA20=m CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_SHA3_ARM64=m CONFIG_CRYPTO_SM3_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 91f3093eee6a..376d6b50743f 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -25,17 +25,6 @@ config CRYPTO_NHPOLY1305_NEON Architecture: arm64 using: - NEON (Advanced SIMD) extensions -config CRYPTO_SHA3_ARM64 - tristate "Hash functions: SHA-3 (ARMv8.2 Crypto Extensions)" - depends on KERNEL_MODE_NEON - select CRYPTO_HASH - select CRYPTO_SHA3 - help - SHA-3 secure hash algorithms (FIPS 202) - - Architecture: arm64 using: - - ARMv8.2 Crypto Extensions - config CRYPTO_SM3_NEON tristate "Hash functions: SM3 (NEON)" depends on KERNEL_MODE_NEON diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index a8b2cdbe202c..fd3d590fa113 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -5,9 +5,6 @@ # Copyright (C) 2014 Linaro Ltd # -obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o -sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o - obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S deleted file mode 100644 index b62bd714839b..000000000000 --- a/arch/arm64/crypto/sha3-ce-core.S +++ /dev/null @@ -1,213 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions - * - * Copyright (C) 2018 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include - - .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 - .set .Lv\b\().2d, \b - .set .Lv\b\().16b, \b - .endr - - /* - * ARMv8.2 Crypto Extensions instructions - */ - .macro eor3, rd, rn, rm, ra - .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) - .endm - - .macro rax1, rd, rn, rm - .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16) - .endm - - .macro bcax, rd, rn, rm, ra - .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) - .endm - - .macro xar, rd, rn, rm, imm6 - .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16) - .endm - - /* - * size_t sha3_ce_transform(struct sha3_state *state, const u8 *data, - * size_t nblocks, size_t block_size) - * - * block_size is assumed to be one of 72 (SHA3-512), 104 (SHA3-384), 136 - * (SHA3-256 and SHAKE256), 144 (SHA3-224), or 168 (SHAKE128). - */ - .text -SYM_FUNC_START(sha3_ce_transform) - /* load state */ - add x8, x0, #32 - ld1 { v0.1d- v3.1d}, [x0] - ld1 { v4.1d- v7.1d}, [x8], #32 - ld1 { v8.1d-v11.1d}, [x8], #32 - ld1 {v12.1d-v15.1d}, [x8], #32 - ld1 {v16.1d-v19.1d}, [x8], #32 - ld1 {v20.1d-v23.1d}, [x8], #32 - ld1 {v24.1d}, [x8] - -0: sub x2, x2, #1 - mov w8, #24 - adr_l x9, .Lsha3_rcon - - /* load input */ - ld1 {v25.8b-v28.8b}, [x1], #32 - ld1 {v29.8b}, [x1], #8 - eor v0.8b, v0.8b, v25.8b - eor v1.8b, v1.8b, v26.8b - eor v2.8b, v2.8b, v27.8b - eor v3.8b, v3.8b, v28.8b - eor v4.8b, v4.8b, v29.8b - - ld1 {v25.8b-v28.8b}, [x1], #32 - eor v5.8b, v5.8b, v25.8b - eor v6.8b, v6.8b, v26.8b - eor v7.8b, v7.8b, v27.8b - eor v8.8b, v8.8b, v28.8b - cmp x3, #72 - b.eq 3f /* SHA3-512 (block_size=72)? */ - - ld1 {v25.8b-v28.8b}, [x1], #32 - eor v9.8b, v9.8b, v25.8b - eor v10.8b, v10.8b, v26.8b - eor v11.8b, v11.8b, v27.8b - eor v12.8b, v12.8b, v28.8b - cmp x3, #104 - b.eq 3f /* SHA3-384 (block_size=104)? */ - - ld1 {v25.8b-v28.8b}, [x1], #32 - eor v13.8b, v13.8b, v25.8b - eor v14.8b, v14.8b, v26.8b - eor v15.8b, v15.8b, v27.8b - eor v16.8b, v16.8b, v28.8b - cmp x3, #144 - b.lt 3f /* SHA3-256 or SHAKE256 (block_size=136)? */ - b.eq 2f /* SHA3-224 (block_size=144)? 
*/ - - /* SHAKE128 (block_size=168) */ - ld1 {v25.8b-v28.8b}, [x1], #32 - eor v17.8b, v17.8b, v25.8b - eor v18.8b, v18.8b, v26.8b - eor v19.8b, v19.8b, v27.8b - eor v20.8b, v20.8b, v28.8b - b 3f -2: - /* SHA3-224 (block_size=144) */ - ld1 {v25.8b}, [x1], #8 - eor v17.8b, v17.8b, v25.8b - -3: sub w8, w8, #1 - - eor3 v29.16b, v4.16b, v9.16b, v14.16b - eor3 v26.16b, v1.16b, v6.16b, v11.16b - eor3 v28.16b, v3.16b, v8.16b, v13.16b - eor3 v25.16b, v0.16b, v5.16b, v10.16b - eor3 v27.16b, v2.16b, v7.16b, v12.16b - eor3 v29.16b, v29.16b, v19.16b, v24.16b - eor3 v26.16b, v26.16b, v16.16b, v21.16b - eor3 v28.16b, v28.16b, v18.16b, v23.16b - eor3 v25.16b, v25.16b, v15.16b, v20.16b - eor3 v27.16b, v27.16b, v17.16b, v22.16b - - rax1 v30.2d, v29.2d, v26.2d // bc[0] - rax1 v26.2d, v26.2d, v28.2d // bc[2] - rax1 v28.2d, v28.2d, v25.2d // bc[4] - rax1 v25.2d, v25.2d, v27.2d // bc[1] - rax1 v27.2d, v27.2d, v29.2d // bc[3] - - eor v0.16b, v0.16b, v30.16b - xar v29.2d, v1.2d, v25.2d, (64 - 1) - xar v1.2d, v6.2d, v25.2d, (64 - 44) - xar v6.2d, v9.2d, v28.2d, (64 - 20) - xar v9.2d, v22.2d, v26.2d, (64 - 61) - xar v22.2d, v14.2d, v28.2d, (64 - 39) - xar v14.2d, v20.2d, v30.2d, (64 - 18) - xar v31.2d, v2.2d, v26.2d, (64 - 62) - xar v2.2d, v12.2d, v26.2d, (64 - 43) - xar v12.2d, v13.2d, v27.2d, (64 - 25) - xar v13.2d, v19.2d, v28.2d, (64 - 8) - xar v19.2d, v23.2d, v27.2d, (64 - 56) - xar v23.2d, v15.2d, v30.2d, (64 - 41) - xar v15.2d, v4.2d, v28.2d, (64 - 27) - xar v28.2d, v24.2d, v28.2d, (64 - 14) - xar v24.2d, v21.2d, v25.2d, (64 - 2) - xar v8.2d, v8.2d, v27.2d, (64 - 55) - xar v4.2d, v16.2d, v25.2d, (64 - 45) - xar v16.2d, v5.2d, v30.2d, (64 - 36) - xar v5.2d, v3.2d, v27.2d, (64 - 28) - xar v27.2d, v18.2d, v27.2d, (64 - 21) - xar v3.2d, v17.2d, v26.2d, (64 - 15) - xar v25.2d, v11.2d, v25.2d, (64 - 10) - xar v26.2d, v7.2d, v26.2d, (64 - 6) - xar v30.2d, v10.2d, v30.2d, (64 - 3) - - bcax v20.16b, v31.16b, v22.16b, v8.16b - bcax v21.16b, v8.16b, v23.16b, v22.16b - bcax v22.16b, v22.16b, v24.16b, v23.16b - bcax v23.16b, v23.16b, v31.16b, v24.16b - bcax v24.16b, v24.16b, v8.16b, v31.16b - - ld1r {v31.2d}, [x9], #8 - - bcax v17.16b, v25.16b, v19.16b, v3.16b - bcax v18.16b, v3.16b, v15.16b, v19.16b - bcax v19.16b, v19.16b, v16.16b, v15.16b - bcax v15.16b, v15.16b, v25.16b, v16.16b - bcax v16.16b, v16.16b, v3.16b, v25.16b - - bcax v10.16b, v29.16b, v12.16b, v26.16b - bcax v11.16b, v26.16b, v13.16b, v12.16b - bcax v12.16b, v12.16b, v14.16b, v13.16b - bcax v13.16b, v13.16b, v29.16b, v14.16b - bcax v14.16b, v14.16b, v26.16b, v29.16b - - bcax v7.16b, v30.16b, v9.16b, v4.16b - bcax v8.16b, v4.16b, v5.16b, v9.16b - bcax v9.16b, v9.16b, v6.16b, v5.16b - bcax v5.16b, v5.16b, v30.16b, v6.16b - bcax v6.16b, v6.16b, v4.16b, v30.16b - - bcax v3.16b, v27.16b, v0.16b, v28.16b - bcax v4.16b, v28.16b, v1.16b, v0.16b - bcax v0.16b, v0.16b, v2.16b, v1.16b - bcax v1.16b, v1.16b, v27.16b, v2.16b - bcax v2.16b, v2.16b, v28.16b, v27.16b - - eor v0.16b, v0.16b, v31.16b - - cbnz w8, 3b - cond_yield 4f, x8, x9 - cbnz x2, 0b - - /* save state */ -4: st1 { v0.1d- v3.1d}, [x0], #32 - st1 { v4.1d- v7.1d}, [x0], #32 - st1 { v8.1d-v11.1d}, [x0], #32 - st1 {v12.1d-v15.1d}, [x0], #32 - st1 {v16.1d-v19.1d}, [x0], #32 - st1 {v20.1d-v23.1d}, [x0], #32 - st1 {v24.1d}, [x0] - mov x0, x2 - ret -SYM_FUNC_END(sha3_ce_transform) - - .section ".rodata", "a" - .align 8 -.Lsha3_rcon: - .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a - .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001 - .quad 0x8000000080008081, 0x8000000000008009, 
0x000000000000008a - .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a - .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089 - .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080 - .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081 - .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c deleted file mode 100644 index 250f4fb76b47..000000000000 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ /dev/null @@ -1,150 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions - * - * Copyright (C) 2018 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel "); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("sha3-224"); -MODULE_ALIAS_CRYPTO("sha3-256"); -MODULE_ALIAS_CRYPTO("sha3-384"); -MODULE_ALIAS_CRYPTO("sha3-512"); - -asmlinkage size_t sha3_ce_transform(struct sha3_state *state, const u8 *data, - size_t nblocks, size_t block_size); - -static int arm64_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - struct crypto_shash *tfm = desc->tfm; - unsigned int bs; - int blocks; - - bs = crypto_shash_blocksize(tfm); - blocks = len / bs; - len -= blocks * bs; - do { - int rem; - - kernel_neon_begin(); - rem = sha3_ce_transform(sctx, data, blocks, bs); - kernel_neon_end(); - data += (blocks - rem) * bs; - blocks = rem; - } while (blocks); - return len; -} - -static int sha3_finup(struct shash_desc *desc, const u8 *src, unsigned int len, - u8 *out) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - struct crypto_shash *tfm = desc->tfm; - __le64 *digest = (__le64 *)out; - u8 block[SHA3_224_BLOCK_SIZE]; - unsigned int bs, ds; - int i; - - ds = crypto_shash_digestsize(tfm); - bs = crypto_shash_blocksize(tfm); - memcpy(block, src, len); - - block[len++] = 0x06; - memset(block + len, 0, bs - len); - block[bs - 1] |= 0x80; - - kernel_neon_begin(); - sha3_ce_transform(sctx, block, 1, bs); - kernel_neon_end(); - memzero_explicit(block , sizeof(block)); - - for (i = 0; i < ds / 8; i++) - put_unaligned_le64(sctx->st[i], digest++); - - if (ds & 4) - put_unaligned_le32(sctx->st[i], (__le32 *)digest); - - return 0; -} - -static struct shash_alg algs[] = { { - .digestsize = SHA3_224_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = arm64_sha3_update, - .finup = sha3_finup, - .descsize = SHA3_STATE_SIZE, - .base.cra_name = "sha3-224", - .base.cra_driver_name = "sha3-224-ce", - .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .base.cra_blocksize = SHA3_224_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - .base.cra_priority = 200, -}, { - .digestsize = SHA3_256_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = arm64_sha3_update, - .finup = sha3_finup, - .descsize = SHA3_STATE_SIZE, - .base.cra_name = "sha3-256", - .base.cra_driver_name = "sha3-256-ce", - .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .base.cra_blocksize = SHA3_256_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - .base.cra_priority = 200, -}, { - .digestsize = SHA3_384_DIGEST_SIZE, - .init = crypto_sha3_init, - 
.update = arm64_sha3_update, - .finup = sha3_finup, - .descsize = SHA3_STATE_SIZE, - .base.cra_name = "sha3-384", - .base.cra_driver_name = "sha3-384-ce", - .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .base.cra_blocksize = SHA3_384_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - .base.cra_priority = 200, -}, { - .digestsize = SHA3_512_DIGEST_SIZE, - .init = crypto_sha3_init, - .update = arm64_sha3_update, - .finup = sha3_finup, - .descsize = SHA3_STATE_SIZE, - .base.cra_name = "sha3-512", - .base.cra_driver_name = "sha3-512-ce", - .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .base.cra_blocksize = SHA3_512_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, - .base.cra_priority = 200, -} }; - -static int __init sha3_neon_mod_init(void) -{ - return crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit sha3_neon_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_cpu_feature_match(SHA3, sha3_neon_mod_init); -module_exit(sha3_neon_mod_fini); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index a05f5a349cd8..587490ca6565 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -202,6 +202,11 @@ config CRYPTO_LIB_SHA3 The SHA3 library functions. Select this if your module uses any of the functions from . +config CRYPTO_LIB_SHA3_ARCH + bool + depends on CRYPTO_LIB_SHA3 && !UML + default y if ARM64 && KERNEL_MODE_NEON + config CRYPTO_LIB_SM3 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 0cfdb511f32b..5515e73bfd5e 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -281,6 +281,11 @@ endif # CONFIG_CRYPTO_LIB_SHA512_ARCH obj-$(CONFIG_CRYPTO_LIB_SHA3) += libsha3.o libsha3-y := sha3.o +ifeq ($(CONFIG_CRYPTO_LIB_SHA3_ARCH),y) +CFLAGS_sha3.o += -I$(src)/$(SRCARCH) +libsha3-$(CONFIG_ARM64) += arm64/sha3-ce-core.o +endif # CONFIG_CRYPTO_LIB_SHA3_ARCH + ################################################################################ obj-$(CONFIG_MPILIB) += mpi/ diff --git a/lib/crypto/arm64/sha3-ce-core.S b/lib/crypto/arm64/sha3-ce-core.S new file mode 100644 index 000000000000..b62bd714839b --- /dev/null +++ b/lib/crypto/arm64/sha3-ce-core.S @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions + * + * Copyright (C) 2018 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + + .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .set .Lv\b\().2d, \b + .set .Lv\b\().16b, \b + .endr + + /* + * ARMv8.2 Crypto Extensions instructions + */ + .macro eor3, rd, rn, rm, ra + .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) + .endm + + .macro rax1, rd, rn, rm + .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16) + .endm + + .macro bcax, rd, rn, rm, ra + .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) + .endm + + .macro xar, rd, rn, rm, imm6 + .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16) + .endm + + /* + * size_t sha3_ce_transform(struct sha3_state *state, const u8 *data, + * size_t nblocks, size_t block_size) + * + * block_size is assumed to be one of 72 (SHA3-512), 104 (SHA3-384), 136 + * (SHA3-256 and SHAKE256), 144 (SHA3-224), or 168 (SHAKE128). 
+ */ + .text +SYM_FUNC_START(sha3_ce_transform) + /* load state */ + add x8, x0, #32 + ld1 { v0.1d- v3.1d}, [x0] + ld1 { v4.1d- v7.1d}, [x8], #32 + ld1 { v8.1d-v11.1d}, [x8], #32 + ld1 {v12.1d-v15.1d}, [x8], #32 + ld1 {v16.1d-v19.1d}, [x8], #32 + ld1 {v20.1d-v23.1d}, [x8], #32 + ld1 {v24.1d}, [x8] + +0: sub x2, x2, #1 + mov w8, #24 + adr_l x9, .Lsha3_rcon + + /* load input */ + ld1 {v25.8b-v28.8b}, [x1], #32 + ld1 {v29.8b}, [x1], #8 + eor v0.8b, v0.8b, v25.8b + eor v1.8b, v1.8b, v26.8b + eor v2.8b, v2.8b, v27.8b + eor v3.8b, v3.8b, v28.8b + eor v4.8b, v4.8b, v29.8b + + ld1 {v25.8b-v28.8b}, [x1], #32 + eor v5.8b, v5.8b, v25.8b + eor v6.8b, v6.8b, v26.8b + eor v7.8b, v7.8b, v27.8b + eor v8.8b, v8.8b, v28.8b + cmp x3, #72 + b.eq 3f /* SHA3-512 (block_size=72)? */ + + ld1 {v25.8b-v28.8b}, [x1], #32 + eor v9.8b, v9.8b, v25.8b + eor v10.8b, v10.8b, v26.8b + eor v11.8b, v11.8b, v27.8b + eor v12.8b, v12.8b, v28.8b + cmp x3, #104 + b.eq 3f /* SHA3-384 (block_size=104)? */ + + ld1 {v25.8b-v28.8b}, [x1], #32 + eor v13.8b, v13.8b, v25.8b + eor v14.8b, v14.8b, v26.8b + eor v15.8b, v15.8b, v27.8b + eor v16.8b, v16.8b, v28.8b + cmp x3, #144 + b.lt 3f /* SHA3-256 or SHAKE256 (block_size=136)? */ + b.eq 2f /* SHA3-224 (block_size=144)? */ + + /* SHAKE128 (block_size=168) */ + ld1 {v25.8b-v28.8b}, [x1], #32 + eor v17.8b, v17.8b, v25.8b + eor v18.8b, v18.8b, v26.8b + eor v19.8b, v19.8b, v27.8b + eor v20.8b, v20.8b, v28.8b + b 3f +2: + /* SHA3-224 (block_size=144) */ + ld1 {v25.8b}, [x1], #8 + eor v17.8b, v17.8b, v25.8b + +3: sub w8, w8, #1 + + eor3 v29.16b, v4.16b, v9.16b, v14.16b + eor3 v26.16b, v1.16b, v6.16b, v11.16b + eor3 v28.16b, v3.16b, v8.16b, v13.16b + eor3 v25.16b, v0.16b, v5.16b, v10.16b + eor3 v27.16b, v2.16b, v7.16b, v12.16b + eor3 v29.16b, v29.16b, v19.16b, v24.16b + eor3 v26.16b, v26.16b, v16.16b, v21.16b + eor3 v28.16b, v28.16b, v18.16b, v23.16b + eor3 v25.16b, v25.16b, v15.16b, v20.16b + eor3 v27.16b, v27.16b, v17.16b, v22.16b + + rax1 v30.2d, v29.2d, v26.2d // bc[0] + rax1 v26.2d, v26.2d, v28.2d // bc[2] + rax1 v28.2d, v28.2d, v25.2d // bc[4] + rax1 v25.2d, v25.2d, v27.2d // bc[1] + rax1 v27.2d, v27.2d, v29.2d // bc[3] + + eor v0.16b, v0.16b, v30.16b + xar v29.2d, v1.2d, v25.2d, (64 - 1) + xar v1.2d, v6.2d, v25.2d, (64 - 44) + xar v6.2d, v9.2d, v28.2d, (64 - 20) + xar v9.2d, v22.2d, v26.2d, (64 - 61) + xar v22.2d, v14.2d, v28.2d, (64 - 39) + xar v14.2d, v20.2d, v30.2d, (64 - 18) + xar v31.2d, v2.2d, v26.2d, (64 - 62) + xar v2.2d, v12.2d, v26.2d, (64 - 43) + xar v12.2d, v13.2d, v27.2d, (64 - 25) + xar v13.2d, v19.2d, v28.2d, (64 - 8) + xar v19.2d, v23.2d, v27.2d, (64 - 56) + xar v23.2d, v15.2d, v30.2d, (64 - 41) + xar v15.2d, v4.2d, v28.2d, (64 - 27) + xar v28.2d, v24.2d, v28.2d, (64 - 14) + xar v24.2d, v21.2d, v25.2d, (64 - 2) + xar v8.2d, v8.2d, v27.2d, (64 - 55) + xar v4.2d, v16.2d, v25.2d, (64 - 45) + xar v16.2d, v5.2d, v30.2d, (64 - 36) + xar v5.2d, v3.2d, v27.2d, (64 - 28) + xar v27.2d, v18.2d, v27.2d, (64 - 21) + xar v3.2d, v17.2d, v26.2d, (64 - 15) + xar v25.2d, v11.2d, v25.2d, (64 - 10) + xar v26.2d, v7.2d, v26.2d, (64 - 6) + xar v30.2d, v10.2d, v30.2d, (64 - 3) + + bcax v20.16b, v31.16b, v22.16b, v8.16b + bcax v21.16b, v8.16b, v23.16b, v22.16b + bcax v22.16b, v22.16b, v24.16b, v23.16b + bcax v23.16b, v23.16b, v31.16b, v24.16b + bcax v24.16b, v24.16b, v8.16b, v31.16b + + ld1r {v31.2d}, [x9], #8 + + bcax v17.16b, v25.16b, v19.16b, v3.16b + bcax v18.16b, v3.16b, v15.16b, v19.16b + bcax v19.16b, v19.16b, v16.16b, v15.16b + bcax v15.16b, v15.16b, v25.16b, v16.16b + bcax v16.16b, 
v16.16b, v3.16b, v25.16b + + bcax v10.16b, v29.16b, v12.16b, v26.16b + bcax v11.16b, v26.16b, v13.16b, v12.16b + bcax v12.16b, v12.16b, v14.16b, v13.16b + bcax v13.16b, v13.16b, v29.16b, v14.16b + bcax v14.16b, v14.16b, v26.16b, v29.16b + + bcax v7.16b, v30.16b, v9.16b, v4.16b + bcax v8.16b, v4.16b, v5.16b, v9.16b + bcax v9.16b, v9.16b, v6.16b, v5.16b + bcax v5.16b, v5.16b, v30.16b, v6.16b + bcax v6.16b, v6.16b, v4.16b, v30.16b + + bcax v3.16b, v27.16b, v0.16b, v28.16b + bcax v4.16b, v28.16b, v1.16b, v0.16b + bcax v0.16b, v0.16b, v2.16b, v1.16b + bcax v1.16b, v1.16b, v27.16b, v2.16b + bcax v2.16b, v2.16b, v28.16b, v27.16b + + eor v0.16b, v0.16b, v31.16b + + cbnz w8, 3b + cond_yield 4f, x8, x9 + cbnz x2, 0b + + /* save state */ +4: st1 { v0.1d- v3.1d}, [x0], #32 + st1 { v4.1d- v7.1d}, [x0], #32 + st1 { v8.1d-v11.1d}, [x0], #32 + st1 {v12.1d-v15.1d}, [x0], #32 + st1 {v16.1d-v19.1d}, [x0], #32 + st1 {v20.1d-v23.1d}, [x0], #32 + st1 {v24.1d}, [x0] + mov x0, x2 + ret +SYM_FUNC_END(sha3_ce_transform) + + .section ".rodata", "a" + .align 8 +.Lsha3_rcon: + .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a + .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001 + .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a + .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a + .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089 + .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080 + .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081 + .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 diff --git a/lib/crypto/arm64/sha3.h b/lib/crypto/arm64/sha3.h new file mode 100644 index 000000000000..6dd5183056da --- /dev/null +++ b/lib/crypto/arm64/sha3.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3); + +asmlinkage size_t sha3_ce_transform(struct sha3_state *state, const u8 *data, + size_t nblocks, size_t block_size); + +static void sha3_absorb_blocks(struct sha3_state *state, const u8 *data, + size_t nblocks, size_t block_size) +{ + if (static_branch_likely(&have_sha3) && likely(may_use_simd())) { + do { + size_t rem; + + kernel_neon_begin(); + rem = sha3_ce_transform(state, data, nblocks, + block_size); + kernel_neon_end(); + data += (nblocks - rem) * block_size; + nblocks = rem; + } while (nblocks); + } else { + sha3_absorb_blocks_generic(state, data, nblocks, block_size); + } +} + +static void sha3_keccakf(struct sha3_state *state) +{ + if (static_branch_likely(&have_sha3) && likely(may_use_simd())) { + /* + * Passing zeroes into sha3_ce_transform() gives the plain + * Keccak-f permutation, which is what we want here. Any + * supported block size may be used. Use SHA3_512_BLOCK_SIZE + * since it's the shortest. 
+ */ + static const u8 zeroes[SHA3_512_BLOCK_SIZE]; + + kernel_neon_begin(); + sha3_ce_transform(state, zeroes, 1, sizeof(zeroes)); + kernel_neon_end(); + } else { + sha3_keccakf_generic(state); + } +} + +#define sha3_mod_init_arch sha3_mod_init_arch +static void sha3_mod_init_arch(void) +{ + if (cpu_have_named_feature(SHA3)) + static_branch_enable(&have_sha3); +} -- cgit v1.2.3 From 04171105d33ae77945791a637d23b76789f3bef9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 25 Oct 2025 22:50:27 -0700 Subject: lib/crypto: s390/sha3: Add optimized Keccak functions Implement sha3_absorb_blocks() and sha3_keccakf() using the hardware- accelerated SHA-3 support in Message-Security-Assist Extension 6. This accelerates the SHA3-224, SHA3-256, SHA3-384, SHA3-512, and SHAKE256 library functions. Note that arch/s390/crypto/ already has SHA-3 code that uses this extension, but it is exposed only via crypto_shash. This commit brings the same acceleration to the SHA-3 library. The arch/s390/crypto/ version will become redundant and be removed in later changes. Tested-by: Harald Freudenberger Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251026055032.1413733-11-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/Kconfig | 1 + lib/crypto/s390/sha3.h | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 lib/crypto/s390/sha3.h (limited to 'lib/crypto') diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 587490ca6565..7445054fc0ad 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -206,6 +206,7 @@ config CRYPTO_LIB_SHA3_ARCH bool depends on CRYPTO_LIB_SHA3 && !UML default y if ARM64 && KERNEL_MODE_NEON + default y if S390 config CRYPTO_LIB_SM3 tristate diff --git a/lib/crypto/s390/sha3.h b/lib/crypto/s390/sha3.h new file mode 100644 index 000000000000..668e53da93d2 --- /dev/null +++ b/lib/crypto/s390/sha3.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SHA-3 optimized using the CP Assist for Cryptographic Functions (CPACF) + * + * Copyright 2025 Google LLC + */ +#include +#include + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3); + +static void sha3_absorb_blocks(struct sha3_state *state, const u8 *data, + size_t nblocks, size_t block_size) +{ + if (static_branch_likely(&have_sha3)) { + /* + * Note that KIMD assumes little-endian order of the state + * words. sha3_state already uses that order, though, so + * there's no need for a byteswap. + */ + switch (block_size) { + case SHA3_224_BLOCK_SIZE: + cpacf_kimd(CPACF_KIMD_SHA3_224, state, + data, nblocks * block_size); + return; + case SHA3_256_BLOCK_SIZE: + /* + * This case handles both SHA3-256 and SHAKE256, since + * they have the same block size. + */ + cpacf_kimd(CPACF_KIMD_SHA3_256, state, + data, nblocks * block_size); + return; + case SHA3_384_BLOCK_SIZE: + cpacf_kimd(CPACF_KIMD_SHA3_384, state, + data, nblocks * block_size); + return; + case SHA3_512_BLOCK_SIZE: + cpacf_kimd(CPACF_KIMD_SHA3_512, state, + data, nblocks * block_size); + return; + } + } + sha3_absorb_blocks_generic(state, data, nblocks, block_size); +} + +static void sha3_keccakf(struct sha3_state *state) +{ + if (static_branch_likely(&have_sha3)) { + /* + * Passing zeroes into any of CPACF_KIMD_SHA3_* gives the plain + * Keccak-f permutation, which is what we want here. Use + * SHA3-512 since it has the smallest block size. 
+ */ + static const u8 zeroes[SHA3_512_BLOCK_SIZE]; + + cpacf_kimd(CPACF_KIMD_SHA3_512, state, zeroes, sizeof(zeroes)); + } else { + sha3_keccakf_generic(state); + } +} + +#define sha3_mod_init_arch sha3_mod_init_arch +static void sha3_mod_init_arch(void) +{ + int num_present = 0; + int num_possible = 0; + + if (!cpu_have_feature(S390_CPU_FEATURE_MSA)) + return; + /* + * Since all the SHA-3 functions are in Message-Security-Assist + * Extension 6, just treat them as all or nothing. This way we need + * only one static_key. + */ +#define QUERY(opcode, func) \ + ({ num_present += !!cpacf_query_func(opcode, func); num_possible++; }) + QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_224); + QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_256); + QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_384); + QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_512); +#undef QUERY + + if (num_present == num_possible) + static_branch_enable(&have_sha3); + else if (num_present != 0) + pr_warn("Unsupported combination of SHA-3 facilities\n"); +} -- cgit v1.2.3 From 0354d3c1f1b8628e60eceb304b6d2ef75eea6f41 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 25 Oct 2025 22:50:28 -0700 Subject: lib/crypto: sha3: Support arch overrides of one-shot digest functions Add support for architecture-specific overrides of sha3_224(), sha3_256(), sha3_384(), and sha3_512(). This will be used to implement these functions more efficiently on s390 than is possible via the usual init + update + final flow. Reviewed-by: Ard Biesheuvel Tested-by: Harald Freudenberger Link: https://lore.kernel.org/r/20251026055032.1413733-12-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/sha3.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'lib/crypto') diff --git a/lib/crypto/sha3.c b/lib/crypto/sha3.c index 8434f9bff138..32b7074de792 100644 --- a/lib/crypto/sha3.c +++ b/lib/crypto/sha3.c @@ -280,10 +280,41 @@ void shake_squeeze(struct shake_ctx *shake_ctx, u8 *out, size_t out_len) } EXPORT_SYMBOL_GPL(shake_squeeze); +#ifndef sha3_224_arch +static inline bool sha3_224_arch(const u8 *in, size_t in_len, + u8 out[SHA3_224_DIGEST_SIZE]) +{ + return false; +} +#endif +#ifndef sha3_256_arch +static inline bool sha3_256_arch(const u8 *in, size_t in_len, + u8 out[SHA3_256_DIGEST_SIZE]) +{ + return false; +} +#endif +#ifndef sha3_384_arch +static inline bool sha3_384_arch(const u8 *in, size_t in_len, + u8 out[SHA3_384_DIGEST_SIZE]) +{ + return false; +} +#endif +#ifndef sha3_512_arch +static inline bool sha3_512_arch(const u8 *in, size_t in_len, + u8 out[SHA3_512_DIGEST_SIZE]) +{ + return false; +} +#endif + void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]) { struct sha3_ctx ctx; + if (sha3_224_arch(in, in_len, out)) + return; sha3_224_init(&ctx); sha3_update(&ctx, in, in_len); sha3_final(&ctx, out); @@ -294,6 +325,8 @@ void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]) { struct sha3_ctx ctx; + if (sha3_256_arch(in, in_len, out)) + return; sha3_256_init(&ctx); sha3_update(&ctx, in, in_len); sha3_final(&ctx, out); @@ -304,6 +337,8 @@ void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]) { struct sha3_ctx ctx; + if (sha3_384_arch(in, in_len, out)) + return; sha3_384_init(&ctx); sha3_update(&ctx, in, in_len); sha3_final(&ctx, out); @@ -314,6 +349,8 @@ void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]) { struct sha3_ctx ctx; + if (sha3_512_arch(in, in_len, out)) + return; sha3_512_init(&ctx); sha3_update(&ctx, in, in_len); sha3_final(&ctx, out); -- cgit v1.2.3 From 
862445d3b9e74f58360a7a89787da4dca783e6dd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 25 Oct 2025 22:50:29 -0700 Subject: lib/crypto: s390/sha3: Add optimized one-shot SHA-3 digest functions Some z/Architecture processors can compute a SHA-3 digest in a single instruction. arch/s390/crypto/ already uses this capability to optimize the SHA-3 crypto_shash algorithms. Use this capability to implement the sha3_224(), sha3_256(), sha3_384(), and sha3_512() library functions too. SHA3-256 benchmark results provided by Harald Freudenberger (https://lore.kernel.org/r/4188d18bfcc8a64941c5ebd8de10ede2@linux.ibm.com/) on a z/Architecture machine with "facility 86" (MSA level 12):

	Length (bytes)  Before (MB/s)  After (MB/s)
	==============  =============  ============
	16              212            225
	64              820            915
	256             1850           3350
	1024            5400           8300
	4096            11200          11300

Note: the original data from Harald was given in the form of a graph for each length, showing the distribution of throughputs from 500 runs. I guesstimated the peak of each one. Harald also reported that the generic SHA-3 code was at most 259 MB/s (https://lore.kernel.org/r/c39f6b6c110def0095e5da5becc12085@linux.ibm.com/). So as expected, the earlier commit that optimized sha3_absorb_blocks() and sha3_keccakf() is the more important one; it optimized the Keccak permutation which is the most performance-critical part of SHA-3. Still, this additional commit does notably improve performance further on some lengths. Reviewed-by: Ard Biesheuvel Tested-by: Harald Freudenberger Link: https://lore.kernel.org/r/20251026055032.1413733-13-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/s390/sha3.h | 67 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 2 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/s390/sha3.h b/lib/crypto/s390/sha3.h index 668e53da93d2..85471404775a 100644 --- a/lib/crypto/s390/sha3.h +++ b/lib/crypto/s390/sha3.h @@ -8,6 +8,7 @@ #include static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3_init_optim); static void sha3_absorb_blocks(struct sha3_state *state, const u8 *data, size_t nblocks, size_t block_size) @@ -60,6 +61,61 @@ static void sha3_keccakf(struct sha3_state *state) } } +static inline bool s390_sha3(int func, const u8 *in, size_t in_len, + u8 *out, size_t out_len) +{ + struct sha3_state state; + + if (!static_branch_likely(&have_sha3)) + return false; + + if (static_branch_likely(&have_sha3_init_optim)) + func |= CPACF_KLMD_NIP | CPACF_KLMD_DUFOP; + else + memset(&state, 0, sizeof(state)); + + cpacf_klmd(func, &state, in, in_len); + + if (static_branch_likely(&have_sha3_init_optim)) + kmsan_unpoison_memory(&state, out_len); + + memcpy(out, &state, out_len); + memzero_explicit(&state, sizeof(state)); + return true; +} + +#define sha3_224_arch sha3_224_arch +static bool sha3_224_arch(const u8 *in, size_t in_len, + u8 out[SHA3_224_DIGEST_SIZE]) +{ + return s390_sha3(CPACF_KLMD_SHA3_224, in, in_len, + out, SHA3_224_DIGEST_SIZE); +} + +#define sha3_256_arch sha3_256_arch +static bool sha3_256_arch(const u8 *in, size_t in_len, + u8 out[SHA3_256_DIGEST_SIZE]) +{ + return s390_sha3(CPACF_KLMD_SHA3_256, in, in_len, + out, SHA3_256_DIGEST_SIZE); +} + +#define sha3_384_arch sha3_384_arch +static bool sha3_384_arch(const u8 *in, size_t in_len, + u8 out[SHA3_384_DIGEST_SIZE]) +{ + return s390_sha3(CPACF_KLMD_SHA3_384, in, in_len, + out, SHA3_384_DIGEST_SIZE); +} + +#define sha3_512_arch sha3_512_arch +static bool sha3_512_arch(const
u8 *in, size_t in_len, + u8 out[SHA3_512_DIGEST_SIZE]) +{ + return s390_sha3(CPACF_KLMD_SHA3_512, in, in_len, + out, SHA3_512_DIGEST_SIZE); +} + #define sha3_mod_init_arch sha3_mod_init_arch static void sha3_mod_init_arch(void) { @@ -79,10 +135,17 @@ static void sha3_mod_init_arch(void) QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_256); QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_384); QUERY(CPACF_KIMD, CPACF_KIMD_SHA3_512); + QUERY(CPACF_KLMD, CPACF_KLMD_SHA3_224); + QUERY(CPACF_KLMD, CPACF_KLMD_SHA3_256); + QUERY(CPACF_KLMD, CPACF_KLMD_SHA3_384); + QUERY(CPACF_KLMD, CPACF_KLMD_SHA3_512); #undef QUERY - if (num_present == num_possible) + if (num_present == num_possible) { static_branch_enable(&have_sha3); - else if (num_present != 0) + if (test_facility(86)) + static_branch_enable(&have_sha3_init_optim); + } else if (num_present != 0) { pr_warn("Unsupported combination of SHA-3 facilities\n"); + } } -- cgit v1.2.3 From b8b816ec046922ef362120c14009516b92a26385 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 1 Nov 2025 19:15:53 -0700 Subject: lib/crypto: arm/blake2s: Fix some comments Fix the indices in some comments in blake2s-core.S. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102021553.176587-1-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/arm/blake2s-core.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/arm/blake2s-core.S b/lib/crypto/arm/blake2s-core.S index 14eb7c18a836..933f0558b7cd 100644 --- a/lib/crypto/arm/blake2s-core.S +++ b/lib/crypto/arm/blake2s-core.S @@ -115,7 +115,7 @@ // Execute one round of BLAKE2s by updating the state matrix v[0..15]. v[0..9] // are in r0..r9. The stack pointer points to 8 bytes of scratch space for -// spilling v[8..9], then to v[9..15], then to the message block. r10-r12 and +// spilling v[8..9], then to v[10..15], then to the message block. r10-r12 and // r14 are free to use. The macro arguments s0-s15 give the order in which the // message words are used in this round. // @@ -209,18 +209,18 @@ ENTRY(blake2s_compress) .Lcopy_block_done: str r1, [sp, #68] // Update message pointer - // Calculate v[8..15]. Push v[9..15] onto the stack, and leave space + // Calculate v[8..15]. Push v[10..15] onto the stack, and leave space // for spilling v[8..9]. Leave v[8..9] in r8-r9. mov r14, r0 // r14 = ctx adr r12, .Lblake2s_IV ldmia r12!, {r8-r9} // load IV[0..1] __ldrd r0, r1, r14, 40 // load f[0..1] - ldm r12, {r2-r7} // load IV[3..7] + ldm r12, {r2-r7} // load IV[2..7] eor r4, r4, r10 // v[12] = IV[4] ^ t[0] eor r5, r5, r11 // v[13] = IV[5] ^ t[1] eor r6, r6, r0 // v[14] = IV[6] ^ f[0] eor r7, r7, r1 // v[15] = IV[7] ^ f[1] - push {r2-r7} // push v[9..15] + push {r2-r7} // push v[10..15] sub sp, sp, #8 // leave space for v[8..9] // Load h[0..7] == v[0..7]. -- cgit v1.2.3 From 95ce85de0b8c8a1192c692cbd5948e4dcbc1563f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 1 Nov 2025 18:48:06 -0700 Subject: lib/crypto: arm, arm64: Drop filenames from file comments Remove self-references to filenames from assembly files in lib/crypto/arm/ and lib/crypto/arm64/. This follows the recommended practice and eliminates an outdated reference to sha2-ce-core.S. 
Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102014809.170713-1-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/arm/sha1-armv7-neon.S | 2 +- lib/crypto/arm/sha1-ce-core.S | 2 +- lib/crypto/arm/sha256-ce.S | 2 +- lib/crypto/arm64/sha1-ce-core.S | 2 +- lib/crypto/arm64/sha256-ce.S | 2 +- lib/crypto/arm64/sha3-ce-core.S | 2 +- lib/crypto/arm64/sha512-ce-core.S | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/arm/sha1-armv7-neon.S b/lib/crypto/arm/sha1-armv7-neon.S index 6edba3ab62e8..a0323fa5c58a 100644 --- a/lib/crypto/arm/sha1-armv7-neon.S +++ b/lib/crypto/arm/sha1-armv7-neon.S @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function +/* ARM/NEON accelerated SHA-1 transform function * * Copyright © 2013-2014 Jussi Kivilinna */ diff --git a/lib/crypto/arm/sha1-ce-core.S b/lib/crypto/arm/sha1-ce-core.S index 2de40dd25e47..7d6b2631ca8d 100644 --- a/lib/crypto/arm/sha1-ce-core.S +++ b/lib/crypto/arm/sha1-ce-core.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions + * SHA-1 secure hash using ARMv8 Crypto Extensions * * Copyright (C) 2015 Linaro Ltd. * Author: Ard Biesheuvel diff --git a/lib/crypto/arm/sha256-ce.S b/lib/crypto/arm/sha256-ce.S index 7481ac8e6c0d..144ee805f64a 100644 --- a/lib/crypto/arm/sha256-ce.S +++ b/lib/crypto/arm/sha256-ce.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * sha256-ce.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions + * SHA-224/256 secure hash using ARMv8 Crypto Extensions * * Copyright (C) 2015 Linaro Ltd. * Author: Ard Biesheuvel diff --git a/lib/crypto/arm64/sha1-ce-core.S b/lib/crypto/arm64/sha1-ce-core.S index 21efbbafd7d6..8fbd4767f0f0 100644 --- a/lib/crypto/arm64/sha1-ce-core.S +++ b/lib/crypto/arm64/sha1-ce-core.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions + * SHA-1 secure hash using ARMv8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd */ diff --git a/lib/crypto/arm64/sha256-ce.S b/lib/crypto/arm64/sha256-ce.S index 410174ba5237..e4bfe42a61a9 100644 --- a/lib/crypto/arm64/sha256-ce.S +++ b/lib/crypto/arm64/sha256-ce.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions + * Core SHA-224/SHA-256 transform using v8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd */ diff --git a/lib/crypto/arm64/sha3-ce-core.S b/lib/crypto/arm64/sha3-ce-core.S index b62bd714839b..ace90b506490 100644 --- a/lib/crypto/arm64/sha3-ce-core.S +++ b/lib/crypto/arm64/sha3-ce-core.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions + * Core SHA-3 transform using v8.2 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd * diff --git a/lib/crypto/arm64/sha512-ce-core.S b/lib/crypto/arm64/sha512-ce-core.S index 22f1ded89bc8..ffd51acfd1ee 100644 --- a/lib/crypto/arm64/sha512-ce-core.S +++ b/lib/crypto/arm64/sha512-ce-core.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions + * Core SHA-384/SHA-512 transform using v8 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd * -- cgit v1.2.3 From 2f22115709fc7ebcfa40af3367a508fbbd2f71e9 Mon Sep 17 00:00:00 2001 From: Eric 
Biggers Date: Sun, 2 Nov 2025 15:42:04 -0800 Subject: lib/crypto: x86/blake2s: Fix 32-bit arg treated as 64-bit In the C code, the 'inc' argument to the assembly functions blake2s_compress_ssse3() and blake2s_compress_avx512() is declared with type u32, matching blake2s_compress(). The assembly code then reads it from the 64-bit %rcx. However, the ABI doesn't guarantee zero-extension to 64 bits, nor do gcc or clang guarantee it. Therefore, fix these functions to read this argument from the 32-bit %ecx. In theory, this bug could have caused the wrong 'inc' value to be used, causing incorrect BLAKE2s hashes. In practice, probably not: I've fixed essentially this same bug in many other assembly files too, but there's never been a real report of it having caused a problem. In x86_64, all writes to 32-bit registers are zero-extended to 64 bits. That results in zero-extension in nearly all situations. I've only been able to demonstrate a lack of zero-extension with a somewhat contrived example involving truncation, e.g. when the C code has a u64 variable holding 0x1234567800000040 and passes it as a u32 expecting it to be truncated to 0x40 (64). But that's not what the real code does, of course. Fixes: ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation") Cc: stable@vger.kernel.org Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-2-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index ef8e9f427aab..093e7814f387 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -52,7 +52,7 @@ SYM_FUNC_START(blake2s_compress_ssse3) movdqa ROT16(%rip),%xmm12 movdqa ROR328(%rip),%xmm13 movdqu 0x20(%rdi),%xmm14 - movq %rcx,%xmm15 + movd %ecx,%xmm15 leaq SIGMA+0xa0(%rip),%r8 jmp .Lbeginofloop .align 32 @@ -176,7 +176,7 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rdi),%xmm0 vmovdqu 0x10(%rdi),%xmm1 vmovdqu 0x20(%rdi),%xmm4 - vmovq %rcx,%xmm5 + vmovd %ecx,%xmm5 vmovdqa IV(%rip),%xmm14 vmovdqa IV+16(%rip),%xmm15 jmp .Lblake2s_compress_avx512_mainloop -- cgit v1.2.3 From c19bdf24cc274e96006267173d664df2ef4b13db Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 2 Nov 2025 15:42:05 -0800 Subject: lib/crypto: x86/blake2s: Drop check for nblocks == 0 Since blake2s_compress() is always passed nblocks != 0, remove the unnecessary check for nblocks == 0 from blake2s_compress_ssse3(). Note that this makes it consistent with blake2s_compress_avx512() in the same file as well as the arm32 blake2s_compress(). 
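For context on why the check was dead code, here is a hedged sketch of the assumed caller shape (illustrative only, not the exact lib/crypto code): the update path hands only whole blocks to the compress routine and keeps the final, possibly partial, block buffered, so nblocks >= 1 holds whenever the assembly entry points run.

	/* Illustrative sketch using kernel-style names; not the actual
	 * lib/crypto update code. */
	static void update_sketch(struct blake2s_ctx *ctx,
				  const u8 *in, size_t inlen)
	{
		if (inlen > BLAKE2S_BLOCK_SIZE) {
			/* inlen > BLAKE2S_BLOCK_SIZE implies nblocks >= 1 */
			const size_t nblocks =
				DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE) - 1;

			blake2s_compress(ctx, in, nblocks,
					 BLAKE2S_BLOCK_SIZE);
			in += nblocks * BLAKE2S_BLOCK_SIZE;
			inlen -= nblocks * BLAKE2S_BLOCK_SIZE;
		}
		/* The remaining <= BLAKE2S_BLOCK_SIZE bytes stay buffered. */
	}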
Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-3-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 3 --- 1 file changed, 3 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index 093e7814f387..aee13b97cc34 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -45,8 +45,6 @@ SIGMA2: .text SYM_FUNC_START(blake2s_compress_ssse3) - testq %rdx,%rdx - je .Lendofloop movdqu (%rdi),%xmm0 movdqu 0x10(%rdi),%xmm1 movdqa ROT16(%rip),%xmm12 @@ -168,7 +166,6 @@ SYM_FUNC_START(blake2s_compress_ssse3) movdqu %xmm0,(%rdi) movdqu %xmm1,0x10(%rdi) movdqu %xmm14,0x20(%rdi) -.Lendofloop: RET SYM_FUNC_END(blake2s_compress_ssse3) -- cgit v1.2.3 From 83c1a867c9999b3bb83e3da8f22eec9ec77a523c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 2 Nov 2025 15:42:06 -0800 Subject: lib/crypto: x86/blake2s: Use local labels for data Following the usual practice, prefix the names of the data labels with ".L" so that the assembler treats them as truly local. This more clearly expresses the intent and is less error-prone. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-4-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 45 +++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 19 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index aee13b97cc34..14e487559c09 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -6,19 +6,25 @@ #include -.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32 +.section .rodata.cst32.iv, "aM", @progbits, 32 .align 32 -IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667 +.Liv: + .octa 0xA54FF53A3C6EF372BB67AE856A09E667 .octa 0x5BE0CD191F83D9AB9B05688C510E527F -.section .rodata.cst16.ROT16, "aM", @progbits, 16 + +.section .rodata.cst16.ror16, "aM", @progbits, 16 .align 16 -ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302 -.section .rodata.cst16.ROR328, "aM", @progbits, 16 +.Lror16: + .octa 0x0D0C0F0E09080B0A0504070601000302 + +.section .rodata.cst16.ror8, "aM", @progbits, 16 .align 16 -ROR328: .octa 0x0C0F0E0D080B0A090407060500030201 -.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160 +.Lror8: + .octa 0x0C0F0E0D080B0A090407060500030201 + +.section .rodata.cst64.sigma, "aM", @progbits, 160 .align 64 -SIGMA: +.Lsigma: .byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 .byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7 .byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1 @@ -29,9 +35,10 @@ SIGMA: .byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 .byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 .byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 -.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 160 + +.section .rodata.cst64.sigma2, "aM", @progbits, 160 .align 64 -SIGMA2: +.Lsigma2: .byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 .byte 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7 .byte 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9 @@ -47,21 +54,21 @@ SIGMA2: SYM_FUNC_START(blake2s_compress_ssse3) movdqu (%rdi),%xmm0 movdqu 0x10(%rdi),%xmm1 - movdqa ROT16(%rip),%xmm12 - movdqa ROR328(%rip),%xmm13 + movdqa .Lror16(%rip),%xmm12 + movdqa .Lror8(%rip),%xmm13 movdqu 0x20(%rdi),%xmm14 movd %ecx,%xmm15 - leaq SIGMA+0xa0(%rip),%r8 + leaq .Lsigma+0xa0(%rip),%r8 jmp .Lbeginofloop .align 32 
.Lbeginofloop: movdqa %xmm0,%xmm10 movdqa %xmm1,%xmm11 paddq %xmm15,%xmm14 - movdqa IV(%rip),%xmm2 + movdqa .Liv(%rip),%xmm2 movdqa %xmm14,%xmm3 - pxor IV+0x10(%rip),%xmm3 - leaq SIGMA(%rip),%rcx + pxor .Liv+0x10(%rip),%xmm3 + leaq .Lsigma(%rip),%rcx .Lroundloop: movzbl (%rcx),%eax movd (%rsi,%rax,4),%xmm4 @@ -174,8 +181,8 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu 0x10(%rdi),%xmm1 vmovdqu 0x20(%rdi),%xmm4 vmovd %ecx,%xmm5 - vmovdqa IV(%rip),%xmm14 - vmovdqa IV+16(%rip),%xmm15 + vmovdqa .Liv(%rip),%xmm14 + vmovdqa .Liv+16(%rip),%xmm15 jmp .Lblake2s_compress_avx512_mainloop .align 32 .Lblake2s_compress_avx512_mainloop: @@ -187,7 +194,7 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rsi),%ymm6 vmovdqu 0x20(%rsi),%ymm7 addq $0x40,%rsi - leaq SIGMA2(%rip),%rax + leaq .Lsigma2(%rip),%rax movb $0xa,%cl .Lblake2s_compress_avx512_roundloop: vpmovzxbd (%rax),%ymm8 -- cgit v1.2.3 From a7acd77ebd7f17b07a6ab2ca1dd1e4d487bdfa80 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 2 Nov 2025 15:42:07 -0800 Subject: lib/crypto: x86/blake2s: Improve readability Various cleanups for readability. No change to the generated code: - Add some comments - Add #defines for arguments - Rename some labels - Use decimal constants instead of hex where it makes sense. (The pshufd immediates intentionally remain as hex.) - Add blank lines when there's a logical break The round loop still could use some work, but this is at least a start. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-5-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 231 ++++++++++++++++++++++++------------------ 1 file changed, 134 insertions(+), 97 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index 14e487559c09..f805a49c590d 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -50,34 +50,52 @@ .byte 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 .byte 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 +#define CTX %rdi +#define DATA %rsi +#define NBLOCKS %rdx +#define INC %ecx + .text +// +// void blake2s_compress_ssse3(struct blake2s_ctx *ctx, +// const u8 *data, size_t nblocks, u32 inc); +// +// Only the first three fields of struct blake2s_ctx are used: +// u32 h[8]; (inout) +// u32 t[2]; (inout) +// u32 f[2]; (in) +// SYM_FUNC_START(blake2s_compress_ssse3) - movdqu (%rdi),%xmm0 - movdqu 0x10(%rdi),%xmm1 + movdqu (CTX),%xmm0 // Load h[0..3] + movdqu 16(CTX),%xmm1 // Load h[4..7] movdqa .Lror16(%rip),%xmm12 movdqa .Lror8(%rip),%xmm13 - movdqu 0x20(%rdi),%xmm14 - movd %ecx,%xmm15 - leaq .Lsigma+0xa0(%rip),%r8 - jmp .Lbeginofloop + movdqu 32(CTX),%xmm14 // Load t and f + movd INC,%xmm15 // Load inc + leaq .Lsigma+160(%rip),%r8 + jmp .Lssse3_mainloop + .align 32 -.Lbeginofloop: - movdqa %xmm0,%xmm10 - movdqa %xmm1,%xmm11 - paddq %xmm15,%xmm14 - movdqa .Liv(%rip),%xmm2 +.Lssse3_mainloop: + // Main loop: each iteration processes one 64-byte block. + movdqa %xmm0,%xmm10 // Save h[0..3] and let v[0..3] = h[0..3] + movdqa %xmm1,%xmm11 // Save h[4..7] and let v[4..7] = h[4..7] + paddq %xmm15,%xmm14 // t += inc (64-bit addition) + movdqa .Liv(%rip),%xmm2 // v[8..11] = iv[0..3] movdqa %xmm14,%xmm3 - pxor .Liv+0x10(%rip),%xmm3 + pxor .Liv+16(%rip),%xmm3 // v[12..15] = iv[4..7] ^ [t, f] leaq .Lsigma(%rip),%rcx -.Lroundloop: + +.Lssse3_roundloop: + // Round loop: each iteration does 1 round (of 10 rounds total). 
movzbl (%rcx),%eax - movd (%rsi,%rax,4),%xmm4 - movzbl 0x1(%rcx),%eax - movd (%rsi,%rax,4),%xmm5 - movzbl 0x2(%rcx),%eax - movd (%rsi,%rax,4),%xmm6 - movzbl 0x3(%rcx),%eax - movd (%rsi,%rax,4),%xmm7 + movd (DATA,%rax,4),%xmm4 + movzbl 1(%rcx),%eax + movd (DATA,%rax,4),%xmm5 + movzbl 2(%rcx),%eax + movd (DATA,%rax,4),%xmm6 + movzbl 3(%rcx),%eax + movd (DATA,%rax,4),%xmm7 punpckldq %xmm5,%xmm4 punpckldq %xmm7,%xmm6 punpcklqdq %xmm6,%xmm4 @@ -88,17 +106,17 @@ SYM_FUNC_START(blake2s_compress_ssse3) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 - psrld $0xc,%xmm1 - pslld $0x14,%xmm8 + psrld $12,%xmm1 + pslld $20,%xmm8 por %xmm8,%xmm1 - movzbl 0x4(%rcx),%eax - movd (%rsi,%rax,4),%xmm5 - movzbl 0x5(%rcx),%eax - movd (%rsi,%rax,4),%xmm6 - movzbl 0x6(%rcx),%eax - movd (%rsi,%rax,4),%xmm7 - movzbl 0x7(%rcx),%eax - movd (%rsi,%rax,4),%xmm4 + movzbl 4(%rcx),%eax + movd (DATA,%rax,4),%xmm5 + movzbl 5(%rcx),%eax + movd (DATA,%rax,4),%xmm6 + movzbl 6(%rcx),%eax + movd (DATA,%rax,4),%xmm7 + movzbl 7(%rcx),%eax + movd (DATA,%rax,4),%xmm4 punpckldq %xmm6,%xmm5 punpckldq %xmm4,%xmm7 punpcklqdq %xmm7,%xmm5 @@ -109,20 +127,20 @@ SYM_FUNC_START(blake2s_compress_ssse3) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 - psrld $0x7,%xmm1 - pslld $0x19,%xmm8 + psrld $7,%xmm1 + pslld $25,%xmm8 por %xmm8,%xmm1 pshufd $0x93,%xmm0,%xmm0 pshufd $0x4e,%xmm3,%xmm3 pshufd $0x39,%xmm2,%xmm2 - movzbl 0x8(%rcx),%eax - movd (%rsi,%rax,4),%xmm6 - movzbl 0x9(%rcx),%eax - movd (%rsi,%rax,4),%xmm7 - movzbl 0xa(%rcx),%eax - movd (%rsi,%rax,4),%xmm4 - movzbl 0xb(%rcx),%eax - movd (%rsi,%rax,4),%xmm5 + movzbl 8(%rcx),%eax + movd (DATA,%rax,4),%xmm6 + movzbl 9(%rcx),%eax + movd (DATA,%rax,4),%xmm7 + movzbl 10(%rcx),%eax + movd (DATA,%rax,4),%xmm4 + movzbl 11(%rcx),%eax + movd (DATA,%rax,4),%xmm5 punpckldq %xmm7,%xmm6 punpckldq %xmm5,%xmm4 punpcklqdq %xmm4,%xmm6 @@ -133,17 +151,17 @@ SYM_FUNC_START(blake2s_compress_ssse3) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 - psrld $0xc,%xmm1 - pslld $0x14,%xmm8 + psrld $12,%xmm1 + pslld $20,%xmm8 por %xmm8,%xmm1 - movzbl 0xc(%rcx),%eax - movd (%rsi,%rax,4),%xmm7 - movzbl 0xd(%rcx),%eax - movd (%rsi,%rax,4),%xmm4 - movzbl 0xe(%rcx),%eax - movd (%rsi,%rax,4),%xmm5 - movzbl 0xf(%rcx),%eax - movd (%rsi,%rax,4),%xmm6 + movzbl 12(%rcx),%eax + movd (DATA,%rax,4),%xmm7 + movzbl 13(%rcx),%eax + movd (DATA,%rax,4),%xmm4 + movzbl 14(%rcx),%eax + movd (DATA,%rax,4),%xmm5 + movzbl 15(%rcx),%eax + movd (DATA,%rax,4),%xmm6 punpckldq %xmm4,%xmm7 punpckldq %xmm6,%xmm5 punpcklqdq %xmm5,%xmm7 @@ -154,52 +172,68 @@ SYM_FUNC_START(blake2s_compress_ssse3) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 - psrld $0x7,%xmm1 - pslld $0x19,%xmm8 + psrld $7,%xmm1 + pslld $25,%xmm8 por %xmm8,%xmm1 pshufd $0x39,%xmm0,%xmm0 pshufd $0x4e,%xmm3,%xmm3 pshufd $0x93,%xmm2,%xmm2 - addq $0x10,%rcx + addq $16,%rcx cmpq %r8,%rcx - jnz .Lroundloop + jnz .Lssse3_roundloop + + // Compute the new h: h[0..7] ^= v[0..7] ^ v[8..15] pxor %xmm2,%xmm0 pxor %xmm3,%xmm1 pxor %xmm10,%xmm0 pxor %xmm11,%xmm1 - addq $0x40,%rsi - decq %rdx - jnz .Lbeginofloop - movdqu %xmm0,(%rdi) - movdqu %xmm1,0x10(%rdi) - movdqu %xmm14,0x20(%rdi) + addq $64,DATA + decq NBLOCKS + jnz .Lssse3_mainloop + + movdqu %xmm0,(CTX) // Store new h[0..3] + movdqu %xmm1,16(CTX) // Store new h[4..7] + movdqu %xmm14,32(CTX) // Store new t and f RET SYM_FUNC_END(blake2s_compress_ssse3) +// +// void blake2s_compress_avx512(struct blake2s_ctx *ctx, +// const u8 *data, size_t nblocks, u32 inc); +// +// Only the first three fields of struct blake2s_ctx 
are used: +// u32 h[8]; (inout) +// u32 t[2]; (inout) +// u32 f[2]; (in) +// SYM_FUNC_START(blake2s_compress_avx512) - vmovdqu (%rdi),%xmm0 - vmovdqu 0x10(%rdi),%xmm1 - vmovdqu 0x20(%rdi),%xmm4 - vmovd %ecx,%xmm5 - vmovdqa .Liv(%rip),%xmm14 - vmovdqa .Liv+16(%rip),%xmm15 - jmp .Lblake2s_compress_avx512_mainloop -.align 32 -.Lblake2s_compress_avx512_mainloop: - vmovdqa %xmm0,%xmm10 - vmovdqa %xmm1,%xmm11 - vpaddq %xmm5,%xmm4,%xmm4 - vmovdqa %xmm14,%xmm2 - vpxor %xmm15,%xmm4,%xmm3 - vmovdqu (%rsi),%ymm6 - vmovdqu 0x20(%rsi),%ymm7 - addq $0x40,%rsi + vmovdqu (CTX),%xmm0 // Load h[0..3] + vmovdqu 16(CTX),%xmm1 // Load h[4..7] + vmovdqu 32(CTX),%xmm4 // Load t and f + vmovd INC,%xmm5 // Load inc + vmovdqa .Liv(%rip),%xmm14 // Load iv[0..3] + vmovdqa .Liv+16(%rip),%xmm15 // Load iv[4..7] + jmp .Lavx512_mainloop + + .align 32 +.Lavx512_mainloop: + // Main loop: each iteration processes one 64-byte block. + vmovdqa %xmm0,%xmm10 // Save h[0..3] and let v[0..3] = h[0..3] + vmovdqa %xmm1,%xmm11 // Save h[4..7] and let v[4..7] = h[4..7] + vpaddq %xmm5,%xmm4,%xmm4 // t += inc (64-bit addition) + vmovdqa %xmm14,%xmm2 // v[8..11] = iv[0..3] + vpxor %xmm15,%xmm4,%xmm3 // v[12..15] = iv[4..7] ^ [t, f] + vmovdqu (DATA),%ymm6 // Load first 8 data words + vmovdqu 32(DATA),%ymm7 // Load second 8 data words + addq $64,DATA leaq .Lsigma2(%rip),%rax - movb $0xa,%cl -.Lblake2s_compress_avx512_roundloop: + movb $10,%cl // Set num rounds remaining + +.Lavx512_roundloop: + // Round loop: each iteration does 1 round (of 10 rounds total). vpmovzxbd (%rax),%ymm8 - vpmovzxbd 0x8(%rax),%ymm9 - addq $0x10,%rax + vpmovzxbd 8(%rax),%ymm9 + addq $16,%rax vpermi2d %ymm7,%ymm6,%ymm8 vpermi2d %ymm7,%ymm6,%ymm9 vmovdqa %ymm8,%ymm6 @@ -207,50 +241,53 @@ SYM_FUNC_START(blake2s_compress_avx512) vpaddd %xmm8,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 - vprord $0x10,%xmm3,%xmm3 + vprord $16,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 - vprord $0xc,%xmm1,%xmm1 - vextracti128 $0x1,%ymm8,%xmm8 + vprord $12,%xmm1,%xmm1 + vextracti128 $1,%ymm8,%xmm8 vpaddd %xmm8,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 - vprord $0x8,%xmm3,%xmm3 + vprord $8,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 - vprord $0x7,%xmm1,%xmm1 + vprord $7,%xmm1,%xmm1 vpshufd $0x93,%xmm0,%xmm0 vpshufd $0x4e,%xmm3,%xmm3 vpshufd $0x39,%xmm2,%xmm2 vpaddd %xmm9,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 - vprord $0x10,%xmm3,%xmm3 + vprord $16,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 - vprord $0xc,%xmm1,%xmm1 - vextracti128 $0x1,%ymm9,%xmm9 + vprord $12,%xmm1,%xmm1 + vextracti128 $1,%ymm9,%xmm9 vpaddd %xmm9,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 - vprord $0x8,%xmm3,%xmm3 + vprord $8,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 - vprord $0x7,%xmm1,%xmm1 + vprord $7,%xmm1,%xmm1 vpshufd $0x39,%xmm0,%xmm0 vpshufd $0x4e,%xmm3,%xmm3 vpshufd $0x93,%xmm2,%xmm2 decb %cl - jne .Lblake2s_compress_avx512_roundloop + jne .Lavx512_roundloop + + // Compute the new h: h[0..7] ^= v[0..7] ^ v[8..15] vpxor %xmm10,%xmm0,%xmm0 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm2,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 - decq %rdx - jne .Lblake2s_compress_avx512_mainloop - vmovdqu %xmm0,(%rdi) - vmovdqu %xmm1,0x10(%rdi) - vmovdqu %xmm4,0x20(%rdi) + decq NBLOCKS + jne .Lavx512_mainloop + + vmovdqu %xmm0,(CTX) // Store new h[0..3] + vmovdqu %xmm1,16(CTX) // Store new h[4..7] + vmovdqu %xmm4,32(CTX) // Store new t and f vzeroupper RET SYM_FUNC_END(blake2s_compress_avx512) -- cgit 
v1.2.3 From cd5528621abb01664a477392cd3e76be2ef6296b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 2 Nov 2025 15:42:08 -0800 Subject: lib/crypto: x86/blake2s: Avoid writing back unchanged 'f' value Just before returning, blake2s_compress_ssse3() and blake2s_compress_avx512() store updated values to the 'h', 't', and 'f' fields of struct blake2s_ctx. But 'f' is always unchanged (which is correct; only the C code changes it). So, there's no need to write to 'f'. Use 64-bit stores (movq and vmovq) instead of 128-bit stores (movdqu and vmovdqu) so that only 't' is written. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-6-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index f805a49c590d..869064f6ac16 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -193,7 +193,7 @@ SYM_FUNC_START(blake2s_compress_ssse3) movdqu %xmm0,(CTX) // Store new h[0..3] movdqu %xmm1,16(CTX) // Store new h[4..7] - movdqu %xmm14,32(CTX) // Store new t and f + movq %xmm14,32(CTX) // Store new t (f is unchanged) RET SYM_FUNC_END(blake2s_compress_ssse3) @@ -287,7 +287,7 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu %xmm0,(CTX) // Store new h[0..3] vmovdqu %xmm1,16(CTX) // Store new h[4..7] - vmovdqu %xmm4,32(CTX) // Store new t and f + vmovq %xmm4,32(CTX) // Store new t (f is unchanged) vzeroupper RET SYM_FUNC_END(blake2s_compress_avx512) -- cgit v1.2.3 From 8ba60c5914f25a44f10189c6919a737b199f6dbf Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 2 Nov 2025 15:42:09 -0800 Subject: lib/crypto: x86/blake2s: Use vpternlogd for 3-input XORs AVX-512 supports 3-input XORs via the vpternlogd (or vpternlogq) instruction with immediate 0x96. This approach, vs. the alternative of two vpxor instructions, is already used in the CRC, AES-GCM, and AES-XTS code, since it reduces the instruction count and is faster on some CPUs. Make blake2s_compress_avx512() take advantage of it too. Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251102234209.62133-7-ebiggers@kernel.org Signed-off-by: Eric Biggers --- lib/crypto/x86/blake2s-core.S | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'lib/crypto') diff --git a/lib/crypto/x86/blake2s-core.S b/lib/crypto/x86/blake2s-core.S index 869064f6ac16..7b1d98ca7482 100644 --- a/lib/crypto/x86/blake2s-core.S +++ b/lib/crypto/x86/blake2s-core.S @@ -278,10 +278,8 @@ SYM_FUNC_START(blake2s_compress_avx512) jne .Lavx512_roundloop // Compute the new h: h[0..7] ^= v[0..7] ^ v[8..15] - vpxor %xmm10,%xmm0,%xmm0 - vpxor %xmm11,%xmm1,%xmm1 - vpxor %xmm2,%xmm0,%xmm0 - vpxor %xmm3,%xmm1,%xmm1 + vpternlogd $0x96,%xmm10,%xmm2,%xmm0 + vpternlogd $0x96,%xmm11,%xmm3,%xmm1 decq NBLOCKS jne .Lavx512_mainloop -- cgit v1.2.3 From 3d176751e541362ff40c2478d6a2de41f8c62318 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 9 Nov 2025 15:47:17 -0800 Subject: lib/crypto: polyval: Add POLYVAL library Add support for POLYVAL to lib/crypto/. This will replace the polyval crypto_shash algorithm and its use in the hctr2 template, simplifying the code and reducing overhead. Specifically, this commit introduces the POLYVAL library API and a generic implementation of it. Later commits will migrate the existing architecture-optimized implementations of POLYVAL into lib/crypto/ and add a KUnit test suite. 
I've also rewritten the generic implementation completely, using a more modern approach instead of the traditional table-based approach. It's now constant-time, requires no precomputation or dynamic memory allocations, decreases the per-key memory usage from 4096 bytes to 16 bytes, and is faster than the old polyval-generic even on bulk data reusing the same key (at least on x86_64, where I measured 15% faster). We should do this for GHASH too, but for now just do it for POLYVAL. Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251109234726.638437-3-ebiggers@kernel.org Signed-off-by: Eric Biggers --- include/crypto/polyval.h | 171 +++++++++++++++++++++++++- lib/crypto/Kconfig | 10 ++ lib/crypto/Makefile | 8 ++ lib/crypto/polyval.c | 307 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 493 insertions(+), 3 deletions(-) create mode 100644 lib/crypto/polyval.c (limited to 'lib/crypto') diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h index d2e63743e592..5ba4c248cad1 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -1,14 +1,179 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Common values for the Polyval hash algorithm + * POLYVAL library API * - * Copyright 2021 Google LLC + * Copyright 2025 Google LLC */ #ifndef _CRYPTO_POLYVAL_H #define _CRYPTO_POLYVAL_H +#include +#include + #define POLYVAL_BLOCK_SIZE 16 #define POLYVAL_DIGEST_SIZE 16 +/** + * struct polyval_elem - An element of the POLYVAL finite field + * @bytes: View of the element as a byte array (unioned with @lo and @hi) + * @lo: The low 64 terms of the element's polynomial + * @hi: The high 64 terms of the element's polynomial + * + * This represents an element of the finite field GF(2^128), using the POLYVAL + * convention: little-endian byte order and natural bit order. + */ +struct polyval_elem { + union { + u8 bytes[POLYVAL_BLOCK_SIZE]; + struct { + __le64 lo; + __le64 hi; + }; + }; +}; + +/** + * struct polyval_key - Prepared key for POLYVAL + * + * This may contain just the raw key H, or it may contain precomputed key + * powers, depending on the platform's POLYVAL implementation. Use + * polyval_preparekey() to initialize this. + */ +struct polyval_key { +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +#error "Unhandled arch" +#else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ + /** @h: The hash key H */ + struct polyval_elem h; +#endif /* !CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ +}; + +/** + * struct polyval_ctx - Context for computing a POLYVAL value + * @key: Pointer to the prepared POLYVAL key. The user of the API is + * responsible for ensuring that the key lives as long as the context. + * @acc: The accumulator + * @partial: Number of data bytes processed so far modulo POLYVAL_BLOCK_SIZE + */ +struct polyval_ctx { + const struct polyval_key *key; + struct polyval_elem acc; + size_t partial; +}; + +/** + * polyval_preparekey() - Prepare a POLYVAL key + * @key: (output) The key structure to initialize + * @raw_key: The raw hash key + * + * Initialize a POLYVAL key structure from a raw key. This may be a simple + * copy, or it may involve precomputing powers of the key, depending on the + * platform's POLYVAL implementation. + * + * Context: Any context. 
+ */ +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +void polyval_preparekey(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]); + +#else +static inline void polyval_preparekey(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]) +{ + /* Just a simple copy, so inline it. */ + memcpy(key->h.bytes, raw_key, POLYVAL_BLOCK_SIZE); +} #endif + +/** + * polyval_init() - Initialize a POLYVAL context for a new message + * @ctx: The context to initialize + * @key: The key to use. Note that a pointer to the key is saved in the + * context, so the key must live at least as long as the context. + */ +static inline void polyval_init(struct polyval_ctx *ctx, + const struct polyval_key *key) +{ + *ctx = (struct polyval_ctx){ .key = key }; +} + +/** + * polyval_import_blkaligned() - Import a POLYVAL accumulator value + * @ctx: The context to initialize + * @key: The key to import. Note that a pointer to the key is saved in the + * context, so the key must live at least as long as the context. + * @acc: The accumulator value to import. + * + * This imports an accumulator that was saved by polyval_export_blkaligned(). + * The same key must be used. + */ +static inline void +polyval_import_blkaligned(struct polyval_ctx *ctx, + const struct polyval_key *key, + const struct polyval_elem *acc) +{ + *ctx = (struct polyval_ctx){ .key = key, .acc = *acc }; +} + +/** + * polyval_export_blkaligned() - Export a POLYVAL accumulator value + * @ctx: The context to export the accumulator value from + * @acc: (output) The exported accumulator value + * + * This exports the accumulator from a POLYVAL context. The number of data + * bytes processed so far must be a multiple of POLYVAL_BLOCK_SIZE. + */ +static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx, + struct polyval_elem *acc) +{ + *acc = ctx->acc; +} + +/** + * polyval_update() - Update a POLYVAL context with message data + * @ctx: The context to update; must have been initialized + * @data: The message data + * @len: The data length in bytes. Doesn't need to be block-aligned. + * + * This can be called any number of times. + * + * Context: Any context. + */ +void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len); + +/** + * polyval_final() - Finish computing a POLYVAL value + * @ctx: The context to finalize + * @out: The output value + * + * If the total data length isn't a multiple of POLYVAL_BLOCK_SIZE, then the + * final block is automatically zero-padded. + * + * After finishing, this zeroizes @ctx. So the caller does not need to do it. + * + * Context: Any context. + */ +void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]); + +/** + * polyval() - Compute a POLYVAL value + * @key: The prepared key + * @data: The message data + * @len: The data length in bytes. Doesn't need to be block-aligned. + * @out: The output value + * + * Context: Any context. + */ +static inline void polyval(const struct polyval_key *key, + const u8 *data, size_t len, + u8 out[POLYVAL_BLOCK_SIZE]) +{ + struct polyval_ctx ctx; + + polyval_init(&ctx, key); + polyval_update(&ctx, data, len); + polyval_final(&ctx, out); +} + +#endif /* _CRYPTO_POLYVAL_H */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 7445054fc0ad..6545f0e83b83 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -135,6 +135,16 @@ config CRYPTO_LIB_POLY1305_RSIZE default 9 if ARM || ARM64 default 1 +config CRYPTO_LIB_POLYVAL + tristate + help + The POLYVAL library functions. 
Select this if your module uses any of + the functions from . + +config CRYPTO_LIB_POLYVAL_ARCH + bool + depends on CRYPTO_LIB_POLYVAL && !UML + config CRYPTO_LIB_CHACHA20POLY1305 tristate select CRYPTO_LIB_CHACHA diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 5515e73bfd5e..055e44008805 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -198,6 +198,14 @@ clean-files += arm/poly1305-core.S \ ################################################################################ +obj-$(CONFIG_CRYPTO_LIB_POLYVAL) += libpolyval.o +libpolyval-y := polyval.o +ifeq ($(CONFIG_CRYPTO_LIB_POLYVAL_ARCH),y) +CFLAGS_polyval.o += -I$(src)/$(SRCARCH) +endif + +################################################################################ + obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o libsha1-y := sha1.o ifeq ($(CONFIG_CRYPTO_LIB_SHA1_ARCH),y) diff --git a/lib/crypto/polyval.c b/lib/crypto/polyval.c new file mode 100644 index 000000000000..5796275f574a --- /dev/null +++ b/lib/crypto/polyval.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * POLYVAL library functions + * + * Copyright 2025 Google LLC + */ + +#include +#include +#include +#include +#include + +/* + * POLYVAL is an almost-XOR-universal hash function. Similar to GHASH, POLYVAL + * interprets the message as the coefficients of a polynomial in GF(2^128) and + * evaluates that polynomial at a secret point. POLYVAL has a simple + * mathematical relationship with GHASH, but it uses a better field convention + * which makes it easier and faster to implement. + * + * POLYVAL is not a cryptographic hash function, and it should be used only by + * algorithms that are specifically designed to use it. + * + * POLYVAL is specified by "AES-GCM-SIV: Nonce Misuse-Resistant Authenticated + * Encryption" (https://datatracker.ietf.org/doc/html/rfc8452) + * + * POLYVAL is also used by HCTR2. See "Length-preserving encryption with HCTR2" + * (https://eprint.iacr.org/2021/1441.pdf). + * + * This file provides a library API for POLYVAL. This API can delegate to + * either a generic implementation or an architecture-optimized implementation. + * + * For the generic implementation, we don't use the traditional table approach + * to GF(2^128) multiplication. That approach is not constant-time and requires + * a lot of memory. Instead, we use a different approach which emulates + * carryless multiplication using standard multiplications by spreading the data + * bits apart using "holes". This allows the carries to spill harmlessly. This + * approach is borrowed from BoringSSL, which in turn credits BearSSL's + * documentation (https://bearssl.org/constanttime.html#ghash-for-gcm) for the + * "holes" trick and a presentation by Shay Gueron + * (https://crypto.stanford.edu/RealWorldCrypto/slides/gueron.pdf) for the + * 256-bit => 128-bit reduction algorithm. + */ + +#ifdef CONFIG_ARCH_SUPPORTS_INT128 + +/* Do a 64 x 64 => 128 bit carryless multiplication. */ +static void clmul64(u64 a, u64 b, u64 *out_lo, u64 *out_hi) +{ + /* + * With 64-bit multiplicands and one term every 4 bits, there would be + * up to 64 / 4 = 16 one bits per column when each multiplication is + * written out as a series of additions in the schoolbook manner. + * Unfortunately, that doesn't work since the value 16 is 1 too large to + * fit in 4 bits. Carries would sometimes overflow into the next term. + * + * Using one term every 5 bits would work. However, that would cost + * 5 x 5 = 25 multiplications instead of 4 x 4 = 16. 
+ * + * Instead, mask off 4 bits from one multiplicand, giving a max of 15 + * one bits per column. Then handle those 4 bits separately. + */ + u64 a0 = a & 0x1111111111111110; + u64 a1 = a & 0x2222222222222220; + u64 a2 = a & 0x4444444444444440; + u64 a3 = a & 0x8888888888888880; + + u64 b0 = b & 0x1111111111111111; + u64 b1 = b & 0x2222222222222222; + u64 b2 = b & 0x4444444444444444; + u64 b3 = b & 0x8888888888888888; + + /* Multiply the high 60 bits of @a by @b. */ + u128 c0 = (a0 * (u128)b0) ^ (a1 * (u128)b3) ^ + (a2 * (u128)b2) ^ (a3 * (u128)b1); + u128 c1 = (a0 * (u128)b1) ^ (a1 * (u128)b0) ^ + (a2 * (u128)b3) ^ (a3 * (u128)b2); + u128 c2 = (a0 * (u128)b2) ^ (a1 * (u128)b1) ^ + (a2 * (u128)b0) ^ (a3 * (u128)b3); + u128 c3 = (a0 * (u128)b3) ^ (a1 * (u128)b2) ^ + (a2 * (u128)b1) ^ (a3 * (u128)b0); + + /* Multiply the low 4 bits of @a by @b. */ + u64 e0 = -(a & 1) & b; + u64 e1 = -((a >> 1) & 1) & b; + u64 e2 = -((a >> 2) & 1) & b; + u64 e3 = -((a >> 3) & 1) & b; + u64 extra_lo = e0 ^ (e1 << 1) ^ (e2 << 2) ^ (e3 << 3); + u64 extra_hi = (e1 >> 63) ^ (e2 >> 62) ^ (e3 >> 61); + + /* Add all the intermediate products together. */ + *out_lo = (((u64)c0) & 0x1111111111111111) ^ + (((u64)c1) & 0x2222222222222222) ^ + (((u64)c2) & 0x4444444444444444) ^ + (((u64)c3) & 0x8888888888888888) ^ extra_lo; + *out_hi = (((u64)(c0 >> 64)) & 0x1111111111111111) ^ + (((u64)(c1 >> 64)) & 0x2222222222222222) ^ + (((u64)(c2 >> 64)) & 0x4444444444444444) ^ + (((u64)(c3 >> 64)) & 0x8888888888888888) ^ extra_hi; +} + +#else /* CONFIG_ARCH_SUPPORTS_INT128 */ + +/* Do a 32 x 32 => 64 bit carryless multiplication. */ +static u64 clmul32(u32 a, u32 b) +{ + /* + * With 32-bit multiplicands and one term every 4 bits, there are up to + * 32 / 4 = 8 one bits per column when each multiplication is written + * out as a series of additions in the schoolbook manner. The value 8 + * fits in 4 bits, so the carries don't overflow into the next term. + */ + u32 a0 = a & 0x11111111; + u32 a1 = a & 0x22222222; + u32 a2 = a & 0x44444444; + u32 a3 = a & 0x88888888; + + u32 b0 = b & 0x11111111; + u32 b1 = b & 0x22222222; + u32 b2 = b & 0x44444444; + u32 b3 = b & 0x88888888; + + u64 c0 = (a0 * (u64)b0) ^ (a1 * (u64)b3) ^ + (a2 * (u64)b2) ^ (a3 * (u64)b1); + u64 c1 = (a0 * (u64)b1) ^ (a1 * (u64)b0) ^ + (a2 * (u64)b3) ^ (a3 * (u64)b2); + u64 c2 = (a0 * (u64)b2) ^ (a1 * (u64)b1) ^ + (a2 * (u64)b0) ^ (a3 * (u64)b3); + u64 c3 = (a0 * (u64)b3) ^ (a1 * (u64)b2) ^ + (a2 * (u64)b1) ^ (a3 * (u64)b0); + + /* Add all the intermediate products together. */ + return (c0 & 0x1111111111111111) ^ + (c1 & 0x2222222222222222) ^ + (c2 & 0x4444444444444444) ^ + (c3 & 0x8888888888888888); +} + +/* Do a 64 x 64 => 128 bit carryless multiplication. */ +static void clmul64(u64 a, u64 b, u64 *out_lo, u64 *out_hi) +{ + u32 a_lo = (u32)a; + u32 a_hi = a >> 32; + u32 b_lo = (u32)b; + u32 b_hi = b >> 32; + + /* Karatsuba multiplication */ + u64 lo = clmul32(a_lo, b_lo); + u64 hi = clmul32(a_hi, b_hi); + u64 mi = clmul32(a_lo ^ a_hi, b_lo ^ b_hi) ^ lo ^ hi; + + *out_lo = lo ^ (mi << 32); + *out_hi = hi ^ (mi >> 32); +} +#endif /* !CONFIG_ARCH_SUPPORTS_INT128 */ + +/* Compute @a = @a * @b * x^-128 in the POLYVAL field. */ +static void __maybe_unused +polyval_mul_generic(struct polyval_elem *a, const struct polyval_elem *b) +{ + u64 c0, c1, c2, c3, mi0, mi1; + + /* + * Carryless-multiply @a by @b using Karatsuba multiplication. Store + * the 256-bit product in @c0 (low) through @c3 (high). 
+ */ + clmul64(le64_to_cpu(a->lo), le64_to_cpu(b->lo), &c0, &c1); + clmul64(le64_to_cpu(a->hi), le64_to_cpu(b->hi), &c2, &c3); + clmul64(le64_to_cpu(a->lo ^ a->hi), le64_to_cpu(b->lo ^ b->hi), + &mi0, &mi1); + mi0 ^= c0 ^ c2; + mi1 ^= c1 ^ c3; + c1 ^= mi0; + c2 ^= mi1; + + /* + * Cancel out the low 128 bits of the product by adding multiples of + * G(x) = x^128 + x^127 + x^126 + x^121 + 1. Do this in two steps, each + * of which cancels out 64 bits. Note that we break G(x) into three + * parts: 1, x^64 * (x^63 + x^62 + x^57), and x^128 * 1. + */ + + /* + * First, add G(x) times c0 as follows: + * + * (c0, c1, c2) = (0, + * c1 + (c0 * (x^63 + x^62 + x^57) mod x^64), + * c2 + c0 + floor((c0 * (x^63 + x^62 + x^57)) / x^64)) + */ + c1 ^= (c0 << 63) ^ (c0 << 62) ^ (c0 << 57); + c2 ^= c0 ^ (c0 >> 1) ^ (c0 >> 2) ^ (c0 >> 7); + + /* + * Second, add G(x) times the new c1: + * + * (c1, c2, c3) = (0, + * c2 + (c1 * (x^63 + x^62 + x^57) mod x^64), + * c3 + c1 + floor((c1 * (x^63 + x^62 + x^57)) / x^64)) + */ + c2 ^= (c1 << 63) ^ (c1 << 62) ^ (c1 << 57); + c3 ^= c1 ^ (c1 >> 1) ^ (c1 >> 2) ^ (c1 >> 7); + + /* Return (c2, c3). This implicitly multiplies by x^-128. */ + a->lo = cpu_to_le64(c2); + a->hi = cpu_to_le64(c3); +} + +static void __maybe_unused +polyval_blocks_generic(struct polyval_elem *acc, const struct polyval_elem *key, + const u8 *data, size_t nblocks) +{ + do { + acc->lo ^= get_unaligned((__le64 *)data); + acc->hi ^= get_unaligned((__le64 *)(data + 8)); + polyval_mul_generic(acc, key); + data += POLYVAL_BLOCK_SIZE; + } while (--nblocks); +} + +/* Include the arch-optimized implementation of POLYVAL, if one is available. */ +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +#include "polyval.h" /* $(SRCARCH)/polyval.h */ +void polyval_preparekey(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]) +{ + polyval_preparekey_arch(key, raw_key); +} +EXPORT_SYMBOL_GPL(polyval_preparekey); +#endif /* Else, polyval_preparekey() is an inline function. */ + +/* + * polyval_mul_generic() and polyval_blocks_generic() take the key as a + * polyval_elem rather than a polyval_key, so that arch-optimized + * implementations with a different key format can use it as a fallback (if they + * have H^1 stored somewhere in their struct). Thus, the following dispatch + * code is needed to pass the appropriate key argument. 
+ */ + +static void polyval_mul(struct polyval_ctx *ctx) +{ +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH + polyval_mul_arch(&ctx->acc, ctx->key); +#else + polyval_mul_generic(&ctx->acc, &ctx->key->h); +#endif +} + +static void polyval_blocks(struct polyval_ctx *ctx, + const u8 *data, size_t nblocks) +{ +#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH + polyval_blocks_arch(&ctx->acc, ctx->key, data, nblocks); +#else + polyval_blocks_generic(&ctx->acc, &ctx->key->h, data, nblocks); +#endif +} + +void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len) +{ + if (unlikely(ctx->partial)) { + size_t n = min(len, POLYVAL_BLOCK_SIZE - ctx->partial); + + len -= n; + while (n--) + ctx->acc.bytes[ctx->partial++] ^= *data++; + if (ctx->partial < POLYVAL_BLOCK_SIZE) + return; + polyval_mul(ctx); + } + if (len >= POLYVAL_BLOCK_SIZE) { + size_t nblocks = len / POLYVAL_BLOCK_SIZE; + + polyval_blocks(ctx, data, nblocks); + data += len & ~(POLYVAL_BLOCK_SIZE - 1); + len &= POLYVAL_BLOCK_SIZE - 1; + } + for (size_t i = 0; i < len; i++) + ctx->acc.bytes[i] ^= data[i]; + ctx->partial = len; +} +EXPORT_SYMBOL_GPL(polyval_update); + +void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]) +{ + if (unlikely(ctx->partial)) + polyval_mul(ctx); + memcpy(out, &ctx->acc, POLYVAL_BLOCK_SIZE); + memzero_explicit(ctx, sizeof(*ctx)); +} +EXPORT_SYMBOL_GPL(polyval_final); + +#ifdef polyval_mod_init_arch +static int __init polyval_mod_init(void) +{ + polyval_mod_init_arch(); + return 0; +} +subsys_initcall(polyval_mod_init); + +static void __exit polyval_mod_exit(void) +{ +} +module_exit(polyval_mod_exit); +#endif + +MODULE_DESCRIPTION("POLYVAL almost-XOR-universal hash function"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 37919e239ebb2cba573cca56292f7c39fa6d7415 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 9 Nov 2025 15:47:19 -0800 Subject: lib/crypto: arm64/polyval: Migrate optimized code into library Migrate the arm64 implementation of POLYVAL into lib/crypto/, wiring it up to the POLYVAL library interface. This makes the POLYVAL library be properly optimized on arm64. This drops the arm64 optimizations of polyval in the crypto_shash API. That's fine, since polyval will be removed from crypto_shash entirely since it is unneeded there. But even if it comes back, the crypto_shash API could just be implemented on top of the library API, as usual. Adjust the names and prototypes of the assembly functions to align more closely with the rest of the library code. 
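Before the diff, a sketch of the per-arch contract that this and the following
migration implement: when CONFIG_CRYPTO_LIB_POLYVAL_ARCH=y, lib/crypto/polyval.c
includes $(SRCARCH)/polyval.h, which must supply the hooks called by the dispatch
code added above. Roughly (prototypes as they appear in the arm64 glue header
below; this is a summary, not a literal copy):

	/* In lib/crypto/<arch>/polyval.h: */
	static void polyval_preparekey_arch(struct polyval_key *key,
					    const u8 raw_key[POLYVAL_BLOCK_SIZE]);
	static void polyval_mul_arch(struct polyval_elem *acc,
				     const struct polyval_key *key);
	static void polyval_blocks_arch(struct polyval_elem *acc,
					const struct polyval_key *key,
					const u8 *data, size_t nblocks);

	/*
	 * Optionally, the arch header defines polyval_mod_init_arch() (plus
	 * "#define polyval_mod_init_arch polyval_mod_init_arch") to enable
	 * static keys based on CPU features at init time.  The arch also
	 * defines its struct polyval_key layout in include/crypto/polyval.h
	 * (e.g. precomputed key powers) and may fall back to
	 * polyval_mul_generic() / polyval_blocks_generic() when SIMD is
	 * unusable, which is why those take the key as a polyval_elem.
	 */
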
Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251109234726.638437-5-ebiggers@kernel.org Signed-off-by: Eric Biggers --- arch/arm64/crypto/Kconfig | 10 - arch/arm64/crypto/Makefile | 3 - arch/arm64/crypto/polyval-ce-core.S | 361 ------------------------------------ arch/arm64/crypto/polyval-ce-glue.c | 158 ---------------- include/crypto/polyval.h | 8 + lib/crypto/Kconfig | 1 + lib/crypto/Makefile | 1 + lib/crypto/arm64/polyval-ce-core.S | 359 +++++++++++++++++++++++++++++++++++ lib/crypto/arm64/polyval.h | 82 ++++++++ 9 files changed, 451 insertions(+), 532 deletions(-) delete mode 100644 arch/arm64/crypto/polyval-ce-core.S delete mode 100644 arch/arm64/crypto/polyval-ce-glue.c create mode 100644 lib/crypto/arm64/polyval-ce-core.S create mode 100644 lib/crypto/arm64/polyval.h (limited to 'lib/crypto') diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 376d6b50743f..bdd276a6e540 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -47,16 +47,6 @@ config CRYPTO_SM3_ARM64_CE Architecture: arm64 using: - ARMv8.2 Crypto Extensions -config CRYPTO_POLYVAL_ARM64_CE - tristate "Hash functions: POLYVAL (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON - select CRYPTO_POLYVAL - help - POLYVAL hash function for HCTR2 - - Architecture: arm64 using: - - ARMv8 Crypto Extensions - config CRYPTO_AES_ARM64 tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS" select CRYPTO_AES diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index fd3d590fa113..1e330aa08d3f 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -29,9 +29,6 @@ sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o -obj-$(CONFIG_CRYPTO_POLYVAL_ARM64_CE) += polyval-ce.o -polyval-ce-y := polyval-ce-glue.o polyval-ce-core.o - obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o diff --git a/arch/arm64/crypto/polyval-ce-core.S b/arch/arm64/crypto/polyval-ce-core.S deleted file mode 100644 index b5326540d2e3..000000000000 --- a/arch/arm64/crypto/polyval-ce-core.S +++ /dev/null @@ -1,361 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Implementation of POLYVAL using ARMv8 Crypto Extensions. - * - * Copyright 2021 Google LLC - */ -/* - * This is an efficient implementation of POLYVAL using ARMv8 Crypto Extensions - * It works on 8 blocks at a time, by precomputing the first 8 keys powers h^8, - * ..., h^1 in the POLYVAL finite field. This precomputation allows us to split - * finite field multiplication into two steps. - * - * In the first step, we consider h^i, m_i as normal polynomials of degree less - * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication - * is simply polynomial multiplication. - * - * In the second step, we compute the reduction of p(x) modulo the finite field - * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. - * - * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where - * multiplication is finite field multiplication. The advantage is that the - * two-step process only requires 1 finite field reduction for every 8 - * polynomial multiplications. Further parallelism is gained by interleaving the - * multiplications and polynomial reductions. 
- */ - -#include -#define STRIDE_BLOCKS 8 - -KEY_POWERS .req x0 -MSG .req x1 -BLOCKS_LEFT .req x2 -ACCUMULATOR .req x3 -KEY_START .req x10 -EXTRA_BYTES .req x11 -TMP .req x13 - -M0 .req v0 -M1 .req v1 -M2 .req v2 -M3 .req v3 -M4 .req v4 -M5 .req v5 -M6 .req v6 -M7 .req v7 -KEY8 .req v8 -KEY7 .req v9 -KEY6 .req v10 -KEY5 .req v11 -KEY4 .req v12 -KEY3 .req v13 -KEY2 .req v14 -KEY1 .req v15 -PL .req v16 -PH .req v17 -TMP_V .req v18 -LO .req v20 -MI .req v21 -HI .req v22 -SUM .req v23 -GSTAR .req v24 - - .text - - .arch armv8-a+crypto - .align 4 - -.Lgstar: - .quad 0xc200000000000000, 0xc200000000000000 - -/* - * Computes the product of two 128-bit polynomials in X and Y and XORs the - * components of the 256-bit product into LO, MI, HI. - * - * Given: - * X = [X_1 : X_0] - * Y = [Y_1 : Y_0] - * - * We compute: - * LO += X_0 * Y_0 - * MI += (X_0 + X_1) * (Y_0 + Y_1) - * HI += X_1 * Y_1 - * - * Later, the 256-bit result can be extracted as: - * [HI_1 : HI_0 + HI_1 + MI_1 + LO_1 : LO_1 + HI_0 + MI_0 + LO_0 : LO_0] - * This step is done when computing the polynomial reduction for efficiency - * reasons. - * - * Karatsuba multiplication is used instead of Schoolbook multiplication because - * it was found to be slightly faster on ARM64 CPUs. - * - */ -.macro karatsuba1 X Y - X .req \X - Y .req \Y - ext v25.16b, X.16b, X.16b, #8 - ext v26.16b, Y.16b, Y.16b, #8 - eor v25.16b, v25.16b, X.16b - eor v26.16b, v26.16b, Y.16b - pmull2 v28.1q, X.2d, Y.2d - pmull v29.1q, X.1d, Y.1d - pmull v27.1q, v25.1d, v26.1d - eor HI.16b, HI.16b, v28.16b - eor LO.16b, LO.16b, v29.16b - eor MI.16b, MI.16b, v27.16b - .unreq X - .unreq Y -.endm - -/* - * Same as karatsuba1, except overwrites HI, LO, MI rather than XORing into - * them. - */ -.macro karatsuba1_store X Y - X .req \X - Y .req \Y - ext v25.16b, X.16b, X.16b, #8 - ext v26.16b, Y.16b, Y.16b, #8 - eor v25.16b, v25.16b, X.16b - eor v26.16b, v26.16b, Y.16b - pmull2 HI.1q, X.2d, Y.2d - pmull LO.1q, X.1d, Y.1d - pmull MI.1q, v25.1d, v26.1d - .unreq X - .unreq Y -.endm - -/* - * Computes the 256-bit polynomial represented by LO, HI, MI. Stores - * the result in PL, PH. - * [PH : PL] = - * [HI_1 : HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0 : LO_0] - */ -.macro karatsuba2 - // v4 = [HI_1 + MI_1 : HI_0 + MI_0] - eor v4.16b, HI.16b, MI.16b - // v4 = [HI_1 + MI_1 + LO_1 : HI_0 + MI_0 + LO_0] - eor v4.16b, v4.16b, LO.16b - // v5 = [HI_0 : LO_1] - ext v5.16b, LO.16b, HI.16b, #8 - // v4 = [HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0] - eor v4.16b, v4.16b, v5.16b - // HI = [HI_0 : HI_1] - ext HI.16b, HI.16b, HI.16b, #8 - // LO = [LO_0 : LO_1] - ext LO.16b, LO.16b, LO.16b, #8 - // PH = [HI_1 : HI_1 + HI_0 + MI_1 + LO_1] - ext PH.16b, v4.16b, HI.16b, #8 - // PL = [HI_0 + MI_0 + LO_1 + LO_0 : LO_0] - ext PL.16b, LO.16b, v4.16b, #8 -.endm - -/* - * Computes the 128-bit reduction of PH : PL. Stores the result in dest. - * - * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = - * x^128 + x^127 + x^126 + x^121 + 1. - * - * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the - * product of two 128-bit polynomials in Montgomery form. We need to reduce it - * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor - * of x^128, this product has two extra factors of x^128. To get it back into - * Montgomery form, we need to remove one of these factors by dividing by x^128. 
- * - * To accomplish both of these goals, we add multiples of g(x) that cancel out - * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low - * bits are zero, the polynomial division by x^128 can be done by right - * shifting. - * - * Since the only nonzero term in the low 64 bits of g(x) is the constant term, - * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can - * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + - * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to - * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T - * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. - * - * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits - * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 - * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * - * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : - * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). - * - * So our final computation is: - * T = T_1 : T_0 = g*(x) * P_0 - * V = V_1 : V_0 = g*(x) * (P_1 + T_0) - * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 - * - * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 - * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : - * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. - */ -.macro montgomery_reduction dest - DEST .req \dest - // TMP_V = T_1 : T_0 = P_0 * g*(x) - pmull TMP_V.1q, PL.1d, GSTAR.1d - // TMP_V = T_0 : T_1 - ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 - // TMP_V = P_1 + T_0 : P_0 + T_1 - eor TMP_V.16b, PL.16b, TMP_V.16b - // PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 - eor PH.16b, PH.16b, TMP_V.16b - // TMP_V = V_1 : V_0 = (P_1 + T_0) * g*(x) - pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d - eor DEST.16b, PH.16b, TMP_V.16b - .unreq DEST -.endm - -/* - * Compute Polyval on 8 blocks. - * - * If reduce is set, also computes the montgomery reduction of the - * previous full_stride call and XORs with the first message block. - * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. - * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. - * - * Sets PL, PH. - */ -.macro full_stride reduce - eor LO.16b, LO.16b, LO.16b - eor MI.16b, MI.16b, MI.16b - eor HI.16b, HI.16b, HI.16b - - ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 - ld1 {M4.16b, M5.16b, M6.16b, M7.16b}, [MSG], #64 - - karatsuba1 M7 KEY1 - .if \reduce - pmull TMP_V.1q, PL.1d, GSTAR.1d - .endif - - karatsuba1 M6 KEY2 - .if \reduce - ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 - .endif - - karatsuba1 M5 KEY3 - .if \reduce - eor TMP_V.16b, PL.16b, TMP_V.16b - .endif - - karatsuba1 M4 KEY4 - .if \reduce - eor PH.16b, PH.16b, TMP_V.16b - .endif - - karatsuba1 M3 KEY5 - .if \reduce - pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d - .endif - - karatsuba1 M2 KEY6 - .if \reduce - eor SUM.16b, PH.16b, TMP_V.16b - .endif - - karatsuba1 M1 KEY7 - eor M0.16b, M0.16b, SUM.16b - - karatsuba1 M0 KEY8 - karatsuba2 -.endm - -/* - * Handle any extra blocks after full_stride loop. 
- */ -.macro partial_stride - add KEY_POWERS, KEY_START, #(STRIDE_BLOCKS << 4) - sub KEY_POWERS, KEY_POWERS, BLOCKS_LEFT, lsl #4 - ld1 {KEY1.16b}, [KEY_POWERS], #16 - - ld1 {TMP_V.16b}, [MSG], #16 - eor SUM.16b, SUM.16b, TMP_V.16b - karatsuba1_store KEY1 SUM - sub BLOCKS_LEFT, BLOCKS_LEFT, #1 - - tst BLOCKS_LEFT, #4 - beq .Lpartial4BlocksDone - ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 - ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 - karatsuba1 M0 KEY8 - karatsuba1 M1 KEY7 - karatsuba1 M2 KEY6 - karatsuba1 M3 KEY5 -.Lpartial4BlocksDone: - tst BLOCKS_LEFT, #2 - beq .Lpartial2BlocksDone - ld1 {M0.16b, M1.16b}, [MSG], #32 - ld1 {KEY8.16b, KEY7.16b}, [KEY_POWERS], #32 - karatsuba1 M0 KEY8 - karatsuba1 M1 KEY7 -.Lpartial2BlocksDone: - tst BLOCKS_LEFT, #1 - beq .LpartialDone - ld1 {M0.16b}, [MSG], #16 - ld1 {KEY8.16b}, [KEY_POWERS], #16 - karatsuba1 M0 KEY8 -.LpartialDone: - karatsuba2 - montgomery_reduction SUM -.endm - -/* - * Perform montgomery multiplication in GF(2^128) and store result in op1. - * - * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 - * If op1, op2 are in montgomery form, this computes the montgomery - * form of op1*op2. - * - * void pmull_polyval_mul(u8 *op1, const u8 *op2); - */ -SYM_FUNC_START(pmull_polyval_mul) - adr TMP, .Lgstar - ld1 {GSTAR.2d}, [TMP] - ld1 {v0.16b}, [x0] - ld1 {v1.16b}, [x1] - karatsuba1_store v0 v1 - karatsuba2 - montgomery_reduction SUM - st1 {SUM.16b}, [x0] - ret -SYM_FUNC_END(pmull_polyval_mul) - -/* - * Perform polynomial evaluation as specified by POLYVAL. This computes: - * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} - * where n=nblocks, h is the hash key, and m_i are the message blocks. - * - * x0 - pointer to precomputed key powers h^8 ... h^1 - * x1 - pointer to message blocks - * x2 - number of blocks to hash - * x3 - pointer to accumulator - * - * void pmull_polyval_update(const struct polyval_ctx *ctx, const u8 *in, - * size_t nblocks, u8 *accumulator); - */ -SYM_FUNC_START(pmull_polyval_update) - adr TMP, .Lgstar - mov KEY_START, KEY_POWERS - ld1 {GSTAR.2d}, [TMP] - ld1 {SUM.16b}, [ACCUMULATOR] - subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS - blt .LstrideLoopExit - ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 - ld1 {KEY4.16b, KEY3.16b, KEY2.16b, KEY1.16b}, [KEY_POWERS], #64 - full_stride 0 - subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS - blt .LstrideLoopExitReduce -.LstrideLoop: - full_stride 1 - subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS - bge .LstrideLoop -.LstrideLoopExitReduce: - montgomery_reduction SUM -.LstrideLoopExit: - adds BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS - beq .LskipPartial - partial_stride -.LskipPartial: - st1 {SUM.16b}, [ACCUMULATOR] - ret -SYM_FUNC_END(pmull_polyval_update) diff --git a/arch/arm64/crypto/polyval-ce-glue.c b/arch/arm64/crypto/polyval-ce-glue.c deleted file mode 100644 index c4e653688ea0..000000000000 --- a/arch/arm64/crypto/polyval-ce-glue.c +++ /dev/null @@ -1,158 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Glue code for POLYVAL using ARMv8 Crypto Extensions - * - * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen - * Copyright (c) 2009 Intel Corp. - * Author: Huang Ying - * Copyright 2021 Google LLC - */ - -/* - * Glue code based on ghash-clmulni-intel_glue.c. - * - * This implementation of POLYVAL uses montgomery multiplication accelerated by - * ARMv8 Crypto Extensions instructions to implement the finite field operations. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define NUM_KEY_POWERS 8 - -struct polyval_tfm_ctx { - /* - * These powers must be in the order h^8, ..., h^1. - */ - u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE]; -}; - -struct polyval_desc_ctx { - u8 buffer[POLYVAL_BLOCK_SIZE]; -}; - -asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys, - const u8 *in, size_t nblocks, u8 *accumulator); -asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2); - -static void internal_polyval_update(const struct polyval_tfm_ctx *keys, - const u8 *in, size_t nblocks, u8 *accumulator) -{ - kernel_neon_begin(); - pmull_polyval_update(keys, in, nblocks, accumulator); - kernel_neon_end(); -} - -static void internal_polyval_mul(u8 *op1, const u8 *op2) -{ - kernel_neon_begin(); - pmull_polyval_mul(op1, op2); - kernel_neon_end(); -} - -static int polyval_arm64_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm); - int i; - - if (keylen != POLYVAL_BLOCK_SIZE) - return -EINVAL; - - memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); - - for (i = NUM_KEY_POWERS-2; i >= 0; i--) { - memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); - internal_polyval_mul(tctx->key_powers[i], - tctx->key_powers[i+1]); - } - - return 0; -} - -static int polyval_arm64_init(struct shash_desc *desc) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - - memset(dctx, 0, sizeof(*dctx)); - - return 0; -} - -static int polyval_arm64_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - unsigned int nblocks; - - do { - /* allow rescheduling every 4K bytes */ - nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; - internal_polyval_update(tctx, src, nblocks, dctx->buffer); - srclen -= nblocks * POLYVAL_BLOCK_SIZE; - src += nblocks * POLYVAL_BLOCK_SIZE; - } while (srclen >= POLYVAL_BLOCK_SIZE); - - return srclen; -} - -static int polyval_arm64_finup(struct shash_desc *desc, const u8 *src, - unsigned int len, u8 *dst) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - - if (len) { - crypto_xor(dctx->buffer, src, len); - internal_polyval_mul(dctx->buffer, - tctx->key_powers[NUM_KEY_POWERS-1]); - } - - memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); - - return 0; -} - -static struct shash_alg polyval_alg = { - .digestsize = POLYVAL_DIGEST_SIZE, - .init = polyval_arm64_init, - .update = polyval_arm64_update, - .finup = polyval_arm64_finup, - .setkey = polyval_arm64_setkey, - .descsize = sizeof(struct polyval_desc_ctx), - .base = { - .cra_name = "polyval", - .cra_driver_name = "polyval-ce", - .cra_priority = 200, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .cra_blocksize = POLYVAL_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct polyval_tfm_ctx), - .cra_module = THIS_MODULE, - }, -}; - -static int __init polyval_ce_mod_init(void) -{ - return crypto_register_shash(&polyval_alg); -} - -static void __exit polyval_ce_mod_exit(void) -{ - crypto_unregister_shash(&polyval_alg); -} - -module_cpu_feature_match(PMULL, polyval_ce_mod_init) -module_exit(polyval_ce_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("POLYVAL hash function accelerated by ARMv8 Crypto Extensions"); -MODULE_ALIAS_CRYPTO("polyval"); -MODULE_ALIAS_CRYPTO("polyval-ce"); diff --git 
a/include/crypto/polyval.h b/include/crypto/polyval.h index 5ba4c248cad1..f8aaf4275fbd 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -39,10 +39,18 @@ struct polyval_elem { * This may contain just the raw key H, or it may contain precomputed key * powers, depending on the platform's POLYVAL implementation. Use * polyval_preparekey() to initialize this. + * + * By H^i we mean H^(i-1) * H * x^-128, with base case H^1 = H. I.e. the + * exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128. */ struct polyval_key { #ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH +#ifdef CONFIG_ARM64 + /** @h_powers: Powers of the hash key H^8 through H^1 */ + struct polyval_elem h_powers[8]; +#else #error "Unhandled arch" +#endif #else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ /** @h: The hash key H */ struct polyval_elem h; diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 6545f0e83b83..430723994142 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -144,6 +144,7 @@ config CRYPTO_LIB_POLYVAL config CRYPTO_LIB_POLYVAL_ARCH bool depends on CRYPTO_LIB_POLYVAL && !UML + default y if ARM64 && KERNEL_MODE_NEON config CRYPTO_LIB_CHACHA20POLY1305 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 055e44008805..2efa96afcb4b 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -202,6 +202,7 @@ obj-$(CONFIG_CRYPTO_LIB_POLYVAL) += libpolyval.o libpolyval-y := polyval.o ifeq ($(CONFIG_CRYPTO_LIB_POLYVAL_ARCH),y) CFLAGS_polyval.o += -I$(src)/$(SRCARCH) +libpolyval-$(CONFIG_ARM64) += arm64/polyval-ce-core.o endif ################################################################################ diff --git a/lib/crypto/arm64/polyval-ce-core.S b/lib/crypto/arm64/polyval-ce-core.S new file mode 100644 index 000000000000..7c731a044d02 --- /dev/null +++ b/lib/crypto/arm64/polyval-ce-core.S @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Implementation of POLYVAL using ARMv8 Crypto Extensions. + * + * Copyright 2021 Google LLC + */ +/* + * This is an efficient implementation of POLYVAL using ARMv8 Crypto Extensions + * It works on 8 blocks at a time, by precomputing the first 8 keys powers h^8, + * ..., h^1 in the POLYVAL finite field. This precomputation allows us to split + * finite field multiplication into two steps. + * + * In the first step, we consider h^i, m_i as normal polynomials of degree less + * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication + * is simply polynomial multiplication. + * + * In the second step, we compute the reduction of p(x) modulo the finite field + * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. + * + * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where + * multiplication is finite field multiplication. The advantage is that the + * two-step process only requires 1 finite field reduction for every 8 + * polynomial multiplications. Further parallelism is gained by interleaving the + * multiplications and polynomial reductions. 
+ */ + +#include +#define STRIDE_BLOCKS 8 + +ACCUMULATOR .req x0 +KEY_POWERS .req x1 +MSG .req x2 +BLOCKS_LEFT .req x3 +KEY_START .req x10 +EXTRA_BYTES .req x11 +TMP .req x13 + +M0 .req v0 +M1 .req v1 +M2 .req v2 +M3 .req v3 +M4 .req v4 +M5 .req v5 +M6 .req v6 +M7 .req v7 +KEY8 .req v8 +KEY7 .req v9 +KEY6 .req v10 +KEY5 .req v11 +KEY4 .req v12 +KEY3 .req v13 +KEY2 .req v14 +KEY1 .req v15 +PL .req v16 +PH .req v17 +TMP_V .req v18 +LO .req v20 +MI .req v21 +HI .req v22 +SUM .req v23 +GSTAR .req v24 + + .text + + .arch armv8-a+crypto + .align 4 + +.Lgstar: + .quad 0xc200000000000000, 0xc200000000000000 + +/* + * Computes the product of two 128-bit polynomials in X and Y and XORs the + * components of the 256-bit product into LO, MI, HI. + * + * Given: + * X = [X_1 : X_0] + * Y = [Y_1 : Y_0] + * + * We compute: + * LO += X_0 * Y_0 + * MI += (X_0 + X_1) * (Y_0 + Y_1) + * HI += X_1 * Y_1 + * + * Later, the 256-bit result can be extracted as: + * [HI_1 : HI_0 + HI_1 + MI_1 + LO_1 : LO_1 + HI_0 + MI_0 + LO_0 : LO_0] + * This step is done when computing the polynomial reduction for efficiency + * reasons. + * + * Karatsuba multiplication is used instead of Schoolbook multiplication because + * it was found to be slightly faster on ARM64 CPUs. + * + */ +.macro karatsuba1 X Y + X .req \X + Y .req \Y + ext v25.16b, X.16b, X.16b, #8 + ext v26.16b, Y.16b, Y.16b, #8 + eor v25.16b, v25.16b, X.16b + eor v26.16b, v26.16b, Y.16b + pmull2 v28.1q, X.2d, Y.2d + pmull v29.1q, X.1d, Y.1d + pmull v27.1q, v25.1d, v26.1d + eor HI.16b, HI.16b, v28.16b + eor LO.16b, LO.16b, v29.16b + eor MI.16b, MI.16b, v27.16b + .unreq X + .unreq Y +.endm + +/* + * Same as karatsuba1, except overwrites HI, LO, MI rather than XORing into + * them. + */ +.macro karatsuba1_store X Y + X .req \X + Y .req \Y + ext v25.16b, X.16b, X.16b, #8 + ext v26.16b, Y.16b, Y.16b, #8 + eor v25.16b, v25.16b, X.16b + eor v26.16b, v26.16b, Y.16b + pmull2 HI.1q, X.2d, Y.2d + pmull LO.1q, X.1d, Y.1d + pmull MI.1q, v25.1d, v26.1d + .unreq X + .unreq Y +.endm + +/* + * Computes the 256-bit polynomial represented by LO, HI, MI. Stores + * the result in PL, PH. + * [PH : PL] = + * [HI_1 : HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0 : LO_0] + */ +.macro karatsuba2 + // v4 = [HI_1 + MI_1 : HI_0 + MI_0] + eor v4.16b, HI.16b, MI.16b + // v4 = [HI_1 + MI_1 + LO_1 : HI_0 + MI_0 + LO_0] + eor v4.16b, v4.16b, LO.16b + // v5 = [HI_0 : LO_1] + ext v5.16b, LO.16b, HI.16b, #8 + // v4 = [HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0] + eor v4.16b, v4.16b, v5.16b + // HI = [HI_0 : HI_1] + ext HI.16b, HI.16b, HI.16b, #8 + // LO = [LO_0 : LO_1] + ext LO.16b, LO.16b, LO.16b, #8 + // PH = [HI_1 : HI_1 + HI_0 + MI_1 + LO_1] + ext PH.16b, v4.16b, HI.16b, #8 + // PL = [HI_0 + MI_0 + LO_1 + LO_0 : LO_0] + ext PL.16b, LO.16b, v4.16b, #8 +.endm + +/* + * Computes the 128-bit reduction of PH : PL. Stores the result in dest. + * + * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = + * x^128 + x^127 + x^126 + x^121 + 1. + * + * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the + * product of two 128-bit polynomials in Montgomery form. We need to reduce it + * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor + * of x^128, this product has two extra factors of x^128. To get it back into + * Montgomery form, we need to remove one of these factors by dividing by x^128. 
+ * + * To accomplish both of these goals, we add multiples of g(x) that cancel out + * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low + * bits are zero, the polynomial division by x^128 can be done by right + * shifting. + * + * Since the only nonzero term in the low 64 bits of g(x) is the constant term, + * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can + * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + + * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to + * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T + * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. + * + * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits + * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 + * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * + * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : + * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). + * + * So our final computation is: + * T = T_1 : T_0 = g*(x) * P_0 + * V = V_1 : V_0 = g*(x) * (P_1 + T_0) + * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 + * + * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 + * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : + * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. + */ +.macro montgomery_reduction dest + DEST .req \dest + // TMP_V = T_1 : T_0 = P_0 * g*(x) + pmull TMP_V.1q, PL.1d, GSTAR.1d + // TMP_V = T_0 : T_1 + ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 + // TMP_V = P_1 + T_0 : P_0 + T_1 + eor TMP_V.16b, PL.16b, TMP_V.16b + // PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 + eor PH.16b, PH.16b, TMP_V.16b + // TMP_V = V_1 : V_0 = (P_1 + T_0) * g*(x) + pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d + eor DEST.16b, PH.16b, TMP_V.16b + .unreq DEST +.endm + +/* + * Compute Polyval on 8 blocks. + * + * If reduce is set, also computes the montgomery reduction of the + * previous full_stride call and XORs with the first message block. + * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. + * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. + * + * Sets PL, PH. + */ +.macro full_stride reduce + eor LO.16b, LO.16b, LO.16b + eor MI.16b, MI.16b, MI.16b + eor HI.16b, HI.16b, HI.16b + + ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 + ld1 {M4.16b, M5.16b, M6.16b, M7.16b}, [MSG], #64 + + karatsuba1 M7 KEY1 + .if \reduce + pmull TMP_V.1q, PL.1d, GSTAR.1d + .endif + + karatsuba1 M6 KEY2 + .if \reduce + ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 + .endif + + karatsuba1 M5 KEY3 + .if \reduce + eor TMP_V.16b, PL.16b, TMP_V.16b + .endif + + karatsuba1 M4 KEY4 + .if \reduce + eor PH.16b, PH.16b, TMP_V.16b + .endif + + karatsuba1 M3 KEY5 + .if \reduce + pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d + .endif + + karatsuba1 M2 KEY6 + .if \reduce + eor SUM.16b, PH.16b, TMP_V.16b + .endif + + karatsuba1 M1 KEY7 + eor M0.16b, M0.16b, SUM.16b + + karatsuba1 M0 KEY8 + karatsuba2 +.endm + +/* + * Handle any extra blocks after full_stride loop. 
+ */ +.macro partial_stride + add KEY_POWERS, KEY_START, #(STRIDE_BLOCKS << 4) + sub KEY_POWERS, KEY_POWERS, BLOCKS_LEFT, lsl #4 + ld1 {KEY1.16b}, [KEY_POWERS], #16 + + ld1 {TMP_V.16b}, [MSG], #16 + eor SUM.16b, SUM.16b, TMP_V.16b + karatsuba1_store KEY1 SUM + sub BLOCKS_LEFT, BLOCKS_LEFT, #1 + + tst BLOCKS_LEFT, #4 + beq .Lpartial4BlocksDone + ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 + ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 + karatsuba1 M0 KEY8 + karatsuba1 M1 KEY7 + karatsuba1 M2 KEY6 + karatsuba1 M3 KEY5 +.Lpartial4BlocksDone: + tst BLOCKS_LEFT, #2 + beq .Lpartial2BlocksDone + ld1 {M0.16b, M1.16b}, [MSG], #32 + ld1 {KEY8.16b, KEY7.16b}, [KEY_POWERS], #32 + karatsuba1 M0 KEY8 + karatsuba1 M1 KEY7 +.Lpartial2BlocksDone: + tst BLOCKS_LEFT, #1 + beq .LpartialDone + ld1 {M0.16b}, [MSG], #16 + ld1 {KEY8.16b}, [KEY_POWERS], #16 + karatsuba1 M0 KEY8 +.LpartialDone: + karatsuba2 + montgomery_reduction SUM +.endm + +/* + * Computes a = a * b * x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1. + * + * void polyval_mul_pmull(struct polyval_elem *a, + * const struct polyval_elem *b); + */ +SYM_FUNC_START(polyval_mul_pmull) + adr TMP, .Lgstar + ld1 {GSTAR.2d}, [TMP] + ld1 {v0.16b}, [x0] + ld1 {v1.16b}, [x1] + karatsuba1_store v0 v1 + karatsuba2 + montgomery_reduction SUM + st1 {SUM.16b}, [x0] + ret +SYM_FUNC_END(polyval_mul_pmull) + +/* + * Perform polynomial evaluation as specified by POLYVAL. This computes: + * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} + * where n=nblocks, h is the hash key, and m_i are the message blocks. + * + * x0 - pointer to accumulator + * x1 - pointer to precomputed key powers h^8 ... h^1 + * x2 - pointer to message blocks + * x3 - number of blocks to hash + * + * void polyval_blocks_pmull(struct polyval_elem *acc, + * const struct polyval_key *key, + * const u8 *data, size_t nblocks); + */ +SYM_FUNC_START(polyval_blocks_pmull) + adr TMP, .Lgstar + mov KEY_START, KEY_POWERS + ld1 {GSTAR.2d}, [TMP] + ld1 {SUM.16b}, [ACCUMULATOR] + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + blt .LstrideLoopExit + ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 + ld1 {KEY4.16b, KEY3.16b, KEY2.16b, KEY1.16b}, [KEY_POWERS], #64 + full_stride 0 + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + blt .LstrideLoopExitReduce +.LstrideLoop: + full_stride 1 + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + bge .LstrideLoop +.LstrideLoopExitReduce: + montgomery_reduction SUM +.LstrideLoopExit: + adds BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + beq .LskipPartial + partial_stride +.LskipPartial: + st1 {SUM.16b}, [ACCUMULATOR] + ret +SYM_FUNC_END(polyval_blocks_pmull) diff --git a/lib/crypto/arm64/polyval.h b/lib/crypto/arm64/polyval.h new file mode 100644 index 000000000000..2486e80750d0 --- /dev/null +++ b/lib/crypto/arm64/polyval.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * POLYVAL library functions, arm64 optimized + * + * Copyright 2025 Google LLC + */ +#include +#include +#include + +#define NUM_H_POWERS 8 + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull); + +asmlinkage void polyval_mul_pmull(struct polyval_elem *a, + const struct polyval_elem *b); +asmlinkage void polyval_blocks_pmull(struct polyval_elem *acc, + const struct polyval_key *key, + const u8 *data, size_t nblocks); + +static void polyval_preparekey_arch(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]) +{ + static_assert(ARRAY_SIZE(key->h_powers) == NUM_H_POWERS); + memcpy(&key->h_powers[NUM_H_POWERS - 1], raw_key, 
POLYVAL_BLOCK_SIZE); + if (static_branch_likely(&have_pmull) && may_use_simd()) { + kernel_neon_begin(); + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { + key->h_powers[i] = key->h_powers[i + 1]; + polyval_mul_pmull(&key->h_powers[i], + &key->h_powers[NUM_H_POWERS - 1]); + } + kernel_neon_end(); + } else { + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { + key->h_powers[i] = key->h_powers[i + 1]; + polyval_mul_generic(&key->h_powers[i], + &key->h_powers[NUM_H_POWERS - 1]); + } + } +} + +static void polyval_mul_arch(struct polyval_elem *acc, + const struct polyval_key *key) +{ + if (static_branch_likely(&have_pmull) && may_use_simd()) { + kernel_neon_begin(); + polyval_mul_pmull(acc, &key->h_powers[NUM_H_POWERS - 1]); + kernel_neon_end(); + } else { + polyval_mul_generic(acc, &key->h_powers[NUM_H_POWERS - 1]); + } +} + +static void polyval_blocks_arch(struct polyval_elem *acc, + const struct polyval_key *key, + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_pmull) && may_use_simd()) { + do { + /* Allow rescheduling every 4 KiB. */ + size_t n = min_t(size_t, nblocks, + 4096 / POLYVAL_BLOCK_SIZE); + + kernel_neon_begin(); + polyval_blocks_pmull(acc, key, data, n); + kernel_neon_end(); + data += n * POLYVAL_BLOCK_SIZE; + nblocks -= n; + } while (nblocks); + } else { + polyval_blocks_generic(acc, &key->h_powers[NUM_H_POWERS - 1], + data, nblocks); + } +} + +#define polyval_mod_init_arch polyval_mod_init_arch +static void polyval_mod_init_arch(void) +{ + if (cpu_have_named_feature(PMULL)) + static_branch_enable(&have_pmull); +} -- cgit v1.2.3 From 4d8da35579daad0392d238460ed7e9629d49ca35 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 9 Nov 2025 15:47:20 -0800 Subject: lib/crypto: x86/polyval: Migrate optimized code into library Migrate the x86_64 implementation of POLYVAL into lib/crypto/, wiring it up to the POLYVAL library interface. This makes the POLYVAL library be properly optimized on x86_64. This drops the x86_64 optimizations of polyval in the crypto_shash API. That's fine, since polyval will be removed from crypto_shash entirely since it is unneeded there. But even if it comes back, the crypto_shash API could just be implemented on top of the library API, as usual. Adjust the names and prototypes of the assembly functions to align more closely with the rest of the library code. Also replace a movaps instruction with movups to remove the assumption that the key struct is 16-byte aligned. Users can still align the key if they want (and at least in this case, movups is just as fast as movaps), but it's inconvenient to require it. 
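For reference, the core 128 x 128 => 256-bit schoolbook carryless multiply that the
x86 assembly's schoolbook1 step performs can be rendered with PCLMULQDQ intrinsics
roughly as follows. This is an illustrative C sketch only (the kernel code being
moved is assembly); the function name is hypothetical, and building it would require
a PCLMUL-capable target (e.g. gcc -mpclmul):

	#include <immintrin.h>

	/*
	 * Accumulate the 256-bit carryless product X*Y into LO/MI/HI, where
	 * LO ^= X_0*Y_0, HI ^= X_1*Y_1, and MI ^= X_0*Y_1 + X_1*Y_0,
	 * matching the schoolbook1_iteration macro in the diff below.
	 */
	static inline void schoolbook1_sketch(__m128i x, __m128i y,
					      __m128i *lo, __m128i *mi,
					      __m128i *hi)
	{
		*lo = _mm_xor_si128(*lo, _mm_clmulepi64_si128(x, y, 0x00));
		*hi = _mm_xor_si128(*hi, _mm_clmulepi64_si128(x, y, 0x11));
		*mi = _mm_xor_si128(*mi, _mm_clmulepi64_si128(x, y, 0x01));
		*mi = _mm_xor_si128(*mi, _mm_clmulepi64_si128(x, y, 0x10));
	}

The schoolbook2 and montgomery_reduction steps then combine LO/MI/HI into the
256-bit polynomial and fold it back to 128 bits, as the comments in the diff explain.
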
Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20251109234726.638437-6-ebiggers@kernel.org Signed-off-by: Eric Biggers --- arch/x86/crypto/Kconfig | 10 - arch/x86/crypto/Makefile | 3 - arch/x86/crypto/polyval-clmulni_asm.S | 321 --------------------------------- arch/x86/crypto/polyval-clmulni_glue.c | 180 ------------------ include/crypto/polyval.h | 3 + lib/crypto/Kconfig | 1 + lib/crypto/Makefile | 1 + lib/crypto/x86/polyval-pclmul-avx.S | 319 ++++++++++++++++++++++++++++++++ lib/crypto/x86/polyval.h | 83 +++++++++ 9 files changed, 407 insertions(+), 514 deletions(-) delete mode 100644 arch/x86/crypto/polyval-clmulni_asm.S delete mode 100644 arch/x86/crypto/polyval-clmulni_glue.c create mode 100644 lib/crypto/x86/polyval-pclmul-avx.S create mode 100644 lib/crypto/x86/polyval.h (limited to 'lib/crypto') diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 48d3076b6053..3fd2423d3cf8 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -353,16 +353,6 @@ config CRYPTO_NHPOLY1305_AVX2 Architecture: x86_64 using: - AVX2 (Advanced Vector Extensions 2) -config CRYPTO_POLYVAL_CLMUL_NI - tristate "Hash functions: POLYVAL (CLMUL-NI)" - depends on 64BIT - select CRYPTO_POLYVAL - help - POLYVAL hash function for HCTR2 - - Architecture: x86_64 using: - - CLMUL-NI (carry-less multiplication new instructions) - config CRYPTO_SM3_AVX_X86_64 tristate "Hash functions: SM3 (AVX)" depends on 64BIT diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 2d30d5d36145..4a24dd38da50 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -52,9 +52,6 @@ aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o -obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o -polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o - obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o diff --git a/arch/x86/crypto/polyval-clmulni_asm.S b/arch/x86/crypto/polyval-clmulni_asm.S deleted file mode 100644 index a6ebe4e7dd2b..000000000000 --- a/arch/x86/crypto/polyval-clmulni_asm.S +++ /dev/null @@ -1,321 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2021 Google LLC - */ -/* - * This is an efficient implementation of POLYVAL using intel PCLMULQDQ-NI - * instructions. It works on 8 blocks at a time, by precomputing the first 8 - * keys powers h^8, ..., h^1 in the POLYVAL finite field. This precomputation - * allows us to split finite field multiplication into two steps. - * - * In the first step, we consider h^i, m_i as normal polynomials of degree less - * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication - * is simply polynomial multiplication. - * - * In the second step, we compute the reduction of p(x) modulo the finite field - * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. - * - * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where - * multiplication is finite field multiplication. The advantage is that the - * two-step process only requires 1 finite field reduction for every 8 - * polynomial multiplications. Further parallelism is gained by interleaving the - * multiplications and polynomial reductions. 
- */ - -#include -#include - -#define STRIDE_BLOCKS 8 - -#define GSTAR %xmm7 -#define PL %xmm8 -#define PH %xmm9 -#define TMP_XMM %xmm11 -#define LO %xmm12 -#define HI %xmm13 -#define MI %xmm14 -#define SUM %xmm15 - -#define KEY_POWERS %rdi -#define MSG %rsi -#define BLOCKS_LEFT %rdx -#define ACCUMULATOR %rcx -#define TMP %rax - -.section .rodata.cst16.gstar, "aM", @progbits, 16 -.align 16 - -.Lgstar: - .quad 0xc200000000000000, 0xc200000000000000 - -.text - -/* - * Performs schoolbook1_iteration on two lists of 128-bit polynomials of length - * count pointed to by MSG and KEY_POWERS. - */ -.macro schoolbook1 count - .set i, 0 - .rept (\count) - schoolbook1_iteration i 0 - .set i, (i +1) - .endr -.endm - -/* - * Computes the product of two 128-bit polynomials at the memory locations - * specified by (MSG + 16*i) and (KEY_POWERS + 16*i) and XORs the components of - * the 256-bit product into LO, MI, HI. - * - * Given: - * X = [X_1 : X_0] - * Y = [Y_1 : Y_0] - * - * We compute: - * LO += X_0 * Y_0 - * MI += X_0 * Y_1 + X_1 * Y_0 - * HI += X_1 * Y_1 - * - * Later, the 256-bit result can be extracted as: - * [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] - * This step is done when computing the polynomial reduction for efficiency - * reasons. - * - * If xor_sum == 1, then also XOR the value of SUM into m_0. This avoids an - * extra multiplication of SUM and h^8. - */ -.macro schoolbook1_iteration i xor_sum - movups (16*\i)(MSG), %xmm0 - .if (\i == 0 && \xor_sum == 1) - pxor SUM, %xmm0 - .endif - vpclmulqdq $0x01, (16*\i)(KEY_POWERS), %xmm0, %xmm2 - vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1 - vpclmulqdq $0x10, (16*\i)(KEY_POWERS), %xmm0, %xmm3 - vpclmulqdq $0x11, (16*\i)(KEY_POWERS), %xmm0, %xmm4 - vpxor %xmm2, MI, MI - vpxor %xmm1, LO, LO - vpxor %xmm4, HI, HI - vpxor %xmm3, MI, MI -.endm - -/* - * Performs the same computation as schoolbook1_iteration, except we expect the - * arguments to already be loaded into xmm0 and xmm1 and we set the result - * registers LO, MI, and HI directly rather than XOR'ing into them. - */ -.macro schoolbook1_noload - vpclmulqdq $0x01, %xmm0, %xmm1, MI - vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2 - vpclmulqdq $0x00, %xmm0, %xmm1, LO - vpclmulqdq $0x11, %xmm0, %xmm1, HI - vpxor %xmm2, MI, MI -.endm - -/* - * Computes the 256-bit polynomial represented by LO, HI, MI. Stores - * the result in PL, PH. - * [PH : PL] = [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] - */ -.macro schoolbook2 - vpslldq $8, MI, PL - vpsrldq $8, MI, PH - pxor LO, PL - pxor HI, PH -.endm - -/* - * Computes the 128-bit reduction of PH : PL. Stores the result in dest. - * - * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = - * x^128 + x^127 + x^126 + x^121 + 1. - * - * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the - * product of two 128-bit polynomials in Montgomery form. We need to reduce it - * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor - * of x^128, this product has two extra factors of x^128. To get it back into - * Montgomery form, we need to remove one of these factors by dividing by x^128. - * - * To accomplish both of these goals, we add multiples of g(x) that cancel out - * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low - * bits are zero, the polynomial division by x^128 can be done by right shifting. - * - * Since the only nonzero term in the low 64 bits of g(x) is the constant term, - * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). 
The CPU can - * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + - * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to - * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T - * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. - * - * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits - * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 - * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * - * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : - * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). - * - * So our final computation is: - * T = T_1 : T_0 = g*(x) * P_0 - * V = V_1 : V_0 = g*(x) * (P_1 + T_0) - * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 - * - * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 - * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : - * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. - */ -.macro montgomery_reduction dest - vpclmulqdq $0x00, PL, GSTAR, TMP_XMM # TMP_XMM = T_1 : T_0 = P_0 * g*(x) - pshufd $0b01001110, TMP_XMM, TMP_XMM # TMP_XMM = T_0 : T_1 - pxor PL, TMP_XMM # TMP_XMM = P_1 + T_0 : P_0 + T_1 - pxor TMP_XMM, PH # PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 - pclmulqdq $0x11, GSTAR, TMP_XMM # TMP_XMM = V_1 : V_0 = V = [(P_1 + T_0) * g*(x)] - vpxor TMP_XMM, PH, \dest -.endm - -/* - * Compute schoolbook multiplication for 8 blocks - * m_0h^8 + ... + m_7h^1 - * - * If reduce is set, also computes the montgomery reduction of the - * previous full_stride call and XORs with the first message block. - * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. - * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. - */ -.macro full_stride reduce - pxor LO, LO - pxor HI, HI - pxor MI, MI - - schoolbook1_iteration 7 0 - .if \reduce - vpclmulqdq $0x00, PL, GSTAR, TMP_XMM - .endif - - schoolbook1_iteration 6 0 - .if \reduce - pshufd $0b01001110, TMP_XMM, TMP_XMM - .endif - - schoolbook1_iteration 5 0 - .if \reduce - pxor PL, TMP_XMM - .endif - - schoolbook1_iteration 4 0 - .if \reduce - pxor TMP_XMM, PH - .endif - - schoolbook1_iteration 3 0 - .if \reduce - pclmulqdq $0x11, GSTAR, TMP_XMM - .endif - - schoolbook1_iteration 2 0 - .if \reduce - vpxor TMP_XMM, PH, SUM - .endif - - schoolbook1_iteration 1 0 - - schoolbook1_iteration 0 1 - - addq $(8*16), MSG - schoolbook2 -.endm - -/* - * Process BLOCKS_LEFT blocks, where 0 < BLOCKS_LEFT < STRIDE_BLOCKS - */ -.macro partial_stride - mov BLOCKS_LEFT, TMP - shlq $4, TMP - addq $(16*STRIDE_BLOCKS), KEY_POWERS - subq TMP, KEY_POWERS - - movups (MSG), %xmm0 - pxor SUM, %xmm0 - movaps (KEY_POWERS), %xmm1 - schoolbook1_noload - dec BLOCKS_LEFT - addq $16, MSG - addq $16, KEY_POWERS - - test $4, BLOCKS_LEFT - jz .Lpartial4BlocksDone - schoolbook1 4 - addq $(4*16), MSG - addq $(4*16), KEY_POWERS -.Lpartial4BlocksDone: - test $2, BLOCKS_LEFT - jz .Lpartial2BlocksDone - schoolbook1 2 - addq $(2*16), MSG - addq $(2*16), KEY_POWERS -.Lpartial2BlocksDone: - test $1, BLOCKS_LEFT - jz .LpartialDone - schoolbook1 1 -.LpartialDone: - schoolbook2 - montgomery_reduction SUM -.endm - -/* - * Perform montgomery multiplication in GF(2^128) and store result in op1. 
- * - * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 - * If op1, op2 are in montgomery form, this computes the montgomery - * form of op1*op2. - * - * void clmul_polyval_mul(u8 *op1, const u8 *op2); - */ -SYM_FUNC_START(clmul_polyval_mul) - FRAME_BEGIN - vmovdqa .Lgstar(%rip), GSTAR - movups (%rdi), %xmm0 - movups (%rsi), %xmm1 - schoolbook1_noload - schoolbook2 - montgomery_reduction SUM - movups SUM, (%rdi) - FRAME_END - RET -SYM_FUNC_END(clmul_polyval_mul) - -/* - * Perform polynomial evaluation as specified by POLYVAL. This computes: - * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} - * where n=nblocks, h is the hash key, and m_i are the message blocks. - * - * rdi - pointer to precomputed key powers h^8 ... h^1 - * rsi - pointer to message blocks - * rdx - number of blocks to hash - * rcx - pointer to the accumulator - * - * void clmul_polyval_update(const struct polyval_tfm_ctx *keys, - * const u8 *in, size_t nblocks, u8 *accumulator); - */ -SYM_FUNC_START(clmul_polyval_update) - FRAME_BEGIN - vmovdqa .Lgstar(%rip), GSTAR - movups (ACCUMULATOR), SUM - subq $STRIDE_BLOCKS, BLOCKS_LEFT - js .LstrideLoopExit - full_stride 0 - subq $STRIDE_BLOCKS, BLOCKS_LEFT - js .LstrideLoopExitReduce -.LstrideLoop: - full_stride 1 - subq $STRIDE_BLOCKS, BLOCKS_LEFT - jns .LstrideLoop -.LstrideLoopExitReduce: - montgomery_reduction SUM -.LstrideLoopExit: - add $STRIDE_BLOCKS, BLOCKS_LEFT - jz .LskipPartial - partial_stride -.LskipPartial: - movups SUM, (ACCUMULATOR) - FRAME_END - RET -SYM_FUNC_END(clmul_polyval_update) diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c deleted file mode 100644 index 6b466867f91a..000000000000 --- a/arch/x86/crypto/polyval-clmulni_glue.c +++ /dev/null @@ -1,180 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Glue code for POLYVAL using PCMULQDQ-NI - * - * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen - * Copyright (c) 2009 Intel Corp. - * Author: Huang Ying - * Copyright 2021 Google LLC - */ - -/* - * Glue code based on ghash-clmulni-intel_glue.c. - * - * This implementation of POLYVAL uses montgomery multiplication - * accelerated by PCLMULQDQ-NI to implement the finite field - * operations. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define POLYVAL_ALIGN 16 -#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN) -#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1)) -#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA) -#define NUM_KEY_POWERS 8 - -struct polyval_tfm_ctx { - /* - * These powers must be in the order h^8, ..., h^1. 
- */ - u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR; -}; - -struct polyval_desc_ctx { - u8 buffer[POLYVAL_BLOCK_SIZE]; -}; - -asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys, - const u8 *in, size_t nblocks, u8 *accumulator); -asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2); - -static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm) -{ - return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN); -} - -static void internal_polyval_update(const struct polyval_tfm_ctx *keys, - const u8 *in, size_t nblocks, u8 *accumulator) -{ - kernel_fpu_begin(); - clmul_polyval_update(keys, in, nblocks, accumulator); - kernel_fpu_end(); -} - -static void internal_polyval_mul(u8 *op1, const u8 *op2) -{ - kernel_fpu_begin(); - clmul_polyval_mul(op1, op2); - kernel_fpu_end(); -} - -static int polyval_x86_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm); - int i; - - if (keylen != POLYVAL_BLOCK_SIZE) - return -EINVAL; - - memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); - - for (i = NUM_KEY_POWERS-2; i >= 0; i--) { - memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); - internal_polyval_mul(tctx->key_powers[i], - tctx->key_powers[i+1]); - } - - return 0; -} - -static int polyval_x86_init(struct shash_desc *desc) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - - memset(dctx, 0, sizeof(*dctx)); - - return 0; -} - -static int polyval_x86_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); - unsigned int nblocks; - - do { - /* Allow rescheduling every 4K bytes. 
*/ - nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; - internal_polyval_update(tctx, src, nblocks, dctx->buffer); - srclen -= nblocks * POLYVAL_BLOCK_SIZE; - src += nblocks * POLYVAL_BLOCK_SIZE; - } while (srclen >= POLYVAL_BLOCK_SIZE); - - return srclen; -} - -static int polyval_x86_finup(struct shash_desc *desc, const u8 *src, - unsigned int len, u8 *dst) -{ - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); - - if (len) { - crypto_xor(dctx->buffer, src, len); - internal_polyval_mul(dctx->buffer, - tctx->key_powers[NUM_KEY_POWERS-1]); - } - - memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); - - return 0; -} - -static struct shash_alg polyval_alg = { - .digestsize = POLYVAL_DIGEST_SIZE, - .init = polyval_x86_init, - .update = polyval_x86_update, - .finup = polyval_x86_finup, - .setkey = polyval_x86_setkey, - .descsize = sizeof(struct polyval_desc_ctx), - .base = { - .cra_name = "polyval", - .cra_driver_name = "polyval-clmulni", - .cra_priority = 200, - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, - .cra_blocksize = POLYVAL_BLOCK_SIZE, - .cra_ctxsize = POLYVAL_CTX_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -__maybe_unused static const struct x86_cpu_id pcmul_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); - -static int __init polyval_clmulni_mod_init(void) -{ - if (!x86_match_cpu(pcmul_cpu_id)) - return -ENODEV; - - if (!boot_cpu_has(X86_FEATURE_AVX)) - return -ENODEV; - - return crypto_register_shash(&polyval_alg); -} - -static void __exit polyval_clmulni_mod_exit(void) -{ - crypto_unregister_shash(&polyval_alg); -} - -module_init(polyval_clmulni_mod_init); -module_exit(polyval_clmulni_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("POLYVAL hash function accelerated by PCLMULQDQ-NI"); -MODULE_ALIAS_CRYPTO("polyval"); -MODULE_ALIAS_CRYPTO("polyval-clmulni"); diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h index f8aaf4275fbd..b28b8ef11353 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -48,6 +48,9 @@ struct polyval_key { #ifdef CONFIG_ARM64 /** @h_powers: Powers of the hash key H^8 through H^1 */ struct polyval_elem h_powers[8]; +#elif defined(CONFIG_X86) + /** @h_powers: Powers of the hash key H^8 through H^1 */ + struct polyval_elem h_powers[8]; #else #error "Unhandled arch" #endif diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 430723994142..9d04b3771ce2 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -145,6 +145,7 @@ config CRYPTO_LIB_POLYVAL_ARCH bool depends on CRYPTO_LIB_POLYVAL && !UML default y if ARM64 && KERNEL_MODE_NEON + default y if X86_64 config CRYPTO_LIB_CHACHA20POLY1305 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 2efa96afcb4b..6580991f8e12 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -203,6 +203,7 @@ libpolyval-y := polyval.o ifeq ($(CONFIG_CRYPTO_LIB_POLYVAL_ARCH),y) CFLAGS_polyval.o += -I$(src)/$(SRCARCH) libpolyval-$(CONFIG_ARM64) += arm64/polyval-ce-core.o +libpolyval-$(CONFIG_X86) += x86/polyval-pclmul-avx.o endif ################################################################################ diff --git a/lib/crypto/x86/polyval-pclmul-avx.S b/lib/crypto/x86/polyval-pclmul-avx.S new file mode 100644 index 000000000000..7f739465ad35 --- /dev/null +++ b/lib/crypto/x86/polyval-pclmul-avx.S @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Google LLC + */ +/* + * This is an efficient 
implementation of POLYVAL using Intel PCLMULQDQ-NI
+ * instructions. It works on 8 blocks at a time, by precomputing the first 8
+ * key powers h^8, ..., h^1 in the POLYVAL finite field. This precomputation
+ * allows us to split finite field multiplication into two steps.
+ *
+ * In the first step, we consider h^i, m_i as normal polynomials of degree less
+ * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication
+ * is simply polynomial multiplication.
+ *
+ * In the second step, we compute the reduction of p(x) modulo the finite field
+ * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1.
+ *
+ * This two-step process is equivalent to computing h^8m_0 + ... + h^1m_7 where
+ * multiplication is finite field multiplication. The advantage is that the
+ * two-step process only requires 1 finite field reduction for every 8
+ * polynomial multiplications. Further parallelism is gained by interleaving the
+ * multiplications and polynomial reductions.
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define STRIDE_BLOCKS 8
+
+#define GSTAR		%xmm7
+#define PL		%xmm8
+#define PH		%xmm9
+#define TMP_XMM		%xmm11
+#define LO		%xmm12
+#define HI		%xmm13
+#define MI		%xmm14
+#define SUM		%xmm15
+
+#define ACCUMULATOR	%rdi
+#define KEY_POWERS	%rsi
+#define MSG		%rdx
+#define BLOCKS_LEFT	%rcx
+#define TMP		%rax
+
+.section .rodata.cst16.gstar, "aM", @progbits, 16
+.align 16
+
+.Lgstar:
+	.quad 0xc200000000000000, 0xc200000000000000
+
+.text
+
+/*
+ * Performs schoolbook1_iteration on two lists of 128-bit polynomials of length
+ * count pointed to by MSG and KEY_POWERS.
+ */
+.macro schoolbook1 count
+	.set i, 0
+	.rept (\count)
+		schoolbook1_iteration i 0
+		.set i, (i + 1)
+	.endr
+.endm
+
+/*
+ * Computes the product of two 128-bit polynomials at the memory locations
+ * specified by (MSG + 16*i) and (KEY_POWERS + 16*i) and XORs the components of
+ * the 256-bit product into LO, MI, HI.
+ *
+ * Given:
+ *	X = [X_1 : X_0]
+ *	Y = [Y_1 : Y_0]
+ *
+ * We compute:
+ *	LO += X_0 * Y_0
+ *	MI += X_0 * Y_1 + X_1 * Y_0
+ *	HI += X_1 * Y_1
+ *
+ * Later, the 256-bit result can be extracted as:
+ *	[HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0]
+ * This step is done when computing the polynomial reduction for efficiency
+ * reasons.
+ *
+ * If xor_sum == 1, then also XOR the value of SUM into m_0. This avoids an
+ * extra multiplication of SUM and h^8.
+ */
+.macro schoolbook1_iteration i xor_sum
+	movups (16*\i)(MSG), %xmm0
+	.if (\i == 0 && \xor_sum == 1)
+		pxor SUM, %xmm0
+	.endif
+	vpclmulqdq $0x01, (16*\i)(KEY_POWERS), %xmm0, %xmm2
+	vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1
+	vpclmulqdq $0x10, (16*\i)(KEY_POWERS), %xmm0, %xmm3
+	vpclmulqdq $0x11, (16*\i)(KEY_POWERS), %xmm0, %xmm4
+	vpxor %xmm2, MI, MI
+	vpxor %xmm1, LO, LO
+	vpxor %xmm4, HI, HI
+	vpxor %xmm3, MI, MI
+.endm
+
+/*
+ * Performs the same computation as schoolbook1_iteration, except we expect the
+ * arguments to already be loaded into xmm0 and xmm1 and we set the result
+ * registers LO, MI, and HI directly rather than XOR'ing into them.
+ */
+.macro schoolbook1_noload
+	vpclmulqdq $0x01, %xmm0, %xmm1, MI
+	vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2
+	vpclmulqdq $0x00, %xmm0, %xmm1, LO
+	vpclmulqdq $0x11, %xmm0, %xmm1, HI
+	vpxor %xmm2, MI, MI
+.endm
+
+/*
+ * Computes the 256-bit polynomial represented by LO, HI, MI. Stores
+ * the result in PL, PH.
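+ * PL holds the low 128 bits of the combined 256-bit product and PH the
+ * high 128 bits: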
+ *	[PH : PL] = [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0]
+ */
+.macro schoolbook2
+	vpslldq $8, MI, PL
+	vpsrldq $8, MI, PH
+	pxor LO, PL
+	pxor HI, PH
+.endm
+
+/*
+ * Computes the 128-bit reduction of PH : PL. Stores the result in dest.
+ *
+ * This macro computes p(x) mod g(x) where p(x) is in Montgomery form and g(x) =
+ * x^128 + x^127 + x^126 + x^121 + 1.
+ *
+ * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the
+ * product of two 128-bit polynomials in Montgomery form. We need to reduce it
+ * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor
+ * of x^128, this product has two extra factors of x^128. To get it back into
+ * Montgomery form, we need to remove one of these factors by dividing by x^128.
+ *
+ * To accomplish both of these goals, we add multiples of g(x) that cancel out
+ * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low
+ * bits are zero, the polynomial division by x^128 can be done by right shifting.
+ *
+ * Since the only nonzero term in the low 64 bits of g(x) is the constant term,
+ * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can
+ * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 +
+ * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to
+ * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T
+ * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191.
+ *
+ * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits
+ * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1
+ * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) *
+ * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 :
+ * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0).
+ *
+ * So our final computation is:
+ *	T = T_1 : T_0 = g*(x) * P_0
+ *	V = V_1 : V_0 = g*(x) * (P_1 + T_0)
+ *	p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0
+ *
+ * The implementation below saves an XOR instruction by computing P_1 + T_0 : P_0
+ * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 :
+ * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V.
+ */
+.macro montgomery_reduction dest
+	vpclmulqdq $0x00, PL, GSTAR, TMP_XMM	# TMP_XMM = T_1 : T_0 = P_0 * g*(x)
+	pshufd $0b01001110, TMP_XMM, TMP_XMM	# TMP_XMM = T_0 : T_1
+	pxor PL, TMP_XMM			# TMP_XMM = P_1 + T_0 : P_0 + T_1
+	pxor TMP_XMM, PH			# PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1
+	pclmulqdq $0x11, GSTAR, TMP_XMM		# TMP_XMM = V_1 : V_0 = V = [(P_1 + T_0) * g*(x)]
+	vpxor TMP_XMM, PH, \dest
+.endm
+
+/*
+ * Compute schoolbook multiplication for 8 blocks:
+ *	m_0h^8 + ... + m_7h^1
+ *
+ * If reduce is set, also computes the Montgomery reduction of the
+ * previous full_stride call and XORs with the first message block:
+ *	(m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1.
+ * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0.
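+ *
+ * For example, hashing 16 blocks runs full_stride 0 (multiply blocks 0-7,
+ * with no product pending reduction yet), then full_stride 1 (multiply
+ * blocks 8-15 while folding in the reduction of the first stride's
+ * product), then one final montgomery_reduction on SUM. The reduction is
+ * thus spread across the schoolbook1_iteration calls of the next stride.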
+ */ +.macro full_stride reduce + pxor LO, LO + pxor HI, HI + pxor MI, MI + + schoolbook1_iteration 7 0 + .if \reduce + vpclmulqdq $0x00, PL, GSTAR, TMP_XMM + .endif + + schoolbook1_iteration 6 0 + .if \reduce + pshufd $0b01001110, TMP_XMM, TMP_XMM + .endif + + schoolbook1_iteration 5 0 + .if \reduce + pxor PL, TMP_XMM + .endif + + schoolbook1_iteration 4 0 + .if \reduce + pxor TMP_XMM, PH + .endif + + schoolbook1_iteration 3 0 + .if \reduce + pclmulqdq $0x11, GSTAR, TMP_XMM + .endif + + schoolbook1_iteration 2 0 + .if \reduce + vpxor TMP_XMM, PH, SUM + .endif + + schoolbook1_iteration 1 0 + + schoolbook1_iteration 0 1 + + addq $(8*16), MSG + schoolbook2 +.endm + +/* + * Process BLOCKS_LEFT blocks, where 0 < BLOCKS_LEFT < STRIDE_BLOCKS + */ +.macro partial_stride + mov BLOCKS_LEFT, TMP + shlq $4, TMP + addq $(16*STRIDE_BLOCKS), KEY_POWERS + subq TMP, KEY_POWERS + + movups (MSG), %xmm0 + pxor SUM, %xmm0 + movups (KEY_POWERS), %xmm1 + schoolbook1_noload + dec BLOCKS_LEFT + addq $16, MSG + addq $16, KEY_POWERS + + test $4, BLOCKS_LEFT + jz .Lpartial4BlocksDone + schoolbook1 4 + addq $(4*16), MSG + addq $(4*16), KEY_POWERS +.Lpartial4BlocksDone: + test $2, BLOCKS_LEFT + jz .Lpartial2BlocksDone + schoolbook1 2 + addq $(2*16), MSG + addq $(2*16), KEY_POWERS +.Lpartial2BlocksDone: + test $1, BLOCKS_LEFT + jz .LpartialDone + schoolbook1 1 +.LpartialDone: + schoolbook2 + montgomery_reduction SUM +.endm + +/* + * Computes a = a * b * x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1. + * + * void polyval_mul_pclmul_avx(struct polyval_elem *a, + * const struct polyval_elem *b); + */ +SYM_FUNC_START(polyval_mul_pclmul_avx) + FRAME_BEGIN + vmovdqa .Lgstar(%rip), GSTAR + movups (%rdi), %xmm0 + movups (%rsi), %xmm1 + schoolbook1_noload + schoolbook2 + montgomery_reduction SUM + movups SUM, (%rdi) + FRAME_END + RET +SYM_FUNC_END(polyval_mul_pclmul_avx) + +/* + * Perform polynomial evaluation as specified by POLYVAL. This computes: + * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} + * where n=nblocks, h is the hash key, and m_i are the message blocks. + * + * rdi - pointer to the accumulator + * rsi - pointer to precomputed key powers h^8 ... 
h^1 + * rdx - pointer to message blocks + * rcx - number of blocks to hash + * + * void polyval_blocks_pclmul_avx(struct polyval_elem *acc, + * const struct polyval_key *key, + * const u8 *data, size_t nblocks); + */ +SYM_FUNC_START(polyval_blocks_pclmul_avx) + FRAME_BEGIN + vmovdqa .Lgstar(%rip), GSTAR + movups (ACCUMULATOR), SUM + subq $STRIDE_BLOCKS, BLOCKS_LEFT + js .LstrideLoopExit + full_stride 0 + subq $STRIDE_BLOCKS, BLOCKS_LEFT + js .LstrideLoopExitReduce +.LstrideLoop: + full_stride 1 + subq $STRIDE_BLOCKS, BLOCKS_LEFT + jns .LstrideLoop +.LstrideLoopExitReduce: + montgomery_reduction SUM +.LstrideLoopExit: + add $STRIDE_BLOCKS, BLOCKS_LEFT + jz .LskipPartial + partial_stride +.LskipPartial: + movups SUM, (ACCUMULATOR) + FRAME_END + RET +SYM_FUNC_END(polyval_blocks_pclmul_avx) diff --git a/lib/crypto/x86/polyval.h b/lib/crypto/x86/polyval.h new file mode 100644 index 000000000000..ef8797521420 --- /dev/null +++ b/lib/crypto/x86/polyval.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * POLYVAL library functions, x86_64 optimized + * + * Copyright 2025 Google LLC + */ +#include +#include + +#define NUM_H_POWERS 8 + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmul_avx); + +asmlinkage void polyval_mul_pclmul_avx(struct polyval_elem *a, + const struct polyval_elem *b); +asmlinkage void polyval_blocks_pclmul_avx(struct polyval_elem *acc, + const struct polyval_key *key, + const u8 *data, size_t nblocks); + +static void polyval_preparekey_arch(struct polyval_key *key, + const u8 raw_key[POLYVAL_BLOCK_SIZE]) +{ + static_assert(ARRAY_SIZE(key->h_powers) == NUM_H_POWERS); + memcpy(&key->h_powers[NUM_H_POWERS - 1], raw_key, POLYVAL_BLOCK_SIZE); + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { + kernel_fpu_begin(); + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { + key->h_powers[i] = key->h_powers[i + 1]; + polyval_mul_pclmul_avx( + &key->h_powers[i], + &key->h_powers[NUM_H_POWERS - 1]); + } + kernel_fpu_end(); + } else { + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { + key->h_powers[i] = key->h_powers[i + 1]; + polyval_mul_generic(&key->h_powers[i], + &key->h_powers[NUM_H_POWERS - 1]); + } + } +} + +static void polyval_mul_arch(struct polyval_elem *acc, + const struct polyval_key *key) +{ + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { + kernel_fpu_begin(); + polyval_mul_pclmul_avx(acc, &key->h_powers[NUM_H_POWERS - 1]); + kernel_fpu_end(); + } else { + polyval_mul_generic(acc, &key->h_powers[NUM_H_POWERS - 1]); + } +} + +static void polyval_blocks_arch(struct polyval_elem *acc, + const struct polyval_key *key, + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_pclmul_avx) && irq_fpu_usable()) { + do { + /* Allow rescheduling every 4 KiB. */ + size_t n = min_t(size_t, nblocks, + 4096 / POLYVAL_BLOCK_SIZE); + + kernel_fpu_begin(); + polyval_blocks_pclmul_avx(acc, key, data, n); + kernel_fpu_end(); + data += n * POLYVAL_BLOCK_SIZE; + nblocks -= n; + } while (nblocks); + } else { + polyval_blocks_generic(acc, &key->h_powers[NUM_H_POWERS - 1], + data, nblocks); + } +} + +#define polyval_mod_init_arch polyval_mod_init_arch +static void polyval_mod_init_arch(void) +{ + if (boot_cpu_has(X86_FEATURE_PCLMULQDQ) && + boot_cpu_has(X86_FEATURE_AVX)) + static_branch_enable(&have_pclmul_avx); +} -- cgit v1.2.3
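As a companion to the montgomery_reduction macro above, here is a minimal C
sketch of the same limb arithmetic, assuming a hypothetical bitwise helper
clmul64() in place of PCLMULQDQ; it is illustrative only, and the names
clmul64() and montgomery_reduce() are not part of the patch.

	#include <stdint.h>

	struct u128 { uint64_t lo, hi; };

	/* Bits 64..127 of g(x) = x^128 + x^127 + x^126 + x^121 + 1 (.Lgstar). */
	#define GSTAR_64 0xc200000000000000ULL

	/* Carry-less 64x64 -> 128-bit polynomial multiply, as PCLMULQDQ does. */
	static struct u128 clmul64(uint64_t a, uint64_t b)
	{
		struct u128 r = { 0, 0 };

		for (int i = 0; i < 64; i++) {
			if ((b >> i) & 1) {
				r.lo ^= a << i;
				if (i)
					r.hi ^= a >> (64 - i);
			}
		}
		return r;
	}

	/*
	 * Reduce the 256-bit product P_3 : P_2 : P_1 : P_0 to
	 * P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0, mirroring the macro.
	 */
	static struct u128 montgomery_reduce(uint64_t p3, uint64_t p2,
					     uint64_t p1, uint64_t p0)
	{
		struct u128 t = clmul64(GSTAR_64, p0);	/* T = g*(x) * P_0 */
		uint64_t m1 = p1 ^ t.lo;		/* P_1 + T_0 */
		uint64_t m2 = p2 ^ p0 ^ t.hi;		/* P_2 + P_0 + T_1 */
		struct u128 v = clmul64(GSTAR_64, m1);	/* V = g*(x) * (P_1 + T_0) */

		return (struct u128){ .lo = m2 ^ v.lo, .hi = p3 ^ m1 ^ v.hi };
	}

The two clmul64() calls correspond to the vpclmulqdq $0x00 and pclmulqdq $0x11
instructions in the macro, and the XOR lines to its pxor/vpxor steps.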