// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/simd.h>

#define CHACHA_STATE_ALIGN 16

asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
					unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
#ifdef CONFIG_AS_AVX2
asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
static bool chacha_use_avx2;
#ifdef CONFIG_AS_AVX512
asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
static bool chacha_use_avx512vl;
#endif
#endif

static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
	len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
	return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}

static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
			  unsigned int bytes, int nrounds)
{
#ifdef CONFIG_AS_AVX2
#ifdef CONFIG_AS_AVX512
	if (chacha_use_avx512vl) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes) {
			chacha_2block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}
#endif
	if (chacha_use_avx2) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE) {
			chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}
#endif
	while (bytes >= CHACHA_BLOCK_SIZE * 4) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		bytes -= CHACHA_BLOCK_SIZE * 4;
		src += CHACHA_BLOCK_SIZE * 4;
		dst += CHACHA_BLOCK_SIZE * 4;
		state[12] += 4;
	}
	if (bytes > CHACHA_BLOCK_SIZE) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12] += chacha_advance(bytes, 4);
		return;
	}
	if (bytes) {
		chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12]++;
	}
}

static int chacha_simd_stream_xor(struct skcipher_walk *walk,
				  struct chacha_ctx *ctx, u8 *iv)
{
	u32 *state, state_buf[16 + 2] __aligned(8);
	int next_yield = 4096; /* bytes until next FPU yield */
	int err = 0;

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);

	crypto_chacha_init(state, ctx, iv);

	while (walk->nbytes > 0) {
		unsigned int nbytes = walk->nbytes;

		if (nbytes < walk->total) {
			nbytes = round_down(nbytes, walk->stride);
			next_yield -= nbytes;
		}

		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
			      nbytes, ctx->nrounds);

		if (next_yield <= 0) {
			/* temporarily allow preemption */
			kernel_fpu_end();
			kernel_fpu_begin();
			next_yield = 4096;
		}

		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
	}

	return err;
}

static int chacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return crypto_chacha_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	kernel_fpu_begin();
	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
	kernel_fpu_end();
	return err;
}

static int xchacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	struct chacha_ctx subctx;
	u32 *state, state_buf[16 + 2] __aligned(8);
	u8 real_iv[16];
	int err;

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return crypto_xchacha_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
	crypto_chacha_init(state, ctx, req->iv);

	kernel_fpu_begin();

	hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
	subctx.nrounds = ctx->nrounds;

	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);
	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);

	kernel_fpu_end();

	return err;
}

static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= chacha_simd,
		.decrypt		= chacha_simd,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha12_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	},
};

static int __init chacha_simd_mod_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_SSSE3))
		return -ENODEV;

#ifdef CONFIG_AS_AVX2
	chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
			  boot_cpu_has(X86_FEATURE_AVX2) &&
			  cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
#ifdef CONFIG_AS_AVX512
	chacha_use_avx512vl = chacha_use_avx2 &&
			      boot_cpu_has(X86_FEATURE_AVX512VL) &&
			      boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */
#endif
#endif
	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static void __exit chacha_simd_mod_fini(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-simd");