// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES using the RISC-V vector crypto extensions.  Includes the bare block
 * cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes.
 *
 * Copyright (C) 2023 VRULL GmbH
 * Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
 *
 * Copyright (C) 2023 SiFive, Inc.
 * Author: Jerry Shih <jerry.shih@sifive.com>
 *
 * Copyright 2024 Google LLC
 */

#include <asm/simd.h>
#include <asm/vector.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/linkage.h>
#include <linux/module.h>

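/*
 * Prototypes for the AES routines implemented in assembly.  The suffix of each
 * function names the vector extensions that its implementation requires.
 */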
asmlinkage void aes_encrypt_zvkned(const struct crypto_aes_ctx *key,
				   const u8 in[AES_BLOCK_SIZE],
				   u8 out[AES_BLOCK_SIZE]);
asmlinkage void aes_decrypt_zvkned(const struct crypto_aes_ctx *key,
				   const u8 in[AES_BLOCK_SIZE],
				   u8 out[AES_BLOCK_SIZE]);

asmlinkage void aes_ecb_encrypt_zvkned(const struct crypto_aes_ctx *key,
				       const u8 *in, u8 *out, size_t len);
asmlinkage void aes_ecb_decrypt_zvkned(const struct crypto_aes_ctx *key,
				       const u8 *in, u8 *out, size_t len);

asmlinkage void aes_cbc_encrypt_zvkned(const struct crypto_aes_ctx *key,
				       const u8 *in, u8 *out, size_t len,
				       u8 iv[AES_BLOCK_SIZE]);
asmlinkage void aes_cbc_decrypt_zvkned(const struct crypto_aes_ctx *key,
				       const u8 *in, u8 *out, size_t len,
				       u8 iv[AES_BLOCK_SIZE]);

asmlinkage void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key,
					 const u8 *in, u8 *out, size_t len,
					 const u8 iv[AES_BLOCK_SIZE], bool enc);

asmlinkage void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
					    const u8 *in, u8 *out, size_t len,
					    u8 iv[AES_BLOCK_SIZE]);

asmlinkage void aes_xts_encrypt_zvkned_zvbb_zvkg(
			const struct crypto_aes_ctx *key,
			const u8 *in, u8 *out, size_t len,
			u8 tweak[AES_BLOCK_SIZE]);

asmlinkage void aes_xts_decrypt_zvkned_zvbb_zvkg(
			const struct crypto_aes_ctx *key,
			const u8 *in, u8 *out, size_t len,
			u8 tweak[AES_BLOCK_SIZE]);

static int riscv64_aes_setkey(struct crypto_aes_ctx *ctx,
			      const u8 *key, unsigned int keylen)
{
	/*
	 * For now we just use the generic key expansion, for these reasons:
	 *
	 * - zvkned's key expansion instructions don't support AES-192.
	 *   So, non-zvkned fallback code would be needed anyway.
	 *
	 * - Users of AES in Linux usually don't change keys frequently.
	 *   So, key expansion isn't performance-critical.
	 *
	 * - For single-block AES exposed as a "cipher" algorithm, it's
	 *   necessary to use struct crypto_aes_ctx and initialize its 'key_dec'
	 *   field with the round keys for the Equivalent Inverse Cipher.  This
	 *   is because with "cipher", decryption can be requested from a
	 *   context where the vector unit isn't usable, necessitating a
	 *   fallback to aes_decrypt().  But, zvkned can only generate and use
	 *   the normal round keys.  Of course, it's preferable to not have
	 *   special code just for "cipher", as e.g. XTS also uses a
	 *   single-block AES encryption.  It's simplest to just use
	 *   struct crypto_aes_ctx and aes_expandkey() everywhere.
	 */
	return aes_expandkey(ctx, key, keylen);
}

static int riscv64_aes_setkey_cipher(struct crypto_tfm *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	return riscv64_aes_setkey(ctx, key, keylen);
}

static int riscv64_aes_setkey_skcipher(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return riscv64_aes_setkey(ctx, key, keylen);
}

/* Bare AES, without a mode of operation */

static void riscv64_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

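	/*
	 * Use the vector implementation only if the vector unit is usable in
	 * the current context; otherwise fall back to the generic C code.
	 */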
	if (crypto_simd_usable()) {
		kernel_vector_begin();
		aes_encrypt_zvkned(ctx, src, dst);
		kernel_vector_end();
	} else {
		aes_encrypt(ctx, dst, src);
	}
}

static void riscv64_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (crypto_simd_usable()) {
		kernel_vector_begin();
		aes_decrypt_zvkned(ctx, src, dst);
		kernel_vector_end();
	} else {
		aes_decrypt(ctx, dst, src);
	}
}

/* AES-ECB */

static inline int riscv64_aes_ecb_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
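	/*
	 * Process only whole blocks in each step of the walk; any remainder is
	 * handed back to the walk for the next step.
	 */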
	while ((nbytes = walk.nbytes) != 0) {
		kernel_vector_begin();
		if (enc)
			aes_ecb_encrypt_zvkned(ctx, walk.src.virt.addr,
					       walk.dst.virt.addr,
					       nbytes & ~(AES_BLOCK_SIZE - 1));
		else
			aes_ecb_decrypt_zvkned(ctx, walk.src.virt.addr,
					       walk.dst.virt.addr,
					       nbytes & ~(AES_BLOCK_SIZE - 1));
		kernel_vector_end();
		err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
	}

	return err;
}

static int riscv64_aes_ecb_encrypt(struct skcipher_request *req)
{
	return riscv64_aes_ecb_crypt(req, true);
}

static int riscv64_aes_ecb_decrypt(struct skcipher_request *req)
{
	return riscv64_aes_ecb_crypt(req, false);
}

/* AES-CBC */

static int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
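	/*
	 * Same structure as ECB, except that walk.iv carries the chaining
	 * value, which the assembly updates across walk steps.
	 */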
	while ((nbytes = walk.nbytes) != 0) {
		kernel_vector_begin();
		if (enc)
			aes_cbc_encrypt_zvkned(ctx, walk.src.virt.addr,
					       walk.dst.virt.addr,
					       nbytes & ~(AES_BLOCK_SIZE - 1),
					       walk.iv);
		else
			aes_cbc_decrypt_zvkned(ctx, walk.src.virt.addr,
					       walk.dst.virt.addr,
					       nbytes & ~(AES_BLOCK_SIZE - 1),
					       walk.iv);
		kernel_vector_end();
		err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
	}

	return err;
}

static int riscv64_aes_cbc_encrypt(struct skcipher_request *req)
{
	return riscv64_aes_cbc_crypt(req, true);
}

static int riscv64_aes_cbc_decrypt(struct skcipher_request *req)
{
	return riscv64_aes_cbc_crypt(req, false);
}

/* AES-CBC-CTS */

static int riscv64_aes_cbc_cts_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;
	unsigned int cbc_len;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	/*
	 * If the full message is available in one step, en/decrypt it in one
	 * call to the CBC-CTS assembly function.  This reduces overhead,
	 * especially on short messages.  Otherwise, fall back to doing CBC up
	 * to the last two blocks, then invoke CTS just for the ciphertext
	 * stealing.
	 */
	if (unlikely(walk.nbytes != req->cryptlen)) {
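		/*
		 * Do plain CBC on everything except the last 17 to 32 bytes
		 * (AES_BLOCK_SIZE + 1 to 2 * AES_BLOCK_SIZE), which are left
		 * for the ciphertext stealing step below.
		 */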
		cbc_len = round_down(req->cryptlen - AES_BLOCK_SIZE - 1,
				     AES_BLOCK_SIZE);
		skcipher_walk_abort(&walk);
		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_len, req->iv);
		err = riscv64_aes_cbc_crypt(&subreq, enc);
		if (err)
			return err;
		dst = src = scatterwalk_ffwd(sg_src, req->src, cbc_len);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, cbc_len);
		skcipher_request_set_crypt(&subreq, src, dst,
					   req->cryptlen - cbc_len, req->iv);
		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;
	}
	kernel_vector_begin();
	aes_cbc_cts_crypt_zvkned(ctx, walk.src.virt.addr, walk.dst.virt.addr,
				 walk.nbytes, req->iv, enc);
	kernel_vector_end();
	return skcipher_walk_done(&walk, 0);
}

static int riscv64_aes_cbc_cts_encrypt(struct skcipher_request *req)
{
	return riscv64_aes_cbc_cts_crypt(req, true);
}

static int riscv64_aes_cbc_cts_decrypt(struct skcipher_request *req)
{
	return riscv64_aes_cbc_cts_crypt(req, false);
}

/* AES-CTR */

static int riscv64_aes_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int nbytes, p1_nbytes;
	struct skcipher_walk walk;
	u32 ctr32, nblocks;
	int err;

	/* Get the low 32-bit word of the 128-bit big endian counter. */
	ctr32 = get_unaligned_be32(req->iv + 12);

	err = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		if (nbytes < walk.total) {
			/* Not the end yet, so keep the length block-aligned. */
			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
			nblocks = nbytes / AES_BLOCK_SIZE;
		} else {
			/* It's the end, so include any final partial block. */
			nblocks = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
		}
		ctr32 += nblocks;

		kernel_vector_begin();
		if (ctr32 >= nblocks) {
			/* The low 32-bit word of the counter won't overflow. */
			aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
						    walk.dst.virt.addr, nbytes,
						    req->iv);
		} else {
			/*
			 * The low 32-bit word of the counter will overflow.
			 * The assembly doesn't handle this case, so split the
			 * operation into two at the point where the overflow
			 * will occur.  After the first part, add the carry bit.
			 */
			p1_nbytes = min_t(unsigned int, nbytes,
					  (nblocks - ctr32) * AES_BLOCK_SIZE);
			aes_ctr32_crypt_zvkned_zvkb(ctx, walk.src.virt.addr,
						    walk.dst.virt.addr,
						    p1_nbytes, req->iv);
			crypto_inc(req->iv, 12);

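			/*
			 * ctr32 == 0 here means the counter wrapped exactly at
			 * the end of this step's data, so there is no second
			 * part to process.
			 */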
			if (ctr32) {
				aes_ctr32_crypt_zvkned_zvkb(
					ctx,
					walk.src.virt.addr + p1_nbytes,
					walk.dst.virt.addr + p1_nbytes,
					nbytes - p1_nbytes, req->iv);
			}
		}
		kernel_vector_end();

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

/* AES-XTS */

struct riscv64_aes_xts_ctx {
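	/* ctx1: key for the data; ctx2: key for the tweak */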
	struct crypto_aes_ctx ctx1;
	struct crypto_aes_ctx ctx2;
};

static int riscv64_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return xts_verify_key(tfm, key, keylen) ?:
	       riscv64_aes_setkey(&ctx->ctx1, key, keylen / 2) ?:
	       riscv64_aes_setkey(&ctx->ctx2, key + keylen / 2, keylen / 2);
}

static int riscv64_aes_xts_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct riscv64_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	/* Encrypt the IV with the tweak key to get the first tweak. */
	kernel_vector_begin();
	aes_encrypt_zvkned(&ctx->ctx2, req->iv, req->iv);
	kernel_vector_end();

	err = skcipher_walk_virt(&walk, req, false);

	/*
	 * If the message length isn't divisible by the AES block size and the
	 * full message isn't available in one step of the scatterlist walk,
	 * then separate off the last full block and the partial block.  This
	 * ensures that they are processed in the same call to the assembly
	 * function, which is required for ciphertext stealing.
	 */
	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   req->cryptlen - tail - AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, AES_BLOCK_SIZE);

		kernel_vector_begin();
		if (enc)
			aes_xts_encrypt_zvkned_zvbb_zvkg(
				&ctx->ctx1, walk.src.virt.addr,
				walk.dst.virt.addr, nbytes, req->iv);
		else
			aes_xts_decrypt_zvkned_zvbb_zvkg(
				&ctx->ctx1, walk.src.virt.addr,
				walk.dst.virt.addr, nbytes, req->iv);
		kernel_vector_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	/* Do ciphertext stealing with the last full block and partial block. */

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_vector_begin();
	if (enc)
		aes_xts_encrypt_zvkned_zvbb_zvkg(
			&ctx->ctx1, walk.src.virt.addr,
			walk.dst.virt.addr, walk.nbytes, req->iv);
	else
		aes_xts_decrypt_zvkned_zvbb_zvkg(
			&ctx->ctx1, walk.src.virt.addr,
			walk.dst.virt.addr, walk.nbytes, req->iv);
	kernel_vector_end();

	return skcipher_walk_done(&walk, 0);
}

static int riscv64_aes_xts_encrypt(struct skcipher_request *req)
{
	return riscv64_aes_xts_crypt(req, true);
}

static int riscv64_aes_xts_decrypt(struct skcipher_request *req)
{
	return riscv64_aes_xts_crypt(req, false);
}

/* Algorithm definitions */

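/*
 * Single-block AES, exposed as a "cipher" algorithm.  It falls back to the
 * generic implementation when the vector unit isn't usable; see
 * riscv64_aes_encrypt() and riscv64_aes_decrypt().
 */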
static struct crypto_alg riscv64_zvkned_aes_cipher_alg = {
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx),
	.cra_priority = 300,
	.cra_name = "aes",
	.cra_driver_name = "aes-riscv64-zvkned",
	.cra_cipher = {
		.cia_min_keysize = AES_MIN_KEY_SIZE,
		.cia_max_keysize = AES_MAX_KEY_SIZE,
		.cia_setkey = riscv64_aes_setkey_cipher,
		.cia_encrypt = riscv64_aes_encrypt,
		.cia_decrypt = riscv64_aes_decrypt,
	},
	.cra_module = THIS_MODULE,
};

static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
	{
		.setkey = riscv64_aes_setkey_skcipher,
		.encrypt = riscv64_aes_ecb_encrypt,
		.decrypt = riscv64_aes_ecb_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.walksize = 8 * AES_BLOCK_SIZE, /* matches LMUL=8 */
		.base = {
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto_aes_ctx),
			.cra_priority = 300,
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-riscv64-zvkned",
			.cra_module = THIS_MODULE,
		},
	}, {
		.setkey = riscv64_aes_setkey_skcipher,
		.encrypt = riscv64_aes_cbc_encrypt,
		.decrypt = riscv64_aes_cbc_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto_aes_ctx),
			.cra_priority = 300,
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-riscv64-zvkned",
			.cra_module = THIS_MODULE,
		},
	}, {
		.setkey = riscv64_aes_setkey_skcipher,
		.encrypt = riscv64_aes_cbc_cts_encrypt,
		.decrypt = riscv64_aes_cbc_cts_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
		.base = {
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto_aes_ctx),
			.cra_priority = 300,
			.cra_name = "cts(cbc(aes))",
			.cra_driver_name = "cts-cbc-aes-riscv64-zvkned",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct skcipher_alg riscv64_zvkned_zvkb_aes_skcipher_alg = {
	.setkey = riscv64_aes_setkey_skcipher,
	.encrypt = riscv64_aes_ctr_crypt,
	.decrypt = riscv64_aes_ctr_crypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
	.base = {
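		/* CTR is used as a stream cipher, so the block size is 1 */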
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
		.cra_priority = 300,
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-riscv64-zvkned-zvkb",
		.cra_module = THIS_MODULE,
	},
};

static struct skcipher_alg riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg = {
	.setkey = riscv64_aes_xts_setkey,
	.encrypt = riscv64_aes_xts_encrypt,
	.decrypt = riscv64_aes_xts_decrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
	.base = {
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct riscv64_aes_xts_ctx),
		.cra_priority = 300,
		.cra_name = "xts(aes)",
		.cra_driver_name = "xts-aes-riscv64-zvkned-zvbb-zvkg",
		.cra_module = THIS_MODULE,
	},
};

static inline bool riscv64_aes_xts_supported(void)
{
	return riscv_isa_extension_available(NULL, ZVBB) &&
	       riscv_isa_extension_available(NULL, ZVKG) &&
	       riscv_vector_vlen() < 2048 /* Implementation limitation */;
}

static int __init riscv64_aes_mod_init(void)
{
	int err = -ENODEV;

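	/* All of the algorithms here require zvkned and VLEN >= 128. */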
	if (riscv_isa_extension_available(NULL, ZVKNED) &&
	    riscv_vector_vlen() >= 128) {
		err = crypto_register_alg(&riscv64_zvkned_aes_cipher_alg);
		if (err)
			return err;

		err = crypto_register_skciphers(
			riscv64_zvkned_aes_skcipher_algs,
			ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
		if (err)
			goto unregister_zvkned_cipher_alg;

		if (riscv_isa_extension_available(NULL, ZVKB)) {
			err = crypto_register_skcipher(
				&riscv64_zvkned_zvkb_aes_skcipher_alg);
			if (err)
				goto unregister_zvkned_skcipher_algs;
		}

		if (riscv64_aes_xts_supported()) {
			err = crypto_register_skcipher(
				&riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
			if (err)
				goto unregister_zvkned_zvkb_skcipher_alg;
		}
	}

	return err;

unregister_zvkned_zvkb_skcipher_alg:
	if (riscv_isa_extension_available(NULL, ZVKB))
		crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
unregister_zvkned_skcipher_algs:
	crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
				    ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
unregister_zvkned_cipher_alg:
	crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
	return err;
}

static void __exit riscv64_aes_mod_exit(void)
{
	if (riscv64_aes_xts_supported())
		crypto_unregister_skcipher(&riscv64_zvkned_zvbb_zvkg_aes_skcipher_alg);
	if (riscv_isa_extension_available(NULL, ZVKB))
		crypto_unregister_skcipher(&riscv64_zvkned_zvkb_aes_skcipher_alg);
	crypto_unregister_skciphers(riscv64_zvkned_aes_skcipher_algs,
				    ARRAY_SIZE(riscv64_zvkned_aes_skcipher_algs));
	crypto_unregister_alg(&riscv64_zvkned_aes_cipher_alg);
}

module_init(riscv64_aes_mod_init);
module_exit(riscv64_aes_mod_exit);

MODULE_DESCRIPTION("AES-ECB/CBC/CTS/CTR/XTS (RISC-V accelerated)");
MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");