/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, using ARMv8 Crypto Extensions
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2022, Alibaba Group.
 * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

#include <asm/simd.h>
#include <crypto/b128ops.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sm4.h>
#include <crypto/utils.h>
#include <crypto/xts.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

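/* Convert a byte count into a count of whole SM4 (16-byte) blocks. */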
#define BYTES2BLKS(nbytes)	((nbytes) >> 4)

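/*
 * Low-level primitives implemented in assembly with the ARMv8 SM4
 * instructions (in the accompanying sm4-ce-core.S); callers must hold
 * the kernel-mode NEON context (here via scoped_ksimd()) around them.
 */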
asmlinkage void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec,
				  const u32 *fk, const u32 *ck);
asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
			     unsigned int nblks);
asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblocks);
asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblocks);
asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
				   u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
				   u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
			       u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src,
			       u8 *tweak, unsigned int nbytes,
			       const u32 *rkey2_enc);
asmlinkage void sm4_ce_xts_dec(const u32 *rkey1, u8 *dst, const u8 *src,
			       u8 *tweak, unsigned int nbytes,
			       const u32 *rkey2_enc);
asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest,
				  const u8 *src, unsigned int nblocks,
				  bool enc_before, bool enc_after);

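/* Exported for other SM4 drivers (e.g. the CE CCM/GCM implementations). */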
EXPORT_SYMBOL(sm4_ce_expand_key);
EXPORT_SYMBOL(sm4_ce_crypt_block);
EXPORT_SYMBOL(sm4_ce_cbc_enc);

struct sm4_xts_ctx {
	struct sm4_ctx key1;
	struct sm4_ctx key2;
};

struct sm4_mac_tfm_ctx {
	struct sm4_ctx key;
	u8 __aligned(8) consts[];
};

struct sm4_mac_desc_ctx {
	u8 digest[SM4_BLOCK_SIZE];
};

static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
		      unsigned int key_len)
{
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (key_len != SM4_KEY_SIZE)
		return -EINVAL;

	scoped_ksimd()
		sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);
	return 0;
}

static int sm4_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (key_len != SM4_KEY_SIZE * 2)
		return -EINVAL;

	ret = xts_verify_key(tfm, key, key_len);
	if (ret)
		return ret;

	scoped_ksimd() {
		sm4_ce_expand_key(key, ctx->key1.rkey_enc,
				  ctx->key1.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);
		sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc,
				  ctx->key2.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);
	}

	return 0;
}

static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblks;

		scoped_ksimd() {
			nblks = BYTES2BLKS(nbytes);
			if (nblks) {
				sm4_ce_crypt(rkey, dst, src, nblks);
				nbytes -= nblks * SM4_BLOCK_SIZE;
			}
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int sm4_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);

	return sm4_ecb_do_crypt(req, ctx->rkey_enc);
}

static int sm4_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);

	return sm4_ecb_do_crypt(req, ctx->rkey_dec);
}

static int sm4_cbc_crypt(struct skcipher_request *req,
			 struct sm4_ctx *ctx, bool encrypt)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblocks;

		nblocks = nbytes / SM4_BLOCK_SIZE;
		if (nblocks) {
			scoped_ksimd() {
				if (encrypt)
					sm4_ce_cbc_enc(ctx->rkey_enc, dst, src,
						       walk.iv, nblocks);
				else
					sm4_ce_cbc_dec(ctx->rkey_dec, dst, src,
						       walk.iv, nblocks);
			}
		}

		err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
	}

	return err;
}

static int sm4_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);

	return sm4_cbc_crypt(req, ctx, true);
}

static int sm4_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);

	return sm4_cbc_crypt(req, ctx, false);
}

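/*
 * CBC with ciphertext stealing (CTS-CBC): all but the final two blocks
 * go through regular CBC above; the last two (possibly partial) blocks
 * are handed to the dedicated CTS assembly helpers.
 */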
static int sm4_cbc_cts_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct scatterlist *src = req->src;
	struct scatterlist *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int cbc_blocks;
	int err;

	if (req->cryptlen < SM4_BLOCK_SIZE)
		return -EINVAL;

	if (req->cryptlen == SM4_BLOCK_SIZE)
		return sm4_cbc_crypt(req, ctx, encrypt);

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	/* handle the bulk CBC part */
	cbc_blocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;
	if (cbc_blocks) {
		skcipher_request_set_crypt(&subreq, src, dst,
					   cbc_blocks * SM4_BLOCK_SIZE,
					   req->iv);

		err = sm4_cbc_crypt(&subreq, ctx, encrypt);
		if (err)
			return err;

		dst = src = scatterwalk_ffwd(sg_src, src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * SM4_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	scoped_ksimd() {
		if (encrypt)
			sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr,
					   walk.src.virt.addr, walk.iv,
					   walk.nbytes);
		else
			sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr,
					   walk.src.virt.addr, walk.iv,
					   walk.nbytes);
	}

	return skcipher_walk_done(&walk, 0);
}

static int sm4_cbc_cts_encrypt(struct skcipher_request *req)
{
	return sm4_cbc_cts_crypt(req, true);
}

static int sm4_cbc_cts_decrypt(struct skcipher_request *req)
{
	return sm4_cbc_cts_crypt(req, false);
}

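/*
 * CTR mode: whole blocks are handled by the assembly routine; any
 * partial final block is XORed with one extra block of keystream.
 */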
static int sm4_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int nblks;

		scoped_ksimd() {
			nblks = BYTES2BLKS(nbytes);
			if (nblks) {
				sm4_ce_ctr_enc(ctx->rkey_enc, dst, src,
					       walk.iv, nblks);
				dst += nblks * SM4_BLOCK_SIZE;
				src += nblks * SM4_BLOCK_SIZE;
				nbytes -= nblks * SM4_BLOCK_SIZE;
			}

			/* tail */
			if (walk.nbytes == walk.total && nbytes > 0) {
				u8 keystream[SM4_BLOCK_SIZE];

				sm4_ce_crypt_block(ctx->rkey_enc, keystream,
						   walk.iv);
				crypto_inc(walk.iv, SM4_BLOCK_SIZE);
				crypto_xor_cpy(dst, src, keystream, nbytes);
				nbytes = 0;
			}
		}

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

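/*
 * XTS with ciphertext stealing for lengths that are not a multiple of
 * the block size. rkey2_enc is passed only on the first call so that
 * the tweak is encrypted with key2 exactly once; subsequent calls pass
 * NULL and reuse the already-encrypted tweak.
 */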
static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % SM4_BLOCK_SIZE;
	const u32 *rkey2_enc = ctx->key2.rkey_enc;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	if (req->cryptlen < SM4_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int nblocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   nblocks * SM4_BLOCK_SIZE, req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;
	} else {
		tail = 0;
	}

	while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
		if (nbytes < walk.total)
			nbytes &= ~(SM4_BLOCK_SIZE - 1);

		scoped_ksimd() {
			if (encrypt)
				sm4_ce_xts_enc(ctx->key1.rkey_enc,
					       walk.dst.virt.addr,
					       walk.src.virt.addr,
					       walk.iv, nbytes, rkey2_enc);
			else
				sm4_ce_xts_dec(ctx->key1.rkey_dec,
					       walk.dst.virt.addr,
					       walk.src.virt.addr,
					       walk.iv, nbytes, rkey2_enc);
		}

		rkey2_enc = NULL;

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		if (err)
			return err;
	}

	if (likely(tail == 0))
		return 0;

	/* handle ciphertext stealing */

	dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);

	skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	scoped_ksimd() {
		if (encrypt)
			sm4_ce_xts_enc(ctx->key1.rkey_enc,
				       walk.dst.virt.addr,
				       walk.src.virt.addr,
				       walk.iv, walk.nbytes, rkey2_enc);
		else
			sm4_ce_xts_dec(ctx->key1.rkey_dec,
				       walk.dst.virt.addr,
				       walk.src.virt.addr,
				       walk.iv, walk.nbytes, rkey2_enc);
	}

	return skcipher_walk_done(&walk, 0);
}

static int sm4_xts_encrypt(struct skcipher_request *req)
{
	return sm4_xts_crypt(req, true);
}

static int sm4_xts_decrypt(struct skcipher_request *req)
{
	return sm4_xts_crypt(req, false);
}

static struct skcipher_alg sm4_algs[] = {
	{
		.base = {
			.cra_name		= "ecb(sm4)",
			.cra_driver_name	= "ecb-sm4-ce",
			.cra_priority		= 400,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= SM4_KEY_SIZE,
		.max_keysize	= SM4_KEY_SIZE,
		.setkey		= sm4_setkey,
		.encrypt	= sm4_ecb_encrypt,
		.decrypt	= sm4_ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "cbc(sm4)",
			.cra_driver_name	= "cbc-sm4-ce",
			.cra_priority		= 400,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= SM4_KEY_SIZE,
		.max_keysize	= SM4_KEY_SIZE,
		.ivsize		= SM4_BLOCK_SIZE,
		.setkey		= sm4_setkey,
		.encrypt	= sm4_cbc_encrypt,
		.decrypt	= sm4_cbc_decrypt,
	}, {
		.base = {
			.cra_name		= "ctr(sm4)",
			.cra_driver_name	= "ctr-sm4-ce",
			.cra_priority		= 400,
			.cra_blocksize		= 1,
			.cra_ctxsize		= sizeof(struct sm4_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= SM4_KEY_SIZE,
		.max_keysize	= SM4_KEY_SIZE,
		.ivsize		= SM4_BLOCK_SIZE,
		.chunksize	= SM4_BLOCK_SIZE,
		.setkey		= sm4_setkey,
		.encrypt	= sm4_ctr_crypt,
		.decrypt	= sm4_ctr_crypt,
	}, {
		.base = {
			.cra_name		= "cts(cbc(sm4))",
			.cra_driver_name	= "cts-cbc-sm4-ce",
			.cra_priority		= 400,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= SM4_KEY_SIZE,
		.max_keysize	= SM4_KEY_SIZE,
		.ivsize		= SM4_BLOCK_SIZE,
		.walksize	= SM4_BLOCK_SIZE * 2,
		.setkey		= sm4_setkey,
		.encrypt	= sm4_cbc_cts_encrypt,
		.decrypt	= sm4_cbc_cts_decrypt,
	}, {
		.base = {
			.cra_name		= "xts(sm4)",
			.cra_driver_name	= "xts-sm4-ce",
			.cra_priority		= 400,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_xts_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= SM4_KEY_SIZE * 2,
		.max_keysize	= SM4_KEY_SIZE * 2,
		.ivsize		= SM4_BLOCK_SIZE,
		.walksize	= SM4_BLOCK_SIZE * 2,
		.setkey		= sm4_xts_setkey,
		.encrypt	= sm4_xts_encrypt,
		.decrypt	= sm4_xts_decrypt,
	}
};

static int sm4_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key,
			     unsigned int key_len)
{
	struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);

	if (key_len != SM4_KEY_SIZE)
		return -EINVAL;

	scoped_ksimd()
		sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);
	return 0;
}

static int sm4_cmac_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int key_len)
{
	struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *consts = (be128 *)ctx->consts;
	u64 a, b;

	if (key_len != SM4_KEY_SIZE)
		return -EINVAL;

	memset(consts, 0, SM4_BLOCK_SIZE);

	scoped_ksimd() {
		sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);

		/* encrypt the zero block */
		sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts,
				   (const u8 *)consts);
	}

	/* gf(2^128) multiply zero-ciphertext with u and u^2 */
	a = be64_to_cpu(consts[0].a);
	b = be64_to_cpu(consts[0].b);
	consts[0].a = cpu_to_be64((a << 1) | (b >> 63));
	consts[0].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));

	a = be64_to_cpu(consts[0].a);
	b = be64_to_cpu(consts[0].b);
	consts[1].a = cpu_to_be64((a << 1) | (b >> 63));
	consts[1].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));

	return 0;
}

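/*
 * XCBC key derivation (as in RFC 3566): K1 = E(K, 0x01...01) replaces
 * the cipher key, while K2 = E(K, 0x02...02) and K3 = E(K, 0x03...03)
 * become the finalization constants.
 */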
static int sm4_xcbc_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int key_len)
{
	struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	u8 __aligned(8) key2[SM4_BLOCK_SIZE];
	static u8 const ks[3][SM4_BLOCK_SIZE] = {
		{ [0 ... SM4_BLOCK_SIZE - 1] = 0x1},
		{ [0 ... SM4_BLOCK_SIZE - 1] = 0x2},
		{ [0 ... SM4_BLOCK_SIZE - 1] = 0x3},
	};

	if (key_len != SM4_KEY_SIZE)
		return -EINVAL;

	scoped_ksimd() {
		sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);

		sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]);
		sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2);

		sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec,
				  crypto_sm4_fk, crypto_sm4_ck);
	}

	return 0;
}

static int sm4_mac_init(struct shash_desc *desc)
{
	struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);

	memset(ctx->digest, 0, SM4_BLOCK_SIZE);
	return 0;
}

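/*
 * Block-only update: consume whole blocks and return the number of
 * leftover bytes for the shash core to buffer and resubmit.
 */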
static int sm4_mac_update(struct shash_desc *desc, const u8 *p,
			  unsigned int len)
{
	struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
	unsigned int nblocks = len / SM4_BLOCK_SIZE;

	len %= SM4_BLOCK_SIZE;
	scoped_ksimd()
		sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
				  nblocks, false, true);
	return len;
}

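/*
 * CMAC finalization: the final block is XORed with the first derived
 * constant when it is full, or 10*-padded and XORed with the second
 * when it is partial, then encrypted one last time.
 */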
static int sm4_cmac_finup(struct shash_desc *desc, const u8 *src,
			  unsigned int len, u8 *out)
{
	struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
	const u8 *consts = tctx->consts;

	crypto_xor(ctx->digest, src, len);
	if (len != SM4_BLOCK_SIZE) {
		ctx->digest[len] ^= 0x80;
		consts += SM4_BLOCK_SIZE;
	}
	scoped_ksimd()
		sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
				  false, true);
	memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
	return 0;
}

static int sm4_cbcmac_finup(struct shash_desc *desc, const u8 *src,
			    unsigned int len, u8 *out)
{
	struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);

	if (len) {
		crypto_xor(ctx->digest, src, len);
		scoped_ksimd()
			sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
					   ctx->digest);
	}
	memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
	return 0;
}

static struct shash_alg sm4_mac_algs[] = {
	{
		.base = {
			.cra_name		= "cmac(sm4)",
			.cra_driver_name	= "cmac-sm4-ce",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
						  CRYPTO_AHASH_ALG_FINAL_NONZERO,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_mac_tfm_ctx)
							+ SM4_BLOCK_SIZE * 2,
			.cra_module		= THIS_MODULE,
		},
		.digestsize	= SM4_BLOCK_SIZE,
		.init		= sm4_mac_init,
		.update		= sm4_mac_update,
		.finup		= sm4_cmac_finup,
		.setkey		= sm4_cmac_setkey,
		.descsize	= sizeof(struct sm4_mac_desc_ctx),
	}, {
		.base = {
			.cra_name		= "xcbc(sm4)",
			.cra_driver_name	= "xcbc-sm4-ce",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
						  CRYPTO_AHASH_ALG_FINAL_NONZERO,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_mac_tfm_ctx)
							+ SM4_BLOCK_SIZE * 2,
			.cra_module		= THIS_MODULE,
		},
		.digestsize	= SM4_BLOCK_SIZE,
		.init		= sm4_mac_init,
		.update		= sm4_mac_update,
		.finup		= sm4_cmac_finup,
		.setkey		= sm4_xcbc_setkey,
		.descsize	= sizeof(struct sm4_mac_desc_ctx),
	}, {
		.base = {
			.cra_name		= "cbcmac(sm4)",
			.cra_driver_name	= "cbcmac-sm4-ce",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
			.cra_blocksize		= SM4_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sm4_mac_tfm_ctx),
			.cra_module		= THIS_MODULE,
		},
		.digestsize	= SM4_BLOCK_SIZE,
		.init		= sm4_mac_init,
		.update		= sm4_mac_update,
		.finup		= sm4_cbcmac_finup,
		.setkey		= sm4_cbcmac_setkey,
		.descsize	= sizeof(struct sm4_mac_desc_ctx),
	}
};

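/*
 * Registered only on CPUs that advertise the SM4 instructions; see
 * module_cpu_feature_match() below.
 */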
static int __init sm4_init(void)
{
	int err;

	err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
	if (err)
		return err;

	err = crypto_register_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
	if (err)
		goto out_err;

	return 0;

out_err:
	crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
	return err;
}

static void __exit sm4_exit(void)
{
	crypto_unregister_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
	crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
}

module_cpu_feature_match(SM4, sm4_init);
module_exit(sm4_exit);

MODULE_DESCRIPTION("SM4 ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("sm4-ce");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
MODULE_ALIAS_CRYPTO("cts(cbc(sm4))");
MODULE_ALIAS_CRYPTO("xts(sm4)");
MODULE_ALIAS_CRYPTO("cmac(sm4)");
MODULE_ALIAS_CRYPTO("xcbc(sm4)");
MODULE_ALIAS_CRYPTO("cbcmac(sm4)");
MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");