1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright IBM Corp. 2025
4 *
5 * s390 specific HMAC support for protected keys.
6 */
7
8 #define pr_fmt(fmt) "phmac_s390: " fmt
9
10 #include <asm/cpacf.h>
11 #include <asm/pkey.h>
12 #include <crypto/engine.h>
13 #include <crypto/hash.h>
14 #include <crypto/internal/hash.h>
15 #include <crypto/sha2.h>
16 #include <linux/atomic.h>
17 #include <linux/cpufeature.h>
18 #include <linux/delay.h>
19 #include <linux/miscdevice.h>
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22
23 static struct crypto_engine *phmac_crypto_engine;
24 #define MAX_QLEN 10
25
26 /*
27 * A simple hash walk helper
28 */
29
struct hash_walk_helper {
	struct crypto_hash_walk walk;	/* underlying crypto hash walk */
	const u8 *walkaddr;		/* address of the current data hunk */
	int walkbytes;			/* bytes left in the current hunk */
};
35
36 /*
37 * Prepare hash walk helper.
38 * Set up the base hash walk, fill walkaddr and walkbytes.
39 * Returns 0 on success or negative value on error.
40 */
hwh_prepare(struct ahash_request * req,struct hash_walk_helper * hwh)41 static inline int hwh_prepare(struct ahash_request *req,
42 struct hash_walk_helper *hwh)
43 {
44 hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
45 if (hwh->walkbytes < 0)
46 return hwh->walkbytes;
47 hwh->walkaddr = hwh->walk.data;
48 return 0;
49 }
50
51 /*
52 * Advance hash walk helper by n bytes.
53 * Progress the walkbytes and walkaddr fields by n bytes.
54 * If walkbytes is then 0, pull next hunk from hash walk
55 * and update walkbytes and walkaddr.
56 * If n is negative, unmap hash walk and return error.
57 * Returns 0 on success or negative value on error.
58 */
hwh_advance(struct hash_walk_helper * hwh,int n)59 static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
60 {
61 if (n < 0)
62 return crypto_hash_walk_done(&hwh->walk, n);
63
64 hwh->walkbytes -= n;
65 hwh->walkaddr += n;
66 if (hwh->walkbytes > 0)
67 return 0;
68
69 hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
70 if (hwh->walkbytes < 0)
71 return hwh->walkbytes;
72
73 hwh->walkaddr = hwh->walk.data;
74 return 0;
75 }
76
77 /*
78 * KMAC param block layout for sha2 function codes:
79 * The layout of the param block for the KMAC instruction depends on the
80 * blocksize of the used hashing sha2-algorithm function codes. The param block
81 * contains the hash chaining value (cv), the input message bit-length (imbl)
82 * and the hmac-secret (key). To prevent code duplication, the sizes of all
83 * these are calculated based on the blocksize.
84 *
85 * param-block:
86 * +-------+
87 * | cv |
88 * +-------+
89 * | imbl |
90 * +-------+
91 * | key |
92 * +-------+
93 *
94 * sizes:
95 * part | sh2-alg | calculation | size | type
96 * -----+---------+-------------+------+--------
97 * cv | 224/256 | blocksize/2 | 32 | u64[8]
98 * | 384/512 | | 64 | u128[8]
99 * imbl | 224/256 | blocksize/8 | 8 | u64
100 * | 384/512 | | 16 | u128
101 * key | 224/256 | blocksize | 96 | u8[96]
102 * | 384/512 | | 160 | u8[160]
103 */
104
/* maximum sizes over all supported sha2 variants (sha512 is the largest) */
#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
#define MAX_IMBL_SIZE sizeof(u128)
#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE

/* sizes of the cv and imbl fields, derived from the blocksize (see above) */
#define SHA2_CV_SIZE(bs) ((bs) >> 1)
#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)

/* offsets of imbl and key within the KMAC param block */
#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))

/* max raw key material size and protected key blob sizes (key + 32 byte wkvp) */
#define PHMAC_MAX_KEYSIZE 256
#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32)
#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32)
#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE
119
/* phmac protected key struct */
struct phmac_protkey {
	u32 type;			/* pkey key type (PKEY_KEYTYPE_*) */
	u32 len;			/* actual length of protkey[] in bytes */
	u8 protkey[PHMAC_MAX_PK_SIZE];	/* the protected key blob */
};
126
/* values for the pk_state field in the tfm context below */
#define PK_STATE_NO_KEY		     0
#define PK_STATE_CONVERT_IN_PROGRESS 1
#define PK_STATE_VALID		     2

/* phmac tfm context */
struct phmac_tfm_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PHMAC_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;
	/* if state is valid, pk holds the protected key */
	struct phmac_protkey pk;
};
151
/*
 * Layout of general register 0 for the CPACF KMAC instruction.
 * NOTE(review): bit names follow the s390 Principles of Operation;
 * iimp flags an intermediate message part (set during update, cleared
 * for the final call), fc holds the function code. The exact semantics
 * of ikp/ccup should be confirmed against the PoP.
 */
union kmac_gr0 {
	unsigned long reg;
	struct {
		unsigned long : 48;
		unsigned long ikp : 1;
		unsigned long iimp : 1;
		unsigned long ccup : 1;
		unsigned long : 6;
		unsigned long fc : 7;
	};
};
163
/* KMAC hash state: param block, gr0 flags and partial block buffer */
struct kmac_sha2_ctx {
	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE]; /* cv + imbl + key */
	union kmac_gr0 gr0;	/* flags and function code for KMAC */
	u8 buf[MAX_BLOCK_SIZE];	/* buffered incomplete message block */
	u64 buflen[2];		/* 128 bit total byte counter: [0] low, [1] high */
};
170
/* async operation to be performed by the crypto engine callback */
enum async_op {
	OP_NOP = 0,
	OP_UPDATE,
	OP_FINAL,
	OP_FINUP,
};
177
/* phmac request context */
struct phmac_req_ctx {
	struct hash_walk_helper hwh;	/* walk state over the request's data */
	struct kmac_sha2_ctx kmac_ctx;	/* KMAC hash state for this request */
	enum async_op async_op;		/* op to run when processed via engine */
};
184
185 /*
186 * Pkey 'token' struct used to derive a protected key value from a clear key.
187 */
188 struct hmac_clrkey_token {
189 u8 type;
190 u8 res0[3];
191 u8 version;
192 u8 res1[3];
193 u32 keytype;
194 u32 len;
195 u8 key[];
196 } __packed;
197
/*
 * hash_key() - hash the given clear key material with the sha2 variant
 * matching digestsize via the CPACF KLMD instruction. Used by
 * make_clrkey_token() to shrink keys longer than the hmac blocksize.
 * Returns 0 on success or -EINVAL for an unsupported digestsize.
 */
static int hash_key(const u8 *in, unsigned int inlen,
		    u8 *digest, unsigned int digestsize)
{
	unsigned long func;
	union {
		struct sha256_paramblock {
			u32 h[8];
			u64 mbl;	/* message bit length */
		} sha256;
		struct sha512_paramblock {
			u64 h[8];
			u128 mbl;	/* message bit length */
		} sha512;
	} __packed param;

/* initialize the param block with the sha2 initial hash values + bit length */
#define PARAM_INIT(x, y, z) \
	param.sha##x.h[0] = SHA##y ## _H0; \
	param.sha##x.h[1] = SHA##y ## _H1; \
	param.sha##x.h[2] = SHA##y ## _H2; \
	param.sha##x.h[3] = SHA##y ## _H3; \
	param.sha##x.h[4] = SHA##y ## _H4; \
	param.sha##x.h[5] = SHA##y ## _H5; \
	param.sha##x.h[6] = SHA##y ## _H6; \
	param.sha##x.h[7] = SHA##y ## _H7; \
	param.sha##x.mbl = (z)

	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 224, inlen * 8);
		break;
	case SHA256_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 256, inlen * 8);
		break;
	case SHA384_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 384, inlen * 8);
		break;
	case SHA512_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 512, inlen * 8);
		break;
	default:
		return -EINVAL;
	}

#undef PARAM_INIT

	cpacf_klmd(func, &param, in, inlen);

	/* the leading digestsize bytes of the param block hold the digest */
	memcpy(digest, &param, digestsize);

	return 0;
}
253
254 /*
255 * make_clrkey_token() - wrap the clear key into a pkey clearkey token.
256 */
make_clrkey_token(const u8 * clrkey,size_t clrkeylen,unsigned int digestsize,u8 * dest)257 static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
258 unsigned int digestsize, u8 *dest)
259 {
260 struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
261 unsigned int blocksize;
262 int rc;
263
264 token->type = 0x00;
265 token->version = 0x02;
266 switch (digestsize) {
267 case SHA224_DIGEST_SIZE:
268 case SHA256_DIGEST_SIZE:
269 token->keytype = PKEY_KEYTYPE_HMAC_512;
270 blocksize = 64;
271 break;
272 case SHA384_DIGEST_SIZE:
273 case SHA512_DIGEST_SIZE:
274 token->keytype = PKEY_KEYTYPE_HMAC_1024;
275 blocksize = 128;
276 break;
277 default:
278 return -EINVAL;
279 }
280 token->len = blocksize;
281
282 if (clrkeylen > blocksize) {
283 rc = hash_key(clrkey, clrkeylen, token->key, digestsize);
284 if (rc)
285 return rc;
286 } else {
287 memcpy(token->key, clrkey, clrkeylen);
288 }
289
290 return 0;
291 }
292
293 /*
294 * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct
295 * a clear key token digestible by pkey from a clear key value.
296 */
phmac_tfm_ctx_setkey(struct phmac_tfm_ctx * tfm_ctx,const u8 * key,unsigned int keylen)297 static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
298 const u8 *key, unsigned int keylen)
299 {
300 if (keylen > sizeof(tfm_ctx->keybuf))
301 return -EINVAL;
302
303 memcpy(tfm_ctx->keybuf, key, keylen);
304 tfm_ctx->keylen = keylen;
305
306 return 0;
307 }
308
309 /*
310 * Convert the raw key material into a protected key via PKEY api.
311 * This function may sleep - don't call in non-sleeping context.
312 */
convert_key(const u8 * key,unsigned int keylen,struct phmac_protkey * pk)313 static inline int convert_key(const u8 *key, unsigned int keylen,
314 struct phmac_protkey *pk)
315 {
316 int rc, i;
317
318 pk->len = sizeof(pk->protkey);
319
320 /*
321 * In case of a busy card retry with increasing delay
322 * of 200, 400, 800 and 1600 ms - in total 3 s.
323 */
324 for (rc = -EIO, i = 0; rc && i < 5; i++) {
325 if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
326 rc = -EINTR;
327 goto out;
328 }
329 rc = pkey_key2protkey(key, keylen,
330 pk->protkey, &pk->len, &pk->type,
331 PKEY_XFLAG_NOMEMALLOC);
332 }
333
334 out:
335 pr_debug("rc=%d\n", rc);
336 return rc;
337 }
338
339 /*
340 * (Re-)Convert the raw key material from the tfm ctx into a protected
341 * key via convert_key() function. Update the pk_state, pk_type, pk_len
342 * and the protected key in the tfm context.
343 * Please note this function may be invoked concurrently with the very
344 * same tfm context. The pk_lock spinlock in the context ensures an
345 * atomic update of the pk and the pk state but does not guarantee any
346 * order of update. So a fresh converted valid protected key may get
347 * updated with an 'old' expired key value. As the cpacf instructions
348 * detect this, refuse to operate with an invalid key and the calling
349 * code triggers a (re-)conversion this does no harm. This may lead to
350 * unnecessary additional conversion but never to invalid data on the
351 * hash operation.
352 */
phmac_convert_key(struct phmac_tfm_ctx * tfm_ctx)353 static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
354 {
355 struct phmac_protkey pk;
356 int rc;
357
358 spin_lock_bh(&tfm_ctx->pk_lock);
359 tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
360 spin_unlock_bh(&tfm_ctx->pk_lock);
361
362 rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);
363
364 /* update context */
365 spin_lock_bh(&tfm_ctx->pk_lock);
366 if (rc) {
367 tfm_ctx->pk_state = rc;
368 } else {
369 tfm_ctx->pk_state = PK_STATE_VALID;
370 tfm_ctx->pk = pk;
371 }
372 spin_unlock_bh(&tfm_ctx->pk_lock);
373
374 memzero_explicit(&pk, sizeof(pk));
375 pr_debug("rc=%d\n", rc);
376 return rc;
377 }
378
379 /*
380 * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
381 */
kmac_sha2_set_imbl(u8 * param,u64 buflen_lo,u64 buflen_hi,unsigned int blocksize)382 static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
383 u64 buflen_hi, unsigned int blocksize)
384 {
385 u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
386
387 switch (blocksize) {
388 case SHA256_BLOCK_SIZE:
389 *(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
390 break;
391 case SHA512_BLOCK_SIZE:
392 *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
393 break;
394 default:
395 break;
396 }
397 }
398
/*
 * phmac_kmac_update() - feed the data from the hash walk into the CPACF
 * KMAC instruction. Incomplete message blocks are buffered in the kmac
 * sha2 context. If kmac refuses to work because the protected key got
 * invalid, the key is re-converted (only if maysleep) and the operation
 * is retried.
 * Returns 0 on success, -EKEYEXPIRED if the protected key needs
 * re-conversion but maysleep is false, or another negative value on error.
 */
static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int offset, k, n;
	int rc = 0;

	/*
	 * The walk is always mapped when this function is called.
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */

	while (hwh->walkbytes > 0) {
		/* check sha2 context buffer */
		offset = ctx->buflen[0] % bs;
		if (offset + hwh->walkbytes < bs)
			goto store;

		if (offset) {
			/* fill ctx buffer up to blocksize and process this block */
			n = bs - offset;
			memcpy(ctx->buf + offset, hwh->walkaddr, n);
			ctx->gr0.iimp = 1;
			for (;;) {
				k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
				if (likely(k == bs))
					break;
				if (unlikely(k > 0)) {
					/*
					 * Can't deal with hunks smaller than blocksize.
					 * And kmac should always return the nr of
					 * processed bytes as 0 or a multiple of the
					 * blocksize.
					 */
					rc = -EIO;
					goto out;
				}
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				/* copy the fresh protected key into the param block */
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
			/* 128 bit add of n to the byte counter */
			ctx->buflen[0] += n;
			if (ctx->buflen[0] < n)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, n);
			if (unlikely(rc))
				goto out;
			offset = 0;
		}

		/* process as many blocks as possible from the walk */
		while (hwh->walkbytes >= bs) {
			n = (hwh->walkbytes / bs) * bs;
			ctx->gr0.iimp = 1;
			k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
			if (likely(k > 0)) {
				/* account for and consume the processed bytes */
				ctx->buflen[0] += k;
				if (ctx->buflen[0] < k)
					ctx->buflen[1]++;
				rc = hwh_advance(hwh, k);
				if (unlikely(rc))
					goto out;
			}
			if (unlikely(k < n)) {
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
		}

store:
		/* store incomplete block in context buffer */
		if (hwh->walkbytes) {
			memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
			ctx->buflen[0] += hwh->walkbytes;
			if (ctx->buflen[0] < hwh->walkbytes)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, hwh->walkbytes);
			if (unlikely(rc))
				goto out;
		}

	} /* end of while (hwh->walkbytes > 0) */

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
511
/*
 * phmac_kmac_final() - process the remaining buffered bytes with the
 * input message bit-length set (iimp = 0 marks the final message part)
 * and copy the resulting mac into req->result.
 * Returns 0 on success, -EKEYEXPIRED if the protected key needs
 * re-conversion but maysleep is false, or another negative value on error.
 */
static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int k, n;
	int rc = 0;

	/* nr of bytes still buffered in the context */
	n = ctx->buflen[0] % bs;
	ctx->gr0.iimp = 0;
	kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
	for (;;) {
		k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
		if (likely(k == n))
			break;
		if (unlikely(k > 0)) {
			/* Can't deal with hunks smaller than blocksize. */
			rc = -EIO;
			goto out;
		}
		/* protected key is invalid and needs re-conversion */
		if (!maysleep) {
			rc = -EKEYEXPIRED;
			goto out;
		}
		rc = phmac_convert_key(tfm_ctx);
		if (rc)
			goto out;
		/* copy the fresh protected key into the param block and retry */
		spin_lock_bh(&tfm_ctx->pk_lock);
		memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
		       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
		spin_unlock_bh(&tfm_ctx->pk_lock);
	}

	/* the first ds bytes of the param block hold the final mac */
	memcpy(req->result, ctx->param, ds);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
555
/*
 * phmac_init() - ahash init callback. Reset the request context and
 * seed the kmac context with the function code and the (possibly
 * outdated) protected key from the tfm context.
 * Returns 0 on success or -ENOKEY if no key has been set.
 */
static int phmac_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	int rc = 0;

	/* zero request context (includes the kmac sha2 context) */
	memset(req_ctx, 0, sizeof(*req_ctx));

	/*
	 * setkey() should have set a valid fc into the tfm context.
	 * Copy this function code into the gr0 field of the kmac context.
	 */
	if (!tfm_ctx->fc) {
		rc = -ENOKEY;
		goto out;
	}
	kmac_ctx->gr0.fc = tfm_ctx->fc;

	/*
	 * Copy the pk from tfm ctx into kmac ctx. The protected key
	 * may be outdated but update() and final() will handle this.
	 */
	spin_lock_bh(&tfm_ctx->pk_lock);
	memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
	       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
	spin_unlock_bh(&tfm_ctx->pk_lock);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
591
/*
 * phmac_update() - ahash update callback. First tries a synchronous
 * update; if that is not possible (key expired or requests already
 * queued on the engine) the request is handed over to the crypto engine.
 * Returns 0, -EINPROGRESS (async processing) or a negative error.
 */
static int phmac_update(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_UPDATE;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* not queued to the engine: unmap the walk and wipe the hash state */
	if (rc != -EINPROGRESS) {
		hwh_advance(hwh, rc);
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
635
/*
 * phmac_final() - ahash final callback. First tries a synchronous
 * final; if that is not possible (key expired or requests already
 * queued on the engine) the request is handed over to the crypto engine.
 * Returns 0, -EINPROGRESS (async processing) or a negative error.
 */
static int phmac_final(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	int rc = 0;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_FINAL;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

out:
	/* not queued to the engine: wipe the hash state */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
670
/*
 * phmac_finup() - ahash finup callback. Tries the update and final
 * steps synchronously; whatever could not be done synchronously
 * (either the whole finup or only the final step) is handed over to
 * the crypto engine with async_op set accordingly.
 * Returns 0, -EINPROGRESS (async processing) or a negative error.
 */
static int phmac_finup(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	req_ctx->async_op = OP_FINUP;

	/* Try synchronous operations if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			/* update done - only the final step is left */
			req_ctx->async_op = OP_FINAL;
	}
	if (!rc && req_ctx->async_op == OP_FINAL &&
	    !atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		/* req->async_op has been set to either OP_FINUP or OP_FINAL */
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* not queued to the engine: unmap the walk */
	if (rc != -EINPROGRESS)
		hwh_advance(hwh, rc);

out:
	/* not queued to the engine: wipe the hash state */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
722
/*
 * phmac_digest() - ahash digest callback, simply init + finup.
 * Returns 0, -EINPROGRESS (async processing) or a negative error.
 */
static int phmac_digest(struct ahash_request *req)
{
	int rc;

	rc = phmac_init(req);
	if (!rc)
		rc = phmac_finup(req);

	pr_debug("rc=%d\n", rc);
	return rc;
}
737
/*
 * phmac_setkey() - ahash setkey callback. Stores the key material in
 * the tfm context, derives a protected key from it and selects the
 * cpacf function code matching the digestsize and protected key type.
 * During selftest the raw hmac clear key is first wrapped into a
 * pkey clear key token.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int tmpkeylen;
	u8 *tmpkey = NULL;
	int rc = 0;

	if (!crypto_ahash_tested(tfm)) {
		/*
		 * selftest running: key is a raw hmac clear key and needs
		 * to get embedded into a 'clear key token' in order to have
		 * it correctly processed by the pkey module.
		 */
		tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
		tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
		if (!tmpkey) {
			rc = -ENOMEM;
			goto out;
		}
		rc = make_clrkey_token(key, keylen, ds, tmpkey);
		if (rc)
			goto out;
		keylen = tmpkeylen;
		key = tmpkey;
	}

	/* copy raw key into tfm context */
	rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = phmac_convert_key(tfm_ctx);
	if (rc)
		goto out;

	/* set function code in tfm context, check for valid pk type */
	switch (ds) {
	case SHA224_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
		break;
	case SHA256_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
		break;
	case SHA384_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
		break;
	case SHA512_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
		break;
	default:
		tfm_ctx->fc = 0;
		rc = -EINVAL;
	}

out:
	kfree(tmpkey);
	pr_debug("rc=%d\n", rc);
	return rc;
}
813
phmac_export(struct ahash_request * req,void * out)814 static int phmac_export(struct ahash_request *req, void *out)
815 {
816 struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
817 struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
818
819 memcpy(out, ctx, sizeof(*ctx));
820
821 return 0;
822 }
823
phmac_import(struct ahash_request * req,const void * in)824 static int phmac_import(struct ahash_request *req, const void *in)
825 {
826 struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
827 struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
828
829 memset(req_ctx, 0, sizeof(*req_ctx));
830 memcpy(ctx, in, sizeof(*ctx));
831
832 return 0;
833 }
834
phmac_init_tfm(struct crypto_ahash * tfm)835 static int phmac_init_tfm(struct crypto_ahash *tfm)
836 {
837 struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
838
839 memset(tfm_ctx, 0, sizeof(*tfm_ctx));
840 spin_lock_init(&tfm_ctx->pk_lock);
841
842 crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));
843
844 return 0;
845 }
846
phmac_exit_tfm(struct crypto_ahash * tfm)847 static void phmac_exit_tfm(struct crypto_ahash *tfm)
848 {
849 struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
850
851 memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
852 memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
853 }
854
/*
 * phmac_do_one_request() - crypto engine callback, processes one
 * queued request (update, finup or final) with maysleep = true so an
 * expired protected key can be re-converted. An -EKEYEXPIRED from the
 * kmac functions causes the request to be re-scheduled via -ENOSPC.
 */
static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc = -EINVAL;

	/*
	 * Three kinds of requests come in here:
	 * 1. req->async_op == OP_UPDATE with req->nbytes > 0
	 * 2. req->async_op == OP_FINUP with req->nbytes > 0
	 * 3. req->async_op == OP_FINAL
	 * For update and finup the hwh walk has already been prepared
	 * by the caller. For final there is no hwh walk needed.
	 */

	switch (req_ctx->async_op) {
	case OP_UPDATE:
	case OP_FINUP:
		rc = phmac_kmac_update(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		} else if (rc) {
			/* unrecoverable failure - unmap the walk */
			hwh_advance(hwh, rc);
			goto out;
		}
		if (req_ctx->async_op == OP_UPDATE)
			break;
		/* finup: the update part is done, continue with final */
		req_ctx->async_op = OP_FINAL;
		fallthrough;
	case OP_FINAL:
		rc = phmac_kmac_final(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		}
		break;
	default:
		/* unknown/unsupported/unimplemented asynch op */
		return -EOPNOTSUPP;
	}

out:
	/* request is complete (or failed) - wipe the hash state */
	if (rc || req_ctx->async_op == OP_FINAL)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&tfm_ctx->via_engine_ctr);
	crypto_finalize_hash_request(engine, req, rc);
	local_bh_enable();
	return rc;
}
927
/* template for one async phmac ahash engine alg, x = sha2 digest bits */
#define S390_ASYNC_PHMAC_ALG(x)						\
{									\
	.base = {							\
		.init	  = phmac_init,					\
		.update	  = phmac_update,				\
		.final	  = phmac_final,				\
		.finup	  = phmac_finup,				\
		.digest	  = phmac_digest,				\
		.setkey	  = phmac_setkey,				\
		.import	  = phmac_import,				\
		.export	  = phmac_export,				\
		.init_tfm = phmac_init_tfm,				\
		.exit_tfm = phmac_exit_tfm,				\
		.halg = {						\
			.digestsize = SHA##x##_DIGEST_SIZE,		\
			.statesize  = sizeof(struct kmac_sha2_ctx),	\
			.base = {					\
				.cra_name = "phmac(sha" #x ")",		\
				.cra_driver_name = "phmac_s390_sha" #x,	\
				.cra_blocksize = SHA##x##_BLOCK_SIZE,	\
				.cra_priority = 400,			\
				.cra_flags = CRYPTO_ALG_ASYNC |		\
					     CRYPTO_ALG_NO_FALLBACK,	\
				.cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
				.cra_module = THIS_MODULE,		\
			},						\
		},							\
	},								\
	.op = {								\
		.do_one_request = phmac_do_one_request,			\
	},								\
}
960
/* all phmac algs; registered at module init if the KMAC fc is available */
static struct phmac_alg {
	unsigned int fc;		/* required CPACF KMAC function code */
	struct ahash_engine_alg alg;
	bool registered;		/* true once registered with the crypto api */
} phmac_algs[] = {
	{
		.fc = CPACF_KMAC_PHMAC_SHA_224,
		.alg = S390_ASYNC_PHMAC_ALG(224),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_256,
		.alg = S390_ASYNC_PHMAC_ALG(256),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_384,
		.alg = S390_ASYNC_PHMAC_ALG(384),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_512,
		.alg = S390_ASYNC_PHMAC_ALG(512),
	}
};
980
/* pseudo misc device - only used as parent device for the crypto engine */
static struct miscdevice phmac_dev = {
	.name	= "phmac",
	.minor	= MISC_DYNAMIC_MINOR,
};
985
/*
 * s390_phmac_exit() - module exit, also used as error cleanup path by
 * s390_phmac_init(). Stops the engine, unregisters all registered algs
 * (in reverse order) and removes the pseudo misc device.
 */
static void s390_phmac_exit(void)
{
	struct phmac_alg *phmac;
	int i;

	if (phmac_crypto_engine) {
		crypto_engine_stop(phmac_crypto_engine);
		crypto_engine_exit(phmac_crypto_engine);
	}

	for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
		phmac = &phmac_algs[i];
		if (phmac->registered)
			crypto_engine_unregister_ahash(&phmac->alg);
	}

	misc_deregister(&phmac_dev);
}
1004
/*
 * s390_phmac_init() - module init. Checks for the required CPACF KLMD
 * subfunctions, sets up the pseudo misc device and the crypto engine,
 * and registers every phmac alg whose KMAC function code is available.
 * Returns 0 on success or a negative value on error.
 */
static int __init s390_phmac_init(void)
{
	struct phmac_alg *phmac;
	int i, rc;

	/* for selftest cpacf klmd subfunction is needed */
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
		return -ENODEV;
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
		return -ENODEV;

	/* register a simple phmac pseudo misc device */
	rc = misc_register(&phmac_dev);
	if (rc)
		return rc;

	/* with this pseudo device alloc and start a crypto engine */
	phmac_crypto_engine =
		crypto_engine_alloc_init_and_set(phmac_dev.this_device,
						 true, false, MAX_QLEN);
	if (!phmac_crypto_engine) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crypto_engine_start(phmac_crypto_engine);
	if (rc) {
		crypto_engine_exit(phmac_crypto_engine);
		phmac_crypto_engine = NULL;
		goto out_err;
	}

	for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
		phmac = &phmac_algs[i];
		/* silently skip algs whose KMAC fc is not available */
		if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
			continue;
		rc = crypto_engine_register_ahash(&phmac->alg);
		if (rc)
			goto out_err;
		phmac->registered = true;
		pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
	}

	return 0;

out_err:
	s390_phmac_exit();
	return rc;
}
1053
1054 module_init(s390_phmac_init);
1055 module_exit(s390_phmac_exit);
1056
1057 MODULE_ALIAS_CRYPTO("phmac(sha224)");
1058 MODULE_ALIAS_CRYPTO("phmac(sha256)");
1059 MODULE_ALIAS_CRYPTO("phmac(sha384)");
1060 MODULE_ALIAS_CRYPTO("phmac(sha512)");
1061
1062 MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
1063 MODULE_LICENSE("GPL");
1064