xref: /linux/arch/s390/crypto/phmac_s390.c (revision 3ad81aa52085a7e67edfa4bc8f518e5962196bb3)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright IBM Corp. 2025
4  *
5  * s390 specific HMAC support for protected keys.
6  */
7 
8 #define KMSG_COMPONENT	"phmac_s390"
9 #define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
10 
11 #include <asm/cpacf.h>
12 #include <asm/pkey.h>
13 #include <crypto/engine.h>
14 #include <crypto/hash.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/sha2.h>
17 #include <linux/atomic.h>
18 #include <linux/cpufeature.h>
19 #include <linux/delay.h>
20 #include <linux/miscdevice.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 
/* singleton crypto engine used to serialize/retry async phmac requests */
static struct crypto_engine *phmac_crypto_engine;
/* max nr of requests the crypto engine queue may hold */
#define MAX_QLEN 10
26 
27 /*
28  * A simple hash walk helper
29  */
30 
struct hash_walk_helper {
	struct crypto_hash_walk walk;	/* underlying crypto hash walk */
	const u8 *walkaddr;		/* start of the current data hunk */
	int walkbytes;			/* nr of bytes left in current hunk */
};
36 
37 /*
38  * Prepare hash walk helper.
39  * Set up the base hash walk, fill walkaddr and walkbytes.
40  * Returns 0 on success or negative value on error.
41  */
hwh_prepare(struct ahash_request * req,struct hash_walk_helper * hwh)42 static inline int hwh_prepare(struct ahash_request *req,
43 			      struct hash_walk_helper *hwh)
44 {
45 	hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
46 	if (hwh->walkbytes < 0)
47 		return hwh->walkbytes;
48 	hwh->walkaddr = hwh->walk.data;
49 	return 0;
50 }
51 
52 /*
53  * Advance hash walk helper by n bytes.
54  * Progress the walkbytes and walkaddr fields by n bytes.
55  * If walkbytes is then 0, pull next hunk from hash walk
56  * and update walkbytes and walkaddr.
57  * If n is negative, unmap hash walk and return error.
58  * Returns 0 on success or negative value on error.
59  */
hwh_advance(struct hash_walk_helper * hwh,int n)60 static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
61 {
62 	if (n < 0)
63 		return crypto_hash_walk_done(&hwh->walk, n);
64 
65 	hwh->walkbytes -= n;
66 	hwh->walkaddr += n;
67 	if (hwh->walkbytes > 0)
68 		return 0;
69 
70 	hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
71 	if (hwh->walkbytes < 0)
72 		return hwh->walkbytes;
73 
74 	hwh->walkaddr = hwh->walk.data;
75 	return 0;
76 }
77 
78 /*
79  * KMAC param block layout for sha2 function codes:
80  * The layout of the param block for the KMAC instruction depends on the
81  * blocksize of the used hashing sha2-algorithm function codes. The param block
82  * contains the hash chaining value (cv), the input message bit-length (imbl)
83  * and the hmac-secret (key). To prevent code duplication, the sizes of all
84  * these are calculated based on the blocksize.
85  *
86  * param-block:
87  * +-------+
88  * | cv    |
89  * +-------+
90  * | imbl  |
91  * +-------+
92  * | key   |
93  * +-------+
94  *
95  * sizes:
96  * part | sh2-alg | calculation | size | type
97  * -----+---------+-------------+------+--------
98  * cv   | 224/256 | blocksize/2 |   32 |  u64[8]
99  *      | 384/512 |             |   64 | u128[8]
100  * imbl | 224/256 | blocksize/8 |    8 |     u64
101  *      | 384/512 |             |   16 |    u128
102  * key  | 224/256 | blocksize   |   96 |  u8[96]
103  *      | 384/512 |             |  160 | u8[160]
104  */
105 
/* worst-case sizes over all supported sha2 variants */
#define MAX_DIGEST_SIZE		SHA512_DIGEST_SIZE
#define MAX_IMBL_SIZE		sizeof(u128)
#define MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE

/* sizes of the cv and imbl parts derived from the blocksize, see above */
#define SHA2_CV_SIZE(bs)	((bs) >> 1)
#define SHA2_IMBL_SIZE(bs)	((bs) >> 3)

/* byte offsets of imbl and key within the KMAC param block */
#define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))

/* max size of the raw key material stored in the tfm context */
#define PHMAC_MAX_KEYSIZE       256
/* protected key sizes: hmac key block plus 32 bytes verification pattern */
#define PHMAC_SHA256_PK_SIZE	(SHA256_BLOCK_SIZE + 32)
#define PHMAC_SHA512_PK_SIZE	(SHA512_BLOCK_SIZE + 32)
#define PHMAC_MAX_PK_SIZE	PHMAC_SHA512_PK_SIZE
120 
/* phmac protected key struct */
struct phmac_protkey {
	u32 type;			/* protected key type (PKEY_KEYTYPE_*) */
	u32 len;			/* actual nr of bytes used in protkey[] */
	u8 protkey[PHMAC_MAX_PK_SIZE];	/* the protected key blob */
};
127 
/* no key material set yet (initial state) */
#define PK_STATE_NO_KEY		     0
/* a key conversion is currently running */
#define PK_STATE_CONVERT_IN_PROGRESS 1
/* pk holds a valid protected key */
#define PK_STATE_VALID		     2
131 
/* phmac tfm context */
struct phmac_tfm_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PHMAC_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc  */
	int pk_state;
	/* if state is valid, pk holds the protected key */
	struct phmac_protkey pk;
};
152 
/*
 * Layout of general register 0 as used by the KMAC instruction.
 * NOTE(review): bit names follow the CPACF KMAC definition; see the
 * z/Architecture Principles of Operation for the exact semantics of
 * the ikp and ccup indication bits.
 */
union kmac_gr0 {
	unsigned long reg;
	struct {
		unsigned long		: 48;
		unsigned long ikp	:  1;	/* invalid key indication - presumably, confirm with PoP */
		unsigned long iimp	:  1;	/* set while feeding intermediate message parts */
		unsigned long ccup	:  1;	/* chaining value update indication - presumably, confirm with PoP */
		unsigned long		:  6;
		unsigned long fc	:  7;	/* KMAC function code */
	};
};
164 
/* per-request KMAC state; also the exported/imported hash state */
struct kmac_sha2_ctx {
	/* KMAC param block: cv, imbl and protected key (see layout above) */
	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE];
	union kmac_gr0 gr0;	/* gr0 contents (fc and indication bits) */
	u8 buf[MAX_BLOCK_SIZE];	/* buffer for a partial input block */
	u64 buflen[2];		/* 128 bit total byte count: [0]=lo, [1]=hi */
};
171 
/* kind of operation a request transferred to the crypto engine performs */
enum async_op {
	OP_NOP = 0,
	OP_UPDATE,	/* process walk data only */
	OP_FINAL,	/* produce the final digest, no walk data */
	OP_FINUP,	/* process walk data, then produce the final digest */
};
178 
/* phmac request context */
struct phmac_req_ctx {
	struct hash_walk_helper hwh;	/* walk over the request src data */
	struct kmac_sha2_ctx kmac_ctx;	/* KMAC state for this request */
	enum async_op async_op;		/* op to perform when run via engine */
};
185 
186 /*
187  * Pkey 'token' struct used to derive a protected key value from a clear key.
188  */
struct hmac_clrkey_token {
	u8  type;	/* token type, 0x00 for clear key tokens */
	u8  res0[3];
	u8  version;	/* token version, 0x02 here */
	u8  res1[3];
	u32 keytype;	/* PKEY_KEYTYPE_HMAC_512 or _1024 */
	u32 len;	/* key material length in bytes */
	u8 key[];	/* the clear key material */
} __packed;
198 
/*
 * hash_key() - condense clear key material via a one-shot sha2 digest.
 * Computes the sha2 digest matching @digestsize over @in/@inlen using
 * the CPACF KLMD instruction and stores it into @digest.
 * Returns 0 on success, -EINVAL for an unsupported digest size.
 */
static int hash_key(const u8 *in, unsigned int inlen,
		    u8 *digest, unsigned int digestsize)
{
	unsigned long func;
	union {
		struct sha256_paramblock {
			u32 h[8];
			u64 mbl;
		} sha256;
		struct sha512_paramblock {
			u64 h[8];
			u128 mbl;
		} sha512;
	} __packed param;

/* initialize the KLMD param block with initial hash values and bit length */
#define PARAM_INIT(x, y, z)		   \
	param.sha##x.h[0] = SHA##y ## _H0; \
	param.sha##x.h[1] = SHA##y ## _H1; \
	param.sha##x.h[2] = SHA##y ## _H2; \
	param.sha##x.h[3] = SHA##y ## _H3; \
	param.sha##x.h[4] = SHA##y ## _H4; \
	param.sha##x.h[5] = SHA##y ## _H5; \
	param.sha##x.h[6] = SHA##y ## _H6; \
	param.sha##x.h[7] = SHA##y ## _H7; \
	param.sha##x.mbl = (z)

	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 224, inlen * 8);
		break;
	case SHA256_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 256, inlen * 8);
		break;
	case SHA384_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 384, inlen * 8);
		break;
	case SHA512_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 512, inlen * 8);
		break;
	default:
		return -EINVAL;
	}

#undef PARAM_INIT

	cpacf_klmd(func, &param, in, inlen);

	/* the digest is the leading part of the hash chaining value */
	memcpy(digest, &param, digestsize);

	return 0;
}
254 
255 /*
256  * make_clrkey_token() - wrap the clear key into a pkey clearkey token.
257  */
static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
				    unsigned int digestsize, u8 *dest)
{
	struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
	unsigned int blocksize;

	token->type = 0x00;
	token->version = 0x02;
	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
	case SHA256_DIGEST_SIZE:
		blocksize = 64;
		token->keytype = PKEY_KEYTYPE_HMAC_512;
		break;
	case SHA384_DIGEST_SIZE:
	case SHA512_DIGEST_SIZE:
		blocksize = 128;
		token->keytype = PKEY_KEYTYPE_HMAC_1024;
		break;
	default:
		return -EINVAL;
	}
	token->len = blocksize;

	/* keys longer than one block get condensed via hashing first */
	if (clrkeylen > blocksize)
		return hash_key(clrkey, clrkeylen, token->key, digestsize);

	memcpy(token->key, clrkey, clrkeylen);
	return 0;
}
293 
294 /*
295  * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct
296  * a clear key token digestible by pkey from a clear key value.
297  */
phmac_tfm_ctx_setkey(struct phmac_tfm_ctx * tfm_ctx,const u8 * key,unsigned int keylen)298 static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
299 				       const u8 *key, unsigned int keylen)
300 {
301 	if (keylen > sizeof(tfm_ctx->keybuf))
302 		return -EINVAL;
303 
304 	memcpy(tfm_ctx->keybuf, key, keylen);
305 	tfm_ctx->keylen = keylen;
306 
307 	return 0;
308 }
309 
310 /*
311  * Convert the raw key material into a protected key via PKEY api.
312  * This function may sleep - don't call in non-sleeping context.
313  */
static inline int convert_key(const u8 *key, unsigned int keylen,
			      struct phmac_protkey *pk)
{
	int rc, i;

	pk->len = sizeof(pk->protkey);

	/*
	 * In case of a busy card retry with increasing delay
	 * of 200, 400, 800 and 1600 ms - in total 3 s.
	 * Note the sleep is only done when the previous attempt
	 * failed with -EBUSY; other errors retry without delay.
	 */
	for (rc = -EIO, i = 0; rc && i < 5; i++) {
		if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
			/* interrupted while sleeping - give up */
			rc = -EINTR;
			goto out;
		}
		rc = pkey_key2protkey(key, keylen,
				      pk->protkey, &pk->len, &pk->type,
				      PKEY_XFLAG_NOMEMALLOC);
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
339 
340 /*
341  * (Re-)Convert the raw key material from the tfm ctx into a protected
342  * key via convert_key() function. Update the pk_state, pk_type, pk_len
343  * and the protected key in the tfm context.
344  * Please note this function may be invoked concurrently with the very
345  * same tfm context. The pk_lock spinlock in the context ensures an
346  * atomic update of the pk and the pk state but does not guarantee any
347  * order of update. So a fresh converted valid protected key may get
348  * updated with an 'old' expired key value. As the cpacf instructions
349  * detect this, refuse to operate with an invalid key and the calling
350  * code triggers a (re-)conversion this does no harm. This may lead to
351  * unnecessary additional conversion but never to invalid data on the
352  * hash operation.
353  */
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
{
	struct phmac_protkey pk;
	int rc;

	/* announce the conversion, then convert without holding the lock */
	spin_lock_bh(&tfm_ctx->pk_lock);
	tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
	spin_unlock_bh(&tfm_ctx->pk_lock);

	rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);

	/* update context */
	spin_lock_bh(&tfm_ctx->pk_lock);
	if (rc) {
		/* a negative pk_state holds the conversion failure rc */
		tfm_ctx->pk_state = rc;
	} else {
		tfm_ctx->pk_state = PK_STATE_VALID;
		tfm_ctx->pk = pk;
	}
	spin_unlock_bh(&tfm_ctx->pk_lock);

	/* wipe the local protected key copy */
	memzero_explicit(&pk, sizeof(pk));
	pr_debug("rc=%d\n", rc);
	return rc;
}
379 
380 /*
381  * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
382  */
static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
				      u64 buflen_hi, unsigned int blocksize)
{
	u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);

	if (blocksize == SHA256_BLOCK_SIZE) {
		/* sha224/256: imbl is a 64 bit value */
		*(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
	} else if (blocksize == SHA512_BLOCK_SIZE) {
		/* sha384/512: imbl is a 128 bit value */
		*(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
	}
	/* any other blocksize leaves the imbl field untouched */
}
399 
/*
 * Feed the data from the request's hash walk into the KMAC instruction:
 * partial blocks are collected in the kmac ctx buffer, full blocks are
 * processed directly from the walk. When the cpacf instruction reports
 * an invalid (expired) protected key, the key is re-converted and the
 * operation retried - but only if @maysleep; otherwise -EKEYEXPIRED is
 * returned and the caller is expected to redo the call in a sleeping
 * context (via the crypto engine).
 * Returns 0 on success or a negative value on error.
 */
static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int offset, k, n;
	int rc = 0;

	/*
	 * The walk is always mapped when this function is called.
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */

	while (hwh->walkbytes > 0) {
		/* check sha2 context buffer */
		offset = ctx->buflen[0] % bs;
		if (offset + hwh->walkbytes < bs)
			goto store;

		if (offset) {
			/* fill ctx buffer up to blocksize and process this block */
			n = bs - offset;
			memcpy(ctx->buf + offset, hwh->walkaddr, n);
			ctx->gr0.iimp = 1;
			for (;;) {
				k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
				if (likely(k == bs))
					break;
				if (unlikely(k > 0)) {
					/*
					 * Can't deal with hunks smaller than blocksize.
					 * And kmac should always return the nr of
					 * processed bytes as 0 or a multiple of the
					 * blocksize.
					 */
					rc = -EIO;
					goto out;
				}
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				/* copy the fresh protected key into the param block */
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
			/* 128 bit add of n to the buflen counter */
			ctx->buflen[0] += n;
			if (ctx->buflen[0] < n)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, n);
			if (unlikely(rc))
				goto out;
			offset = 0;
		}

		/* process as many blocks as possible from the walk */
		while (hwh->walkbytes >= bs) {
			n = (hwh->walkbytes / bs) * bs;
			ctx->gr0.iimp = 1;
			k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
			if (likely(k > 0)) {
				/* 128 bit add of k to the buflen counter */
				ctx->buflen[0] += k;
				if (ctx->buflen[0] < k)
					ctx->buflen[1]++;
				rc = hwh_advance(hwh, k);
				if (unlikely(rc))
					goto out;
			}
			if (unlikely(k < n)) {
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				/* copy the fresh protected key into the param block */
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
		}

store:
		/* store incomplete block in context buffer */
		if (hwh->walkbytes) {
			memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
			/* 128 bit add of walkbytes to the buflen counter */
			ctx->buflen[0] += hwh->walkbytes;
			if (ctx->buflen[0] < hwh->walkbytes)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, hwh->walkbytes);
			if (unlikely(rc))
				goto out;
		}

	} /* end of while (hwh->walkbytes > 0) */

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
512 
/*
 * Process the remaining bytes in the kmac ctx buffer with iimp cleared
 * (final message part) and copy the resulting digest to req->result.
 * On an invalid protected key the key is re-converted and the kmac
 * operation retried - only if @maysleep; otherwise -EKEYEXPIRED is
 * returned and the caller has to redo the call in a sleeping context.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int k, n;
	int rc = 0;

	/* nr of bytes still pending in the ctx buffer */
	n = ctx->buflen[0] % bs;
	ctx->gr0.iimp = 0;
	kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
	for (;;) {
		k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
		if (likely(k == n))
			break;
		if (unlikely(k > 0)) {
			/* Can't deal with hunks smaller than blocksize. */
			rc = -EIO;
			goto out;
		}
		/* protected key is invalid and needs re-conversion */
		if (!maysleep) {
			rc = -EKEYEXPIRED;
			goto out;
		}
		rc = phmac_convert_key(tfm_ctx);
		if (rc)
			goto out;
		/* copy the fresh protected key into the param block */
		spin_lock_bh(&tfm_ctx->pk_lock);
		memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
		       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
		spin_unlock_bh(&tfm_ctx->pk_lock);
	}

	/* the digest is the leading part of the param block (the cv) */
	memcpy(req->result, ctx->param, ds);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
556 
phmac_init(struct ahash_request * req)557 static int phmac_init(struct ahash_request *req)
558 {
559 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
560 	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
561 	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
562 	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
563 	unsigned int bs = crypto_ahash_blocksize(tfm);
564 	int rc = 0;
565 
566 	/* zero request context (includes the kmac sha2 context) */
567 	memset(req_ctx, 0, sizeof(*req_ctx));
568 
569 	/*
570 	 * setkey() should have set a valid fc into the tfm context.
571 	 * Copy this function code into the gr0 field of the kmac context.
572 	 */
573 	if (!tfm_ctx->fc) {
574 		rc = -ENOKEY;
575 		goto out;
576 	}
577 	kmac_ctx->gr0.fc = tfm_ctx->fc;
578 
579 	/*
580 	 * Copy the pk from tfm ctx into kmac ctx. The protected key
581 	 * may be outdated but update() and final() will handle this.
582 	 */
583 	spin_lock_bh(&tfm_ctx->pk_lock);
584 	memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
585 	       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
586 	spin_unlock_bh(&tfm_ctx->pk_lock);
587 
588 out:
589 	pr_debug("rc=%d\n", rc);
590 	return rc;
591 }
592 
/*
 * phmac update: try the operation synchronously first; fall back to
 * the crypto engine if the engine is already in use for this tfm or
 * the protected key expired (re-conversion must sleep).
 * Returns 0, -EINPROGRESS when enqueued, or a negative error.
 */
static int phmac_update(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_UPDATE;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* unrecoverable failure: unmap the walk and wipe the kmac state */
	if (rc != -EINPROGRESS) {
		hwh_advance(hwh, rc);
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
636 
/*
 * phmac final: try the operation synchronously first; fall back to
 * the crypto engine if the engine is already in use for this tfm or
 * the protected key expired (re-conversion must sleep).
 * Returns 0, -EINPROGRESS when enqueued, or a negative error.
 */
static int phmac_final(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	int rc = 0;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_FINAL;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

out:
	/* wipe the kmac state unless the request is still in flight */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
671 
/*
 * phmac finup: try update and final synchronously; fall back to the
 * crypto engine if the engine is already in use for this tfm or the
 * protected key expired. async_op tracks how far the synchronous
 * attempt got: OP_FINUP means update still pending, OP_FINAL means
 * only the final step is left for the engine.
 * Returns 0, -EINPROGRESS when enqueued, or a negative error.
 */
static int phmac_finup(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	req_ctx->async_op = OP_FINUP;

	/* Try synchronous operations if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			req_ctx->async_op = OP_FINAL;
	}
	if (!rc && req_ctx->async_op == OP_FINAL &&
	    !atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		/* req->async_op has been set to either OP_FINUP or OP_FINAL */
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* unrecoverable failure: unmap the walk */
	if (rc != -EINPROGRESS)
		hwh_advance(hwh, rc);

out:
	/* wipe the kmac state unless the request is still in flight */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
723 
static int phmac_digest(struct ahash_request *req)
{
	/* digest is simply init followed by finup */
	int rc = phmac_init(req);

	if (!rc)
		rc = phmac_finup(req);

	pr_debug("rc=%d\n", rc);
	return rc;
}
738 
/*
 * phmac setkey: store the raw key material in the tfm context, derive
 * a protected key from it and select the matching KMAC function code.
 * While the selftest runs, the raw hmac clear key is first wrapped
 * into a pkey clear key token.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int tmpkeylen;
	u8 *tmpkey = NULL;
	int rc = 0;

	if (!crypto_ahash_tested(tfm)) {
		/*
		 * selftest running: key is a raw hmac clear key and needs
		 * to get embedded into a 'clear key token' in order to have
		 * it correctly processed by the pkey module.
		 */
		tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
		tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
		if (!tmpkey) {
			rc = -ENOMEM;
			goto out;
		}
		rc = make_clrkey_token(key, keylen, ds, tmpkey);
		if (rc)
			goto out;
		keylen = tmpkeylen;
		key = tmpkey;
	}

	/* copy raw key into tfm context */
	rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = phmac_convert_key(tfm_ctx);
	if (rc)
		goto out;

	/* set function code in tfm context, check for valid pk type */
	switch (ds) {
	case SHA224_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
		break;
	case SHA256_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
		break;
	case SHA384_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
		break;
	case SHA512_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
		break;
	default:
		tfm_ctx->fc = 0;
		rc = -EINVAL;
	}

out:
	kfree(tmpkey);
	pr_debug("rc=%d\n", rc);
	return rc;
}
814 
phmac_export(struct ahash_request * req,void * out)815 static int phmac_export(struct ahash_request *req, void *out)
816 {
817 	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
818 	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
819 
820 	memcpy(out, ctx, sizeof(*ctx));
821 
822 	return 0;
823 }
824 
phmac_import(struct ahash_request * req,const void * in)825 static int phmac_import(struct ahash_request *req, const void *in)
826 {
827 	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
828 	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
829 
830 	memset(req_ctx, 0, sizeof(*req_ctx));
831 	memcpy(ctx, in, sizeof(*ctx));
832 
833 	return 0;
834 }
835 
phmac_init_tfm(struct crypto_ahash * tfm)836 static int phmac_init_tfm(struct crypto_ahash *tfm)
837 {
838 	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
839 
840 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
841 	spin_lock_init(&tfm_ctx->pk_lock);
842 
843 	crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));
844 
845 	return 0;
846 }
847 
phmac_exit_tfm(struct crypto_ahash * tfm)848 static void phmac_exit_tfm(struct crypto_ahash *tfm)
849 {
850 	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
851 
852 	memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
853 	memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
854 }
855 
/*
 * Crypto engine callback: run the async op recorded in the request
 * context in a sleeping context. An expired protected key makes the
 * request re-enqueue itself (by returning -ENOSPC to the engine)
 * until the conversion has finished.
 */
static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc = -EINVAL;

	/*
	 * Three kinds of requests come in here:
	 * 1. req->async_op == OP_UPDATE with req->nbytes > 0
	 * 2. req->async_op == OP_FINUP with req->nbytes > 0
	 * 3. req->async_op == OP_FINAL
	 * For update and finup the hwh walk has already been prepared
	 * by the caller. For final there is no hwh walk needed.
	 */

	switch (req_ctx->async_op) {
	case OP_UPDATE:
	case OP_FINUP:
		rc = phmac_kmac_update(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		} else if (rc) {
			hwh_advance(hwh, rc);
			goto out;
		}
		if (req_ctx->async_op == OP_UPDATE)
			break;
		/* finup: update done, continue with the final step */
		req_ctx->async_op = OP_FINAL;
		fallthrough;
	case OP_FINAL:
		rc = phmac_kmac_final(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		}
		break;
	default:
		/* unknown/unsupported/unimplemented asynch op */
		return -EOPNOTSUPP;
	}

out:
	/* wipe the kmac state on failure or when the digest is done */
	if (rc || req_ctx->async_op == OP_FINAL)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&tfm_ctx->via_engine_ctr);
	crypto_finalize_hash_request(engine, req, rc);
	local_bh_enable();
	return rc;
}
928 
/*
 * Template for one phmac(sha<x>) ahash engine algorithm. Instantiated
 * once per supported sha2 variant in the phmac_algs[] array below.
 */
#define S390_ASYNC_PHMAC_ALG(x)						\
{									\
	.base = {							\
		.init	  = phmac_init,					\
		.update	  = phmac_update,				\
		.final	  = phmac_final,				\
		.finup	  = phmac_finup,				\
		.digest	  = phmac_digest,				\
		.setkey	  = phmac_setkey,				\
		.import	  = phmac_import,				\
		.export	  = phmac_export,				\
		.init_tfm = phmac_init_tfm,				\
		.exit_tfm = phmac_exit_tfm,				\
		.halg = {						\
			.digestsize = SHA##x##_DIGEST_SIZE,		\
			.statesize  = sizeof(struct kmac_sha2_ctx),	\
			.base = {					\
				.cra_name = "phmac(sha" #x ")",		\
				.cra_driver_name = "phmac_s390_sha" #x,	\
				.cra_blocksize = SHA##x##_BLOCK_SIZE,	\
				.cra_priority = 400,			\
				.cra_flags = CRYPTO_ALG_ASYNC |		\
					     CRYPTO_ALG_NO_FALLBACK,	\
				.cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
				.cra_module = THIS_MODULE,		\
			},						\
		},							\
	},								\
	.op = {								\
		.do_one_request = phmac_do_one_request,			\
	},								\
}
961 
/* all phmac algorithms this module may provide, see s390_phmac_init() */
static struct phmac_alg {
	unsigned int fc;		/* KMAC function code to query for */
	struct ahash_engine_alg alg;	/* the crypto engine ahash alg */
	bool registered;		/* set once registration succeeded */
} phmac_algs[] = {
	{
		.fc = CPACF_KMAC_PHMAC_SHA_224,
		.alg = S390_ASYNC_PHMAC_ALG(224),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_256,
		.alg = S390_ASYNC_PHMAC_ALG(256),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_384,
		.alg = S390_ASYNC_PHMAC_ALG(384),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_512,
		.alg = S390_ASYNC_PHMAC_ALG(512),
	}
};
981 
/* pseudo misc device acting as parent device for the crypto engine */
static struct miscdevice phmac_dev = {
	.name	= "phmac",
	.minor	= MISC_DYNAMIC_MINOR,
};
986 
s390_phmac_exit(void)987 static void s390_phmac_exit(void)
988 {
989 	struct phmac_alg *phmac;
990 	int i;
991 
992 	if (phmac_crypto_engine) {
993 		crypto_engine_stop(phmac_crypto_engine);
994 		crypto_engine_exit(phmac_crypto_engine);
995 	}
996 
997 	for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
998 		phmac = &phmac_algs[i];
999 		if (phmac->registered)
1000 			crypto_engine_unregister_ahash(&phmac->alg);
1001 	}
1002 
1003 	misc_deregister(&phmac_dev);
1004 }
1005 
/*
 * Module init: check for the required CPACF subfunctions, set up the
 * pseudo misc device and the crypto engine, then register all phmac
 * algorithms whose KMAC function code is available on this machine.
 */
static int __init s390_phmac_init(void)
{
	struct phmac_alg *phmac;
	int i, rc;

	/* for selftest cpacf klmd subfunction is needed */
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
		return -ENODEV;
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
		return -ENODEV;

	/* register a simple phmac pseudo misc device */
	rc = misc_register(&phmac_dev);
	if (rc)
		return rc;

	/* with this pseudo device alloc and start a crypto engine */
	phmac_crypto_engine =
		crypto_engine_alloc_init_and_set(phmac_dev.this_device,
						 true, false, MAX_QLEN);
	if (!phmac_crypto_engine) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crypto_engine_start(phmac_crypto_engine);
	if (rc) {
		crypto_engine_exit(phmac_crypto_engine);
		phmac_crypto_engine = NULL;
		goto out_err;
	}

	/* register each alg whose KMAC function code is available */
	for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
		phmac = &phmac_algs[i];
		if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
			continue;
		rc = crypto_engine_register_ahash(&phmac->alg);
		if (rc)
			goto out_err;
		phmac->registered = true;
		pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
	}

	return 0;

out_err:
	/* s390_phmac_exit() handles partially completed setup */
	s390_phmac_exit();
	return rc;
}
1054 
1055 module_init(s390_phmac_init);
1056 module_exit(s390_phmac_exit);
1057 
1058 MODULE_ALIAS_CRYPTO("phmac(sha224)");
1059 MODULE_ALIAS_CRYPTO("phmac(sha256)");
1060 MODULE_ALIAS_CRYPTO("phmac(sha384)");
1061 MODULE_ALIAS_CRYPTO("phmac(sha512)");
1062 
1063 MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
1064 MODULE_LICENSE("GPL");
1065