// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-common.h"
#include "eip93-main.h"
#include "eip93-regs.h"

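/*
 * Translate the PE control/status error bits of a result descriptor into an
 * errno. For example, an authentication failure (EIP93_PE_CTRL_PE_AUTH_ERR)
 * is reported as -EBADMSG so callers can tell a bad tag from a transport
 * error, while sequence-number (anti-replay) errors are deliberately reported
 * as success and left to software. Extended error codes are parsed in the
 * second switch below.
 */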
int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
{
	u32 ext_err;

	if (!err)
		return 0;

	switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
	case EIP93_PE_CTRL_PE_AUTH_ERR:
	case EIP93_PE_CTRL_PE_PAD_ERR:
		return -EBADMSG;
	/* let software handle anti-replay errors */
	case EIP93_PE_CTRL_PE_SEQNUM_ERR:
		return 0;
	case EIP93_PE_CTRL_PE_EXT_ERR:
		break;
	default:
		dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
		return -EINVAL;
	}

	/* Parse additional ext errors */
	ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
	switch (ext_err) {
	case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
	case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
		return -EIO;
	case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
		return -EACCES;
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
	case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
		return -EINVAL;
	case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
		return -EBADMSG;
	default:
		dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
		return -EINVAL;
	}
}

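/*
 * Ring helpers: the command (CDR) and result (RDR) descriptor rings are
 * plain circular buffers. The write helper returns -ENOMEM when the ring is
 * full (write pointer one slot behind the read pointer, including the wrap
 * case); the read helper returns -ENOENT when the ring is empty.
 */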
static void *eip93_ring_next_wptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->write;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

static void *eip93_ring_next_rptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

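/*
 * eip93_put_descriptor() queues a command descriptor in the CDR and reserves
 * (zeroes) the matching slot in the RDR; eip93_get_descriptor() consumes the
 * next command descriptor and returns the corresponding result descriptor.
 * Callers are expected to serialize access themselves; the write path in
 * this file does so under ring->write_lock.
 */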
int eip93_put_descriptor(struct eip93_device *eip93,
			 struct eip93_descriptor *desc)
{
	struct eip93_descriptor *cdesc;
	struct eip93_descriptor *rdesc;

	rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(rdesc))
		return -ENOENT;

	cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return -ENOENT;

	memset(rdesc, 0, sizeof(struct eip93_descriptor));

	memcpy(cdesc, desc, sizeof(struct eip93_descriptor));

	return 0;
}

void *eip93_get_descriptor(struct eip93_device *eip93)
{
	struct eip93_descriptor *cdesc;
	void *ptr;

	cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return ERR_PTR(-ENOENT);

	memset(cdesc, 0, sizeof(struct eip93_descriptor));

	ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(ptr))
		return ERR_PTR(-ENOENT);

	return ptr;
}

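/*
 * Bounce-buffer helpers: when a scatterlist does not meet the engine's
 * alignment requirements, eip93_make_sg_copy() allocates a single
 * physically contiguous DMA-able buffer wrapped in a one-entry scatterlist
 * and (optionally) copies the source data into it; eip93_free_sg_copy()
 * releases such a copy.
 */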
static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
{
	if (!*sg || !len)
		return;

	free_pages((unsigned long)sg_virt(*sg), get_order(len));
	kfree(*sg);
	*sg = NULL;
}

static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
			      const u32 len, const bool copy)
{
	void *pages;

	*dst = kmalloc(sizeof(**dst), GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;

	pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					 get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	/* copy only as requested */
	if (copy)
		sg_copy_to_buffer(src, sg_nents(src), pages, len);

	return 0;
}

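/*
 * Check whether the first 'len' bytes of a scatterlist can be handed to the
 * engine directly: every segment must start on a 32-bit boundary,
 * intermediate segments must be a multiple of the cipher block size, and the
 * total length must be block aligned.
 */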
static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
				const int blksize)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, 4))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, blksize))
				return false;

			return true;
		}

		if (!IS_ALIGNED(sg->length, blksize))
			return false;

		len -= sg->length;
	}
	return false;
}

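/*
 * Validate a cipher request and, if the source or destination scatterlist is
 * not suitably aligned for the engine, replace it with a bounce-buffer copy.
 * The nents counts are then recomputed against the (possibly new) lists.
 */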
int check_valid_request(struct eip93_cipher_reqctx *rctx)
{
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	u32 src_nents, dst_nents;
	u32 textsize = rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 blksize = rctx->blksize;
	u32 totlen_src = rctx->assoclen + rctx->textsize;
	u32 totlen_dst = rctx->assoclen + rctx->textsize;
	u32 copy_len;
	bool src_align, dst_align;
	int err = -EINVAL;

	if (!IS_CTR(rctx->flags)) {
		if (!IS_ALIGNED(textsize, blksize))
			return err;
	}

	if (authsize) {
		if (IS_ENCRYPT(rctx->flags))
			totlen_dst += authsize;
		else
			totlen_src += authsize;
	}

	src_nents = sg_nents_for_len(src, totlen_src);
	dst_nents = sg_nents_for_len(dst, totlen_dst);

	if (src == dst) {
		src_nents = max(src_nents, dst_nents);
		dst_nents = src_nents;
		if (unlikely((totlen_src || totlen_dst) && src_nents <= 0))
			return err;

	} else {
		if (unlikely(totlen_src && src_nents <= 0))
			return err;

		if (unlikely(totlen_dst && dst_nents <= 0))
			return err;
	}

	if (authsize) {
		if (dst_nents == 1 && src_nents == 1) {
			src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
			if (src == dst)
				dst_align = src_align;
			else
				dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
		} else {
			src_align = false;
			dst_align = false;
		}
	} else {
		src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
		if (src == dst)
			dst_align = src_align;
		else
			dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
	}

	copy_len = max(totlen_src, totlen_dst);
	if (!src_align) {
		err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
		if (err)
			return err;
	}

	if (!dst_align) {
		err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
		if (err)
			return err;
	}

	rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
	rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);

	return 0;
}

/*
 * Set sa_record function:
 * Even though sa_record is already set to "0", keep the explicit " = 0"
 * assignments for readability.
 */
void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
			 const u32 flags)
{
	/* Reset cmd word */
	sa_record->sa_cmd0_word = 0;
	sa_record->sa_cmd1_word = 0;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
	if (!IS_ECB(flags))
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;

	switch ((flags & EIP93_ALG_MASK)) {
	case EIP93_ALG_AES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
		sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
						      keylen >> 3);
		break;
	case EIP93_ALG_3DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
		break;
	case EIP93_ALG_DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
	}

	switch ((flags & EIP93_HASH_MASK)) {
	case EIP93_HASH_SHA256:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
		break;
	case EIP93_HASH_SHA224:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
		break;
	case EIP93_HASH_SHA1:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
		break;
	case EIP93_HASH_MD5:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;

	switch ((flags & EIP93_MODE_MASK)) {
	case EIP93_MODE_CBC:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
		break;
	case EIP93_MODE_CTR:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
		break;
	case EIP93_MODE_ECB:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
		break;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
	if (IS_HASH(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
	}

	if (IS_HMAC(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
	}

	sa_record->sa_spi = 0x0;
	sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
	sa_record->sa_seqmum_mask[1] = 0x0;
}

/*
 * Poor man's scatter/gather function:
 * Create a descriptor for every segment to avoid copying buffers.
 * For performance it is better to wait for the hardware to perform multiple
 * DMA transfers than to copy the buffers around.
 */
static int eip93_scatter_combine(struct eip93_device *eip93,
				 struct eip93_cipher_reqctx *rctx,
				 u32 datalen, u32 split, int offsetin)
{
	struct eip93_descriptor *cdesc = rctx->cdesc;
	struct scatterlist *sgsrc = rctx->sg_src;
	struct scatterlist *sgdst = rctx->sg_dst;
	unsigned int remainin = sg_dma_len(sgsrc);
	unsigned int remainout = sg_dma_len(sgdst);
	dma_addr_t saddr = sg_dma_address(sgsrc);
	dma_addr_t daddr = sg_dma_address(sgdst);
	dma_addr_t state_addr;
	u32 src_addr, dst_addr, len, n;
	bool nextin = false;
	bool nextout = false;
	int offsetout = 0;
	int err;

	if (IS_ECB(rctx->flags))
		rctx->sa_state_base = 0;

	if (split < datalen) {
		state_addr = rctx->sa_state_ctr_base;
		n = split;
	} else {
		state_addr = rctx->sa_state_base;
		n = datalen;
	}

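	/*
	 * Walk the source and destination scatterlists in lockstep and emit
	 * one command descriptor per contiguous chunk. 'split' marks where a
	 * CTR request crosses the 32-bit counter overflow: the first part is
	 * processed with the original counter state (sa_state_ctr), the
	 * remainder with the incremented state (sa_state).
	 */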
	do {
		if (nextin) {
			sgsrc = sg_next(sgsrc);
			remainin = sg_dma_len(sgsrc);
			if (remainin == 0)
				continue;

			saddr = sg_dma_address(sgsrc);
			offsetin = 0;
			nextin = false;
		}

		if (nextout) {
			sgdst = sg_next(sgdst);
			remainout = sg_dma_len(sgdst);
			if (remainout == 0)
				continue;

			daddr = sg_dma_address(sgdst);
			offsetout = 0;
			nextout = false;
		}
		src_addr = saddr + offsetin;
		dst_addr = daddr + offsetout;

		if (remainin == remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				nextin = true;
				nextout = true;
			}
		} else if (remainin < remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetout += len;
				remainout -= len;
				nextin = true;
			}
		} else {
			len = remainout;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetin += len;
				remainin -= len;
				nextout = true;
			}
		}
		n -= len;

		cdesc->src_addr = src_addr;
		cdesc->dst_addr = dst_addr;
		cdesc->state_addr = state_addr;
		cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
						   EIP93_PE_LENGTH_HOST_READY);
		cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);

		if (n == 0) {
			n = datalen - split;
			split = datalen;
			state_addr = rctx->sa_state_base;
		}

		if (n == 0)
			cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
						     EIP93_DESC_LAST);

		/*
		 * Loop with a delay - there is no need to roll back.
		 * This could be refined by slowing down at EIP93_RING_BUSY.
		 */
again:
		scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
			err = eip93_put_descriptor(eip93, cdesc);
		if (err) {
			usleep_range(EIP93_RING_BUSY_DELAY,
				     EIP93_RING_BUSY_DELAY * 2);
			goto again;
		}
		/* Writing new descriptor count starts DMA action */
		writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
	} while (n);

	return -EINPROGRESS;
}

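/*
 * Build and queue the command descriptor(s) for a cipher request. The IV
 * handling sets up one (or, for a CTR counter overflow, two) sa_state blocks
 * and DMA-maps them along with the source and destination scatterlists, then
 * hands everything to eip93_scatter_combine().
 */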
int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
{
	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
	struct eip93_device *eip93 = ctx->eip93;
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	struct sa_state *sa_state;
	struct eip93_descriptor cdesc;
	u32 flags = rctx->flags;
	int offsetin = 0, err;
	u32 datalen = rctx->assoclen + rctx->textsize;
	u32 split = datalen;
	u32 start, end, ctr, blocks;
	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	int crypto_async_idr;

	rctx->sa_state_ctr = NULL;
	rctx->sa_state = NULL;

	if (IS_ECB(flags))
		goto skip_iv;

	memcpy(iv, reqiv, rctx->ivsize);

	rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
	if (!rctx->sa_state)
		return -ENOMEM;

	sa_state = rctx->sa_state;

	memcpy(sa_state->state_iv, iv, rctx->ivsize);
	if (IS_RFC3686(flags)) {
		sa_state->state_iv[0] = ctx->sa_nonce;
		sa_state->state_iv[1] = iv[0];
		sa_state->state_iv[2] = iv[1];
		sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
	} else if (!IS_HMAC(flags) && IS_CTR(flags)) {
		/* Compute the number of AES blocks to process. */
		blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
		ctr = be32_to_cpu((__be32 __force)iv[3]);
		/* Check for 32-bit counter overflow. */
		start = ctr;
		end = start + blocks - 1;
		if (end < start) {
			split = AES_BLOCK_SIZE * -start;
			/*
			 * Increment the counter manually to cope with
			 * the hardware counter overflow.
			 */
			iv[3] = 0xffffffff;
			crypto_inc((u8 *)iv, AES_BLOCK_SIZE);

			rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
						     GFP_KERNEL);
			if (!rctx->sa_state_ctr) {
				err = -ENOMEM;
				goto free_sa_state;
			}

			memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
			memcpy(sa_state->state_iv, iv, rctx->ivsize);

			rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
								 sizeof(*rctx->sa_state_ctr),
								 DMA_TO_DEVICE);
			err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
			if (err)
				goto free_sa_state_ctr;
		}
	}

	rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
					     sizeof(*rctx->sa_state), DMA_TO_DEVICE);
	err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (err)
		goto free_sa_state_ctr_dma;

skip_iv:

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
		crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
					     EIP93_RING_NUM - 1, GFP_ATOMIC);

	cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
			FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);

	rctx->cdesc = &cdesc;

	/*
	 * Map DMA_BIDIRECTIONAL to invalidate the cache on the destination;
	 * this implies __dma_cache_wback_inv.
	 */
	if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
		err = -ENOMEM;
		goto free_sa_state_ctr_dma;
	}

	if (src != dst &&
	    !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
		err = -ENOMEM;
		goto free_sg_dma;
	}

	return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);

free_sg_dma:
	dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
free_sa_state_ctr_dma:
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_TO_DEVICE);
free_sa_state_ctr:
	kfree(rctx->sa_state_ctr);
	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_TO_DEVICE);
free_sa_state:
	kfree(rctx->sa_state);

	return err;
}

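/*
 * Undo the DMA mappings of a completed request, convert any authentication
 * tag to host byte order and copy bounce-buffer results back into the
 * caller's destination scatterlist.
 */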
void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst)
{
	u32 len = rctx->assoclen + rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 flags = rctx->flags;
	u32 *otag;
	int i;

	if (rctx->sg_src == rctx->sg_dst) {
		dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
			     DMA_BIDIRECTIONAL);
		goto process_tag;
	}

	dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
		     DMA_TO_DEVICE);

	if (rctx->sg_src != reqsrc)
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);

	dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
		     DMA_BIDIRECTIONAL);

	/* SHA tags need conversion from net-to-host */
process_tag:
	if (IS_DECRYPT(flags))
		authsize = 0;

	if (authsize) {
		if (!IS_HASH_MD5(flags)) {
			otag = sg_virt(rctx->sg_dst) + len;
			for (i = 0; i < (authsize / 4); i++)
				otag[i] = be32_to_cpu((__be32 __force)otag[i]);
		}
	}

	if (rctx->sg_dst != reqdst) {
		sg_copy_from_buffer(reqdst, sg_nents(reqdst),
				    sg_virt(rctx->sg_dst), len + authsize);
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
	}
}

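/*
 * Final per-request cleanup: unmap and free the sa_state blocks and return
 * the updated IV to the caller (except for ECB, which carries no IV).
 */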
void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv)
{
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_FROM_DEVICE);

	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_FROM_DEVICE);

	if (!IS_ECB(rctx->flags))
		memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);

	kfree(rctx->sa_state_ctr);
	kfree(rctx->sa_state);
}

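/*
 * Precompute the HMAC inner and outer digests for the given key using the
 * EIP93's own hash algorithms. The key is hashed first if it is longer than
 * the block size, then the ipad/opad blocks are built and run through a
 * partial (non-finalized) hash so that the intermediate state, rather than a
 * final digest, is returned to the caller.
 */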
int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
		      unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
		      bool skip_ipad)
{
	u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
	struct crypto_ahash *ahash_tfm;
	struct eip93_hash_reqctx *rctx;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg[1];
	const char *alg_name;
	int i, ret;

	switch (ctx_flags & EIP93_HASH_MASK) {
	case EIP93_HASH_SHA256:
		alg_name = "sha256-eip93";
		break;
	case EIP93_HASH_SHA224:
		alg_name = "sha224-eip93";
		break;
	case EIP93_HASH_SHA1:
		alg_name = "sha1-eip93";
		break;
	case EIP93_HASH_MD5:
		alg_name = "md5-eip93";
		break;
	default: /* Impossible */
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto err_ahash;
	}

	rctx = ahash_request_ctx_dma(req);
	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Hash the key if > SHA256_BLOCK_SIZE */
	if (keylen > SHA256_BLOCK_SIZE) {
		sg_init_one(&sg[0], key, keylen);

		ahash_request_set_crypt(req, sg, ipad, keylen);
		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
		if (ret)
			goto err_req;

		keylen = hashlen;
	} else {
		memcpy(ipad, key, keylen);
	}

	/* Zero-pad the key block and copy it to opad */
	memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, SHA256_BLOCK_SIZE);

	/* XOR with the HMAC ipad/opad constants */
	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	if (skip_ipad) {
		memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
	} else {
		/* Hash ipad */
		sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
		ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
		ret = crypto_ahash_init(req);
		if (ret)
			goto err_req;

		/* Disable HASH_FINALIZE for the ipad hash */
		rctx->partial_hash = true;

		ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
		if (ret)
			goto err_req;
	}

	/* Hash opad */
	sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
	ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
	ret = crypto_ahash_init(req);
	if (ret)
		goto err_req;

	/* Disable HASH_FINALIZE for the opad hash */
	rctx->partial_hash = true;

	ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
	if (ret)
		goto err_req;

	if (!IS_HASH_MD5(ctx_flags)) {
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
			u32 *ipad_hash = (u32 *)dest_ipad;
			u32 *opad_hash = (u32 *)dest_opad;

			if (!skip_ipad)
				ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
			opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
		}
	}

err_req:
	ahash_request_free(req);
err_ahash:
	crypto_free_ahash(ahash_tfm);

	return ret;
}
810