1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel Keem Bay OCS HCU Crypto Driver.
4  *
5  * Copyright (C) 2018-2020 Intel Corporation
6  */
7 
8 #include <crypto/engine.h>
9 #include <crypto/hmac.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/scatterwalk.h>
12 #include <crypto/sha2.h>
13 #include <crypto/sm3.h>
14 #include <linux/completion.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/mod_devicetable.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/string.h>
23 
24 #include "ocs-hcu.h"
25 
26 #define DRV_NAME	"keembay-ocs-hcu"
27 
28 /* Flag marking a final request. */
29 #define REQ_FINAL			BIT(0)
30 /* Flag marking a HMAC request. */
31 #define REQ_FLAGS_HMAC			BIT(1)
32 /* Flag set when HW HMAC is being used. */
33 #define REQ_FLAGS_HMAC_HW		BIT(2)
34 /* Flag set when SW HMAC is being used. */
35 #define REQ_FLAGS_HMAC_SW		BIT(3)
36 
37 /**
38  * struct ocs_hcu_ctx - OCS HCU Transform context.
39  * @hcu_dev:	 The OCS HCU device used by the transformation.
40  * @key:	 The key (used only for HMAC transformations).
41  * @key_len:	 The length of the key.
42  * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
43  * @is_hmac_tfm: Whether or not this is a HMAC transformation.
44  */
45 struct ocs_hcu_ctx {
46 	struct ocs_hcu_dev *hcu_dev;
47 	u8 key[SHA512_BLOCK_SIZE];
48 	size_t key_len;
49 	bool is_sm3_tfm;
50 	bool is_hmac_tfm;
51 };
52 
53 /**
54  * struct ocs_hcu_rctx - Context for the request.
55  * @hcu_dev:	    OCS HCU device to be used to service the request.
56  * @flags:	    Flags tracking request status.
57  * @algo:	    Algorithm to use for the request.
58  * @blk_sz:	    Block size of the transformation / request.
59  * @dig_sz:	    Digest size of the transformation / request.
60  * @dma_list:	    OCS DMA linked list.
61  * @hash_ctx:	    OCS HCU hashing context.
62  * @buffer:	    Buffer used to store a partial block of data and SW HMAC
63  *		    artifacts (ipad, opad, etc.).
64  * @buf_cnt:	    Number of bytes currently stored in the buffer.
65  * @buf_dma_addr:   The DMA address of @buffer (when mapped).
66  * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
67  * @sg:		    Head of the scatterlist entries containing data.
68  * @sg_data_total:  Total data in the SG list at any time.
69  * @sg_data_offset: Offset into the data of the current individual SG node.
70  * @sg_dma_nents:   Number of sg entries mapped in dma_list.
71  */
72 struct ocs_hcu_rctx {
73 	struct ocs_hcu_dev	*hcu_dev;
74 	u32			flags;
75 	enum ocs_hcu_algo	algo;
76 	size_t			blk_sz;
77 	size_t			dig_sz;
78 	struct ocs_hcu_dma_list	*dma_list;
79 	struct ocs_hcu_hash_ctx	hash_ctx;
80 	/*
81 	 * Buffer is double the block size because we need space for SW HMAC
82 	 * artifacts, i.e:
83 	 * - ipad (1 block) + a possible partial block of data.
84 	 * - opad (1 block) + digest of H(k ^ ipad || m)
85 	 */
86 	u8			buffer[2 * SHA512_BLOCK_SIZE];
87 	size_t			buf_cnt;
88 	dma_addr_t		buf_dma_addr;
89 	size_t			buf_dma_count;
90 	struct scatterlist	*sg;
91 	unsigned int		sg_data_total;
92 	unsigned int		sg_data_offset;
93 	unsigned int		sg_dma_nents;
94 };
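/*
 * Illustrative sketch (not part of the driver): how the request buffer is
 * used by the SW-assisted HMAC path, assuming a SHA-256 transform
 * (blk_sz = 64, dig_sz = 32); other algorithms only change the sizes.
 *
 *   update()/finup() phase: [ k ^ ipad (64 bytes) | buffered data ...     ]
 *   final OPAD step:        [ k ^ opad (64 bytes) | H(k ^ ipad || m) (32) ]
 *
 * See prepare_ipad() and the final part of kmb_ocs_hcu_do_one_request()
 * below for the code that fills the buffer this way.
 */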
95 
96 /**
97  * struct ocs_hcu_drv - Driver data
98  * @dev_list:	The list of HCU devices.
99  * @lock:	The lock protecting dev_list.
100  */
101 struct ocs_hcu_drv {
102 	struct list_head dev_list;
103 	spinlock_t lock; /* Protects dev_list. */
104 };
105 
106 static struct ocs_hcu_drv ocs_hcu = {
107 	.dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
108 	.lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
109 };
110 
111 /*
112  * Return the total amount of data in the request; that is: the data in the
113  * request buffer + the data in the sg list.
114  */
115 static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
116 {
117 	return rctx->sg_data_total + rctx->buf_cnt;
118 }
119 
120 /* Move remaining content of scatter-gather list to context buffer. */
121 static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
122 {
123 	size_t count;
124 
125 	if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
126 		WARN(1, "%s: sg data does not fit in buffer\n", __func__);
127 		return -EINVAL;
128 	}
129 
130 	while (rctx->sg_data_total) {
131 		if (!rctx->sg) {
132 			WARN(1, "%s: unexpected NULL sg\n", __func__);
133 			return -EINVAL;
134 		}
135 		/*
136 		 * If current sg has been fully processed, skip to the next
137 		 * one.
138 		 */
139 		if (rctx->sg_data_offset == rctx->sg->length) {
140 			rctx->sg = sg_next(rctx->sg);
141 			rctx->sg_data_offset = 0;
142 			continue;
143 		}
144 		/*
145 		 * Determine the maximum amount of data to copy from the node:
146 		 * the minimum of the length left in the sg node and the data
147 		 * remaining in the request.
148 		 */
149 		count = min(rctx->sg->length - rctx->sg_data_offset,
150 			    rctx->sg_data_total);
151 		/* Copy from scatter-list entry to context buffer. */
152 		scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
153 					 rctx->sg, rctx->sg_data_offset,
154 					 count, 0);
155 
156 		rctx->sg_data_offset += count;
157 		rctx->sg_data_total -= count;
158 		rctx->buf_cnt += count;
159 	}
160 
161 	return 0;
162 }
163 
164 static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
165 {
166 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
167 	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
168 
169 	/* If the HCU device for the request was previously set, return it. */
170 	if (tctx->hcu_dev)
171 		return tctx->hcu_dev;
172 
173 	/*
174 	 * Otherwise, get the first HCU device available (there should be one
175 	 * and only one device).
176 	 */
177 	spin_lock_bh(&ocs_hcu.lock);
178 	tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
179 						 struct ocs_hcu_dev,
180 						 list);
181 	spin_unlock_bh(&ocs_hcu.lock);
182 
183 	return tctx->hcu_dev;
184 }
185 
186 /* Free OCS DMA linked list and DMA-able context buffer. */
187 static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
188 				    struct ocs_hcu_rctx *rctx)
189 {
190 	struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
191 	struct device *dev = hcu_dev->dev;
192 
193 	/* Unmap rctx->buffer (if mapped). */
194 	if (rctx->buf_dma_count) {
195 		dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
196 				 DMA_TO_DEVICE);
197 		rctx->buf_dma_count = 0;
198 	}
199 
200 	/* Unmap req->src (if mapped). */
201 	if (rctx->sg_dma_nents) {
202 		dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
203 		rctx->sg_dma_nents = 0;
204 	}
205 
206 	/* Free dma_list (if allocated). */
207 	if (rctx->dma_list) {
208 		ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
209 		rctx->dma_list = NULL;
210 	}
211 }
212 
213 /*
214  * Prepare for DMA operation:
215  * - DMA-map request context buffer (if needed)
216  * - DMA-map SG list (only the entries to be processed, see note below)
217  * - Allocate OCS HCU DMA linked list (number of elements =  SG entries to
218  *   process + context buffer (if not empty)).
219  * - Add DMA-mapped request context buffer to OCS HCU DMA list.
220  * - Add SG entries to DMA list.
221  *
222  * Note: if this is a final request, we process all the data in the SG list,
223  * otherwise we can only process up to the maximum amount of block-aligned data
224  * (the remainder will be put into the context buffer and processed in the next
225  * request).
226  */
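/*
 * Worked example (illustrative figures, not from the driver): for a
 * non-final request with blk_sz = 64, buf_cnt = 10 and sg_data_total = 100,
 * the total is 110 bytes and the remainder is 110 % 64 = 46. The DMA list
 * then covers 64 bytes (the 10 buffered bytes plus 54 bytes from the SG
 * list), while the remaining 46 SG bytes are later copied into the context
 * buffer by flush_sg_to_ocs_buffer() and processed with the next request.
 */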
227 static int kmb_ocs_dma_prepare(struct ahash_request *req)
228 {
229 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
230 	struct device *dev = rctx->hcu_dev->dev;
231 	unsigned int remainder = 0;
232 	unsigned int total;
233 	size_t nents;
234 	size_t count;
235 	int rc;
236 	int i;
237 
238 	/* This function should be called only when there is data to process. */
239 	total = kmb_get_total_data(rctx);
240 	if (!total)
241 		return -EINVAL;
242 
243 	/*
244 	 * If this is not a final DMA (terminated DMA), the data passed to the
245 	 * HCU must be aligned to the block size; compute the remainder data to
246 	 * be processed in the next request.
247 	 */
248 	if (!(rctx->flags & REQ_FINAL))
249 		remainder = total % rctx->blk_sz;
250 
251 	/* Determine the number of scatter gather list entries to process. */
252 	nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
253 
254 	/* If there are entries to process, map them. */
255 	if (nents) {
256 		rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
257 						DMA_TO_DEVICE);
258 		if (!rctx->sg_dma_nents) {
259 			dev_err(dev, "Failed to MAP SG\n");
260 			rc = -ENOMEM;
261 			goto cleanup;
262 		}
263 		/*
264 		 * The value returned by dma_map_sg() can be < nents; so update
265 		 * nents accordingly.
266 		 */
267 		nents = rctx->sg_dma_nents;
268 	}
269 
270 	/*
271 	 * If context buffer is not empty, map it and add extra DMA entry for
272 	 * it.
273 	 */
274 	if (rctx->buf_cnt) {
275 		rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
276 						    rctx->buf_cnt,
277 						    DMA_TO_DEVICE);
278 		if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
279 			dev_err(dev, "Failed to map request context buffer\n");
280 			rc = -ENOMEM;
281 			goto cleanup;
282 		}
283 		rctx->buf_dma_count = rctx->buf_cnt;
284 		/* Increase number of dma entries. */
285 		nents++;
286 	}
287 
288 	/* Allocate OCS HCU DMA list. */
289 	rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
290 	if (!rctx->dma_list) {
291 		rc = -ENOMEM;
292 		goto cleanup;
293 	}
294 
295 	/* Add request context buffer (if previously DMA-mapped) */
296 	if (rctx->buf_dma_count) {
297 		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
298 					       rctx->buf_dma_addr,
299 					       rctx->buf_dma_count);
300 		if (rc)
301 			goto cleanup;
302 	}
303 
304 	/* Add the SG nodes to be processed to the DMA linked list. */
305 	for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
306 		/*
307 		 * The number of bytes to add to the list entry is the minimum
308 		 * between:
309 		 * - The DMA length of the SG entry.
310 		 * - The data left to be processed.
311 		 */
312 		count = min(rctx->sg_data_total - remainder,
313 			    sg_dma_len(rctx->sg) - rctx->sg_data_offset);
314 		/*
315 		 * Do not create a zero-length DMA descriptor; this can happen
316 		 * when an SG node has zero length.
317 		 */
318 		if (count == 0)
319 			continue;
320 		/* Add sg to HCU DMA list. */
321 		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
322 					       rctx->dma_list,
323 					       rctx->sg->dma_address,
324 					       count);
325 		if (rc)
326 			goto cleanup;
327 
328 		/* Update amount of data remaining in SG list. */
329 		rctx->sg_data_total -= count;
330 
331 		/*
332 		 * If the remaining data equals the remainder (note: the 'less
333 		 * than' case should never happen in practice), we are done:
334 		 * update the offset and exit the loop.
335 		 */
336 		if (rctx->sg_data_total <= remainder) {
337 			WARN_ON(rctx->sg_data_total < remainder);
338 			rctx->sg_data_offset += count;
339 			break;
340 		}
341 
342 		/*
343 		 * If we get here, we need to process the next sg in the list;
344 		 * set the offset within the sg to 0.
345 		 */
346 		rctx->sg_data_offset = 0;
347 	}
348 
349 	return 0;
350 cleanup:
351 	dev_err(dev, "Failed to prepare DMA.\n");
352 	kmb_ocs_hcu_dma_cleanup(req, rctx);
353 
354 	return rc;
355 }
356 
357 static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
358 {
359 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
360 
361 	/* Clear buffer of any data. */
362 	memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
363 }
364 
365 static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
366 {
367 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
368 
369 	if (!hcu_dev)
370 		return -ENOENT;
371 
372 	return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
373 }
374 
375 static int prepare_ipad(struct ahash_request *req)
376 {
377 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
378 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
379 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
380 	int i;
381 
382 	WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
383 	WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
384 	     "%s: HMAC_SW flag is not set\n", __func__);
385 	/*
386 	 * Key length must be equal to block size. If key is shorter,
387 	 * we pad it with zeros (note: the key cannot be longer, since
388 	 * longer keys are hashed by kmb_ocs_hcu_setkey()).
389 	 */
390 	if (ctx->key_len > rctx->blk_sz) {
391 		WARN(1, "%s: Invalid key length in tfm context\n", __func__);
392 		return -EINVAL;
393 	}
394 	memzero_explicit(&ctx->key[ctx->key_len],
395 			 rctx->blk_sz - ctx->key_len);
396 	ctx->key_len = rctx->blk_sz;
397 	/*
398 	 * Prepare IPAD for HMAC. Only done for first block.
399 	 * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
400 	 * k ^ ipad will be first hashed block.
401 	 * k ^ opad will be calculated in the final request.
402 	 * Only needed if not using HW HMAC.
403 	 */
404 	for (i = 0; i < rctx->blk_sz; i++)
405 		rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
406 	rctx->buf_cnt = rctx->blk_sz;
407 
408 	return 0;
409 }
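/*
 * Illustrative example (key byte chosen arbitrarily, not from the driver):
 * with HMAC_IPAD_VALUE == 0x36 and HMAC_OPAD_VALUE == 0x5c (from
 * <crypto/hmac.h>), a key shorter than the block size is zero-padded and
 * XORed byte by byte, e.g. for key[0] = 0x0b:
 *
 *   ipad block (hashed first):           buffer[0] = 0x0b ^ 0x36 = 0x3d
 *   opad block (used in the final step): buffer[0] = 0x0b ^ 0x5c = 0x57
 */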
410 
411 static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
412 {
413 	struct ahash_request *req = container_of(areq, struct ahash_request,
414 						 base);
415 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
416 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
417 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
418 	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
419 	int rc;
420 	int i;
421 
422 	if (!hcu_dev) {
423 		rc = -ENOENT;
424 		goto error;
425 	}
426 
427 	/*
428 	 * If hardware HMAC flag is set, perform HMAC in hardware.
429 	 *
430 	 * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
431 	 */
432 	if (rctx->flags & REQ_FLAGS_HMAC_HW) {
433 		/* Map input data into the HCU DMA linked list. */
434 		rc = kmb_ocs_dma_prepare(req);
435 		if (rc)
436 			goto error;
437 
438 		rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
439 				  rctx->dma_list, req->result, rctx->dig_sz);
440 
441 		/* Unmap data and free DMA list regardless of return code. */
442 		kmb_ocs_hcu_dma_cleanup(req, rctx);
443 
444 		/* Process previous return code. */
445 		if (rc)
446 			goto error;
447 
448 		goto done;
449 	}
450 
451 	/* Handle update request case. */
452 	if (!(rctx->flags & REQ_FINAL)) {
453 		/* Update should always have input data. */
454 		if (!kmb_get_total_data(rctx))
455 			return -EINVAL;
456 
457 		/* Map input data into the HCU DMA linked list. */
458 		rc = kmb_ocs_dma_prepare(req);
459 		if (rc)
460 			goto error;
461 
462 		/* Do hashing step. */
463 		rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
464 					 rctx->dma_list);
465 
466 		/* Unmap data and free DMA list regardless of return code. */
467 		kmb_ocs_hcu_dma_cleanup(req, rctx);
468 
469 		/* Process previous return code. */
470 		if (rc)
471 			goto error;
472 
473 		/*
474 		 * Reset request buffer count (data in the buffer was just
475 		 * processed).
476 		 */
477 		rctx->buf_cnt = 0;
478 		/*
479 		 * Move remaining sg data into the request buffer, so that it
480 		 * will be processed during the next request.
481 		 *
482 		 * NOTE: we have remaining data if kmb_get_total_data() was not
483 		 * a multiple of block size.
484 		 */
485 		rc = flush_sg_to_ocs_buffer(rctx);
486 		if (rc)
487 			goto error;
488 
489 		goto done;
490 	}
491 
492 	/* If we get here, this is a final request. */
493 
494 	/* If there is data to process, use finup. */
495 	if (kmb_get_total_data(rctx)) {
496 		/* Map input data into the HCU DMA linked list. */
497 		rc = kmb_ocs_dma_prepare(req);
498 		if (rc)
499 			goto error;
500 
501 		/* Do hashing step. */
502 		rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
503 					rctx->dma_list,
504 					req->result, rctx->dig_sz);
505 		/* Free DMA list regardless of return code. */
506 		kmb_ocs_hcu_dma_cleanup(req, rctx);
507 
508 		/* Process previous return code. */
509 		if (rc)
510 			goto error;
511 
512 	} else {  /* Otherwise (if we have no data), use final. */
513 		rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
514 					rctx->dig_sz);
515 		if (rc)
516 			goto error;
517 	}
518 
519 	/*
520 	 * If we are finalizing a SW HMAC request, we just computed the result
521 	 * of: H(k ^ ipad || m).
522 	 *
523 	 * We now need to complete the HMAC calculation with the OPAD step,
524 	 * that is, we need to compute H(k ^ opad || digest), where digest is
525 	 * the digest we just obtained, i.e., H(k ^ ipad || m).
526 	 */
527 	if (rctx->flags & REQ_FLAGS_HMAC_SW) {
528 		/*
529 		 * Compute k ^ opad and store it in the request buffer (which
530 		 * is not used anymore at this point).
531 		 * Note: key has been padded / hashed already (so keylen ==
532 		 * blksz) .
533 		 * blksz).
534 		WARN_ON(tctx->key_len != rctx->blk_sz);
535 		for (i = 0; i < rctx->blk_sz; i++)
536 			rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
537 		/* Now append the digest to the rest of the buffer. */
538 		for (i = 0; (i < rctx->dig_sz); i++)
539 			rctx->buffer[rctx->blk_sz + i] = req->result[i];
540 
541 		/* Now hash the buffer to obtain the final HMAC. */
542 		rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
543 				    rctx->blk_sz + rctx->dig_sz, req->result,
544 				    rctx->dig_sz);
545 		if (rc)
546 			goto error;
547 	}
548 
549 	/* Perform secure clean-up. */
550 	kmb_ocs_hcu_secure_cleanup(req);
551 done:
552 	crypto_finalize_hash_request(hcu_dev->engine, req, 0);
553 
554 	return 0;
555 
556 error:
557 	kmb_ocs_hcu_secure_cleanup(req);
558 	return rc;
559 }
560 
561 static int kmb_ocs_hcu_init(struct ahash_request *req)
562 {
563 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
564 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
565 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
566 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
567 
568 	if (!hcu_dev)
569 		return -ENOENT;
570 
571 	/* Initialize entire request context to zero. */
572 	memset(rctx, 0, sizeof(*rctx));
573 
574 	rctx->hcu_dev = hcu_dev;
575 	rctx->dig_sz = crypto_ahash_digestsize(tfm);
576 
577 	switch (rctx->dig_sz) {
578 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
579 	case SHA224_DIGEST_SIZE:
580 		rctx->blk_sz = SHA224_BLOCK_SIZE;
581 		rctx->algo = OCS_HCU_ALGO_SHA224;
582 		break;
583 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
584 	case SHA256_DIGEST_SIZE:
585 		rctx->blk_sz = SHA256_BLOCK_SIZE;
586 		/*
587 		 * SHA256 and SM3 have the same digest size: use info from tfm
588 		 * context to find out which one we should use.
589 		 */
590 		rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
591 					       OCS_HCU_ALGO_SHA256;
592 		break;
593 	case SHA384_DIGEST_SIZE:
594 		rctx->blk_sz = SHA384_BLOCK_SIZE;
595 		rctx->algo = OCS_HCU_ALGO_SHA384;
596 		break;
597 	case SHA512_DIGEST_SIZE:
598 		rctx->blk_sz = SHA512_BLOCK_SIZE;
599 		rctx->algo = OCS_HCU_ALGO_SHA512;
600 		break;
601 	default:
602 		return -EINVAL;
603 	}
604 
605 	/* Initialize intermediate data. */
606 	ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
607 
608 	/* If this is an HMAC request, set the HMAC flag. */
609 	if (ctx->is_hmac_tfm)
610 		rctx->flags |= REQ_FLAGS_HMAC;
611 
612 	return 0;
613 }
614 
615 static int kmb_ocs_hcu_update(struct ahash_request *req)
616 {
617 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
618 	int rc;
619 
620 	if (!req->nbytes)
621 		return 0;
622 
623 	rctx->sg_data_total = req->nbytes;
624 	rctx->sg_data_offset = 0;
625 	rctx->sg = req->src;
626 
627 	/*
628 	 * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
629 	 * HMAC does not support context switching (so it can only be used
630 	 * with finup() or digest()).
631 	 */
632 	if (rctx->flags & REQ_FLAGS_HMAC &&
633 	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
634 		rctx->flags |= REQ_FLAGS_HMAC_SW;
635 		rc = prepare_ipad(req);
636 		if (rc)
637 			return rc;
638 	}
639 
640 	/*
641 	 * If remaining sg_data fits into ctx buffer, just copy it there; we'll
642 	 * process it at the next update() or final().
643 	 */
644 	if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
645 		return flush_sg_to_ocs_buffer(rctx);
646 
647 	return kmb_ocs_hcu_handle_queue(req);
648 }
649 
650 /* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
651 static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
652 {
653 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
654 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
655 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
656 	int rc;
657 
658 	rctx->flags |= REQ_FINAL;
659 
660 	/*
661 	 * If this is a HMAC request and, so far, we didn't have to switch to
662 	 * SW HMAC, check if we can use HW HMAC.
663 	 */
664 	if (rctx->flags & REQ_FLAGS_HMAC &&
665 	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
666 		/*
667 		 * If we are here, it means we never processed any data so far,
668 		 * so we can use HW HMAC, but only if there is some data to
669 		 * process (since OCS HW HMAC does not support zero-length
670 		 * messages) and the key length is supported by the hardware
671 		 * (OCS HCU HW only supports key lengths <= 64 bytes); if HW
672 		 * HMAC cannot be used, fall back to SW-assisted HMAC.
673 		 */
674 		if (kmb_get_total_data(rctx) &&
675 		    ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
676 			rctx->flags |= REQ_FLAGS_HMAC_HW;
677 		} else {
678 			rctx->flags |= REQ_FLAGS_HMAC_SW;
679 			rc = prepare_ipad(req);
680 			if (rc)
681 				return rc;
682 		}
683 	}
684 
685 	return kmb_ocs_hcu_handle_queue(req);
686 }
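/*
 * Summary of the HMAC mode selection above (descriptive only):
 *
 * - update() already processed data       -> SW-assisted HMAC (ipad prepared).
 * - final/finup with data to process and
 *   key_len <= OCS_HCU_HW_KEY_LEN         -> HW HMAC.
 * - otherwise (no data, or key too long)  -> SW-assisted HMAC.
 */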
687 
688 static int kmb_ocs_hcu_final(struct ahash_request *req)
689 {
690 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
691 
692 	rctx->sg_data_total = 0;
693 	rctx->sg_data_offset = 0;
694 	rctx->sg = NULL;
695 
696 	return kmb_ocs_hcu_fin_common(req);
697 }
698 
699 static int kmb_ocs_hcu_finup(struct ahash_request *req)
700 {
701 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
702 
703 	rctx->sg_data_total = req->nbytes;
704 	rctx->sg_data_offset = 0;
705 	rctx->sg = req->src;
706 
707 	return kmb_ocs_hcu_fin_common(req);
708 }
709 
710 static int kmb_ocs_hcu_digest(struct ahash_request *req)
711 {
712 	int rc = 0;
713 	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
714 
715 	if (!hcu_dev)
716 		return -ENOENT;
717 
718 	rc = kmb_ocs_hcu_init(req);
719 	if (rc)
720 		return rc;
721 
722 	rc = kmb_ocs_hcu_finup(req);
723 
724 	return rc;
725 }
726 
727 static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
728 {
729 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
730 
731 	/* Intermediate data is always stored and applied per request. */
732 	memcpy(out, rctx, sizeof(*rctx));
733 
734 	return 0;
735 }
736 
737 static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
738 {
739 	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
740 
741 	/* Intermediate data is always stored and applied per request. */
742 	memcpy(rctx, in, sizeof(*rctx));
743 
744 	return 0;
745 }
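/*
 * Note (descriptive only): export/import copy the whole request context, so
 * .statesize in the algorithm definitions below is sizeof(struct
 * ocs_hcu_rctx); an in-progress hash can thus be saved after an update and
 * resumed on a different request.
 */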
746 
747 static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
748 			      unsigned int keylen)
749 {
750 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
751 	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
752 	size_t blk_sz = crypto_ahash_blocksize(tfm);
753 	struct crypto_ahash *ahash_tfm;
754 	struct ahash_request *req;
755 	struct crypto_wait wait;
756 	struct scatterlist sg;
757 	const char *alg_name;
758 	int rc;
759 
760 	/*
761 	 * Key length must be equal to block size:
762 	 * - If key is shorter, we are done for now (the key will be padded
763 	 *   later on); this is to maximize the use of HW HMAC (which works
764 	 *   only for keys <= 64 bytes).
765 	 * - If key is longer, we hash it.
766 	 */
767 	if (keylen <= blk_sz) {
768 		memcpy(ctx->key, key, keylen);
769 		ctx->key_len = keylen;
770 		return 0;
771 	}
772 
773 	switch (digestsize) {
774 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
775 	case SHA224_DIGEST_SIZE:
776 		alg_name = "sha224-keembay-ocs";
777 		break;
778 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
779 	case SHA256_DIGEST_SIZE:
780 		alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
781 					     "sha256-keembay-ocs";
782 		break;
783 	case SHA384_DIGEST_SIZE:
784 		alg_name = "sha384-keembay-ocs";
785 		break;
786 	case SHA512_DIGEST_SIZE:
787 		alg_name = "sha512-keembay-ocs";
788 		break;
789 	default:
790 		return -EINVAL;
791 	}
792 
793 	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
794 	if (IS_ERR(ahash_tfm))
795 		return PTR_ERR(ahash_tfm);
796 
797 	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
798 	if (!req) {
799 		rc = -ENOMEM;
800 		goto err_free_ahash;
801 	}
802 
803 	crypto_init_wait(&wait);
804 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
805 				   crypto_req_done, &wait);
806 	crypto_ahash_clear_flags(ahash_tfm, ~0);
807 
808 	sg_init_one(&sg, key, keylen);
809 	ahash_request_set_crypt(req, &sg, ctx->key, keylen);
810 
811 	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
812 	if (rc == 0)
813 		ctx->key_len = digestsize;
814 
815 	ahash_request_free(req);
816 err_free_ahash:
817 	crypto_free_ahash(ahash_tfm);
818 
819 	return rc;
820 }
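/*
 * Example of the key handling above (illustrative numbers, not from the
 * driver): for "hmac(sha256)" the block size is 64 bytes, so a 20-byte key
 * is copied into ctx->key as-is (it is zero-padded later, in prepare_ipad(),
 * if the SW path is taken), while a 100-byte key is first hashed with
 * "sha256-keembay-ocs" and the resulting 32-byte digest becomes the key.
 */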
821 
822 /* Set request size and initialize tfm context. */
823 static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
824 {
825 	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
826 				     sizeof(struct ocs_hcu_rctx));
827 }
828 
829 static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
830 {
831 	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
832 
833 	__cra_init(tfm, ctx);
834 
835 	return 0;
836 }
837 
838 static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
839 {
840 	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
841 
842 	__cra_init(tfm, ctx);
843 
844 	ctx->is_sm3_tfm = true;
845 
846 	return 0;
847 }
848 
849 static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
850 {
851 	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
852 
853 	__cra_init(tfm, ctx);
854 
855 	ctx->is_sm3_tfm = true;
856 	ctx->is_hmac_tfm = true;
857 
858 	return 0;
859 }
860 
861 static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
862 {
863 	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
864 
865 	__cra_init(tfm, ctx);
866 
867 	ctx->is_hmac_tfm = true;
868 
869 	return 0;
870 }
871 
872 /* Function called when 'tfm' is de-initialized. */
873 static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
874 {
875 	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
876 
877 	/* Clear the key. */
878 	memzero_explicit(ctx->key, sizeof(ctx->key));
879 }
880 
881 static struct ahash_engine_alg ocs_hcu_algs[] = {
882 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
883 {
884 	.base.init		= kmb_ocs_hcu_init,
885 	.base.update		= kmb_ocs_hcu_update,
886 	.base.final		= kmb_ocs_hcu_final,
887 	.base.finup		= kmb_ocs_hcu_finup,
888 	.base.digest		= kmb_ocs_hcu_digest,
889 	.base.export		= kmb_ocs_hcu_export,
890 	.base.import		= kmb_ocs_hcu_import,
891 	.base.halg = {
892 		.digestsize	= SHA224_DIGEST_SIZE,
893 		.statesize	= sizeof(struct ocs_hcu_rctx),
894 		.base	= {
895 			.cra_name		= "sha224",
896 			.cra_driver_name	= "sha224-keembay-ocs",
897 			.cra_priority		= 255,
898 			.cra_flags		= CRYPTO_ALG_ASYNC,
899 			.cra_blocksize		= SHA224_BLOCK_SIZE,
900 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
901 			.cra_alignmask		= 0,
902 			.cra_module		= THIS_MODULE,
903 			.cra_init		= kmb_ocs_hcu_sha_cra_init,
904 		}
905 	},
906 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
907 },
908 {
909 	.base.init		= kmb_ocs_hcu_init,
910 	.base.update		= kmb_ocs_hcu_update,
911 	.base.final		= kmb_ocs_hcu_final,
912 	.base.finup		= kmb_ocs_hcu_finup,
913 	.base.digest		= kmb_ocs_hcu_digest,
914 	.base.export		= kmb_ocs_hcu_export,
915 	.base.import		= kmb_ocs_hcu_import,
916 	.base.setkey		= kmb_ocs_hcu_setkey,
917 	.base.halg = {
918 		.digestsize	= SHA224_DIGEST_SIZE,
919 		.statesize	= sizeof(struct ocs_hcu_rctx),
920 		.base	= {
921 			.cra_name		= "hmac(sha224)",
922 			.cra_driver_name	= "hmac-sha224-keembay-ocs",
923 			.cra_priority		= 255,
924 			.cra_flags		= CRYPTO_ALG_ASYNC,
925 			.cra_blocksize		= SHA224_BLOCK_SIZE,
926 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
927 			.cra_alignmask		= 0,
928 			.cra_module		= THIS_MODULE,
929 			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
930 			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
931 		}
932 	},
933 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
934 },
935 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
936 {
937 	.base.init		= kmb_ocs_hcu_init,
938 	.base.update		= kmb_ocs_hcu_update,
939 	.base.final		= kmb_ocs_hcu_final,
940 	.base.finup		= kmb_ocs_hcu_finup,
941 	.base.digest		= kmb_ocs_hcu_digest,
942 	.base.export		= kmb_ocs_hcu_export,
943 	.base.import		= kmb_ocs_hcu_import,
944 	.base.halg = {
945 		.digestsize	= SHA256_DIGEST_SIZE,
946 		.statesize	= sizeof(struct ocs_hcu_rctx),
947 		.base	= {
948 			.cra_name		= "sha256",
949 			.cra_driver_name	= "sha256-keembay-ocs",
950 			.cra_priority		= 255,
951 			.cra_flags		= CRYPTO_ALG_ASYNC,
952 			.cra_blocksize		= SHA256_BLOCK_SIZE,
953 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
954 			.cra_alignmask		= 0,
955 			.cra_module		= THIS_MODULE,
956 			.cra_init		= kmb_ocs_hcu_sha_cra_init,
957 		}
958 	},
959 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
960 },
961 {
962 	.base.init		= kmb_ocs_hcu_init,
963 	.base.update		= kmb_ocs_hcu_update,
964 	.base.final		= kmb_ocs_hcu_final,
965 	.base.finup		= kmb_ocs_hcu_finup,
966 	.base.digest		= kmb_ocs_hcu_digest,
967 	.base.export		= kmb_ocs_hcu_export,
968 	.base.import		= kmb_ocs_hcu_import,
969 	.base.setkey		= kmb_ocs_hcu_setkey,
970 	.base.halg = {
971 		.digestsize	= SHA256_DIGEST_SIZE,
972 		.statesize	= sizeof(struct ocs_hcu_rctx),
973 		.base	= {
974 			.cra_name		= "hmac(sha256)",
975 			.cra_driver_name	= "hmac-sha256-keembay-ocs",
976 			.cra_priority		= 255,
977 			.cra_flags		= CRYPTO_ALG_ASYNC,
978 			.cra_blocksize		= SHA256_BLOCK_SIZE,
979 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
980 			.cra_alignmask		= 0,
981 			.cra_module		= THIS_MODULE,
982 			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
983 			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
984 		}
985 	},
986 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
987 },
988 {
989 	.base.init		= kmb_ocs_hcu_init,
990 	.base.update		= kmb_ocs_hcu_update,
991 	.base.final		= kmb_ocs_hcu_final,
992 	.base.finup		= kmb_ocs_hcu_finup,
993 	.base.digest		= kmb_ocs_hcu_digest,
994 	.base.export		= kmb_ocs_hcu_export,
995 	.base.import		= kmb_ocs_hcu_import,
996 	.base.halg = {
997 		.digestsize	= SM3_DIGEST_SIZE,
998 		.statesize	= sizeof(struct ocs_hcu_rctx),
999 		.base	= {
1000 			.cra_name		= "sm3",
1001 			.cra_driver_name	= "sm3-keembay-ocs",
1002 			.cra_priority		= 255,
1003 			.cra_flags		= CRYPTO_ALG_ASYNC,
1004 			.cra_blocksize		= SM3_BLOCK_SIZE,
1005 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1006 			.cra_alignmask		= 0,
1007 			.cra_module		= THIS_MODULE,
1008 			.cra_init		= kmb_ocs_hcu_sm3_cra_init,
1009 		}
1010 	},
1011 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1012 },
1013 {
1014 	.base.init		= kmb_ocs_hcu_init,
1015 	.base.update		= kmb_ocs_hcu_update,
1016 	.base.final		= kmb_ocs_hcu_final,
1017 	.base.finup		= kmb_ocs_hcu_finup,
1018 	.base.digest		= kmb_ocs_hcu_digest,
1019 	.base.export		= kmb_ocs_hcu_export,
1020 	.base.import		= kmb_ocs_hcu_import,
1021 	.base.setkey		= kmb_ocs_hcu_setkey,
1022 	.base.halg = {
1023 		.digestsize	= SM3_DIGEST_SIZE,
1024 		.statesize	= sizeof(struct ocs_hcu_rctx),
1025 		.base	= {
1026 			.cra_name		= "hmac(sm3)",
1027 			.cra_driver_name	= "hmac-sm3-keembay-ocs",
1028 			.cra_priority		= 255,
1029 			.cra_flags		= CRYPTO_ALG_ASYNC,
1030 			.cra_blocksize		= SM3_BLOCK_SIZE,
1031 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1032 			.cra_alignmask		= 0,
1033 			.cra_module		= THIS_MODULE,
1034 			.cra_init		= kmb_ocs_hcu_hmac_sm3_cra_init,
1035 			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
1036 		}
1037 	},
1038 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1039 },
1040 {
1041 	.base.init		= kmb_ocs_hcu_init,
1042 	.base.update		= kmb_ocs_hcu_update,
1043 	.base.final		= kmb_ocs_hcu_final,
1044 	.base.finup		= kmb_ocs_hcu_finup,
1045 	.base.digest		= kmb_ocs_hcu_digest,
1046 	.base.export		= kmb_ocs_hcu_export,
1047 	.base.import		= kmb_ocs_hcu_import,
1048 	.base.halg = {
1049 		.digestsize	= SHA384_DIGEST_SIZE,
1050 		.statesize	= sizeof(struct ocs_hcu_rctx),
1051 		.base	= {
1052 			.cra_name		= "sha384",
1053 			.cra_driver_name	= "sha384-keembay-ocs",
1054 			.cra_priority		= 255,
1055 			.cra_flags		= CRYPTO_ALG_ASYNC,
1056 			.cra_blocksize		= SHA384_BLOCK_SIZE,
1057 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1058 			.cra_alignmask		= 0,
1059 			.cra_module		= THIS_MODULE,
1060 			.cra_init		= kmb_ocs_hcu_sha_cra_init,
1061 		}
1062 	},
1063 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1064 },
1065 {
1066 	.base.init		= kmb_ocs_hcu_init,
1067 	.base.update		= kmb_ocs_hcu_update,
1068 	.base.final		= kmb_ocs_hcu_final,
1069 	.base.finup		= kmb_ocs_hcu_finup,
1070 	.base.digest		= kmb_ocs_hcu_digest,
1071 	.base.export		= kmb_ocs_hcu_export,
1072 	.base.import		= kmb_ocs_hcu_import,
1073 	.base.setkey		= kmb_ocs_hcu_setkey,
1074 	.base.halg = {
1075 		.digestsize	= SHA384_DIGEST_SIZE,
1076 		.statesize	= sizeof(struct ocs_hcu_rctx),
1077 		.base	= {
1078 			.cra_name		= "hmac(sha384)",
1079 			.cra_driver_name	= "hmac-sha384-keembay-ocs",
1080 			.cra_priority		= 255,
1081 			.cra_flags		= CRYPTO_ALG_ASYNC,
1082 			.cra_blocksize		= SHA384_BLOCK_SIZE,
1083 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1084 			.cra_alignmask		= 0,
1085 			.cra_module		= THIS_MODULE,
1086 			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
1087 			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
1088 		}
1089 	},
1090 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1091 },
1092 {
1093 	.base.init		= kmb_ocs_hcu_init,
1094 	.base.update		= kmb_ocs_hcu_update,
1095 	.base.final		= kmb_ocs_hcu_final,
1096 	.base.finup		= kmb_ocs_hcu_finup,
1097 	.base.digest		= kmb_ocs_hcu_digest,
1098 	.base.export		= kmb_ocs_hcu_export,
1099 	.base.import		= kmb_ocs_hcu_import,
1100 	.base.halg = {
1101 		.digestsize	= SHA512_DIGEST_SIZE,
1102 		.statesize	= sizeof(struct ocs_hcu_rctx),
1103 		.base	= {
1104 			.cra_name		= "sha512",
1105 			.cra_driver_name	= "sha512-keembay-ocs",
1106 			.cra_priority		= 255,
1107 			.cra_flags		= CRYPTO_ALG_ASYNC,
1108 			.cra_blocksize		= SHA512_BLOCK_SIZE,
1109 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1110 			.cra_alignmask		= 0,
1111 			.cra_module		= THIS_MODULE,
1112 			.cra_init		= kmb_ocs_hcu_sha_cra_init,
1113 		}
1114 	},
1115 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1116 },
1117 {
1118 	.base.init		= kmb_ocs_hcu_init,
1119 	.base.update		= kmb_ocs_hcu_update,
1120 	.base.final		= kmb_ocs_hcu_final,
1121 	.base.finup		= kmb_ocs_hcu_finup,
1122 	.base.digest		= kmb_ocs_hcu_digest,
1123 	.base.export		= kmb_ocs_hcu_export,
1124 	.base.import		= kmb_ocs_hcu_import,
1125 	.base.setkey		= kmb_ocs_hcu_setkey,
1126 	.base.halg = {
1127 		.digestsize	= SHA512_DIGEST_SIZE,
1128 		.statesize	= sizeof(struct ocs_hcu_rctx),
1129 		.base	= {
1130 			.cra_name		= "hmac(sha512)",
1131 			.cra_driver_name	= "hmac-sha512-keembay-ocs",
1132 			.cra_priority		= 255,
1133 			.cra_flags		= CRYPTO_ALG_ASYNC,
1134 			.cra_blocksize		= SHA512_BLOCK_SIZE,
1135 			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
1136 			.cra_alignmask		= 0,
1137 			.cra_module		= THIS_MODULE,
1138 			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
1139 			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
1140 		}
1141 	},
1142 	.op.do_one_request = kmb_ocs_hcu_do_one_request,
1143 },
1144 };
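/*
 * Hedged usage sketch (not part of the driver): a kernel caller reaches the
 * algorithms above through the generic ahash API, e.g. for "hmac(sha256)".
 * This mirrors the pattern used in kmb_ocs_hcu_setkey(); 'key', 'data' and
 * 'digest' are caller-supplied placeholders and error handling is omitted.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */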
1145 
1146 /* Device tree driver match. */
1147 static const struct of_device_id kmb_ocs_hcu_of_match[] = {
1148 	{
1149 		.compatible = "intel,keembay-ocs-hcu",
1150 	},
1151 	{}
1152 };
1153 MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);
1154 
1155 static void kmb_ocs_hcu_remove(struct platform_device *pdev)
1156 {
1157 	struct ocs_hcu_dev *hcu_dev = platform_get_drvdata(pdev);
1158 
1159 	crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
1160 
1161 	crypto_engine_exit(hcu_dev->engine);
1162 
1163 	spin_lock_bh(&ocs_hcu.lock);
1164 	list_del(&hcu_dev->list);
1165 	spin_unlock_bh(&ocs_hcu.lock);
1166 }
1167 
1168 static int kmb_ocs_hcu_probe(struct platform_device *pdev)
1169 {
1170 	struct device *dev = &pdev->dev;
1171 	struct ocs_hcu_dev *hcu_dev;
1172 	int rc;
1173 
1174 	hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
1175 	if (!hcu_dev)
1176 		return -ENOMEM;
1177 
1178 	hcu_dev->dev = dev;
1179 
1180 	platform_set_drvdata(pdev, hcu_dev);
1181 	rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
1182 	if (rc)
1183 		return rc;
1184 
1185 	hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
1186 	if (IS_ERR(hcu_dev->io_base))
1187 		return PTR_ERR(hcu_dev->io_base);
1188 
1189 	init_completion(&hcu_dev->irq_done);
1190 
1191 	/* Get and request IRQ. */
1192 	hcu_dev->irq = platform_get_irq(pdev, 0);
1193 	if (hcu_dev->irq < 0)
1194 		return hcu_dev->irq;
1195 
1196 	rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
1197 				       ocs_hcu_irq_handler, NULL, 0,
1198 				       "keembay-ocs-hcu", hcu_dev);
1199 	if (rc < 0) {
1200 		dev_err(dev, "Could not request IRQ.\n");
1201 		return rc;
1202 	}
1203 
1204 	INIT_LIST_HEAD(&hcu_dev->list);
1205 
1206 	spin_lock_bh(&ocs_hcu.lock);
1207 	list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
1208 	spin_unlock_bh(&ocs_hcu.lock);
1209 
1210 	/* Initialize crypto engine */
1211 	hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
1212 	if (!hcu_dev->engine) {
1213 		rc = -ENOMEM;
1214 		goto list_del;
1215 	}
1216 
1217 	rc = crypto_engine_start(hcu_dev->engine);
1218 	if (rc) {
1219 		dev_err(dev, "Could not start engine.\n");
1220 		goto cleanup;
1221 	}
1222 
1223 	/* Security infrastructure guarantees OCS clock is enabled. */
1224 
1225 	rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
1226 	if (rc) {
1227 		dev_err(dev, "Could not register algorithms.\n");
1228 		goto cleanup;
1229 	}
1230 
1231 	return 0;
1232 
1233 cleanup:
1234 	crypto_engine_exit(hcu_dev->engine);
1235 list_del:
1236 	spin_lock_bh(&ocs_hcu.lock);
1237 	list_del(&hcu_dev->list);
1238 	spin_unlock_bh(&ocs_hcu.lock);
1239 
1240 	return rc;
1241 }
1242 
1243 /* The OCS driver is a platform device. */
1244 static struct platform_driver kmb_ocs_hcu_driver = {
1245 	.probe = kmb_ocs_hcu_probe,
1246 	.remove_new = kmb_ocs_hcu_remove,
1247 	.driver = {
1248 			.name = DRV_NAME,
1249 			.of_match_table = kmb_ocs_hcu_of_match,
1250 		},
1251 };
1252 
1253 module_platform_driver(kmb_ocs_hcu_driver);
1254 
1255 MODULE_LICENSE("GPL");
1256