// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200

struct crypto_engine_alg {
	struct crypto_alg base;
	struct crypto_engine_op op;
};

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_1;
		}
	}

	if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
		alg = container_of(async_req->tfm->__crt_alg,
				   struct crypto_engine_alg, base);
		op = &alg->op;
	} else {
		enginectx = crypto_tfm_ctx(async_req->tfm);
		op = &enginectx->op;

		if (!op->do_one_request) {
			dev_err(engine->dev, "failed to do request\n");
			ret = -EINVAL;
			goto req_err_1;
		}
	}

	ret = op->do_one_request(engine, async_req);

	/* Request failed to execute on the hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, complete the request with an error.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back at the front of the crypto-engine queue to keep
		 * the order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If the retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 * @need_pump: whether to queue the request pump work on the kworker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
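
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * implements its skcipher .encrypt/.decrypt entry points by handing the
 * request over to the engine instead of driving the hardware directly.
 * The names my_priv and priv->engine are hypothetical driver state.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_priv *priv = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(priv->engine,
 *								  req);
 *	}
 *
 * The transfer normally returns -EINPROGRESS (or -EBUSY for a backlogged
 * request); the engine later runs the algorithm's do_one_request() from
 * its kworker.
 */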

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
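
/*
 * Completion sketch (hypothetical names): when the hardware signals that a
 * request has finished, e.g. from the driver's tasklet, the driver reports
 * the result through the matching finalize helper, which completes the
 * request and kicks the queue pump again. Note the softirq context
 * expected by crypto_finalize_request() above.
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_priv *priv = (struct my_priv *)data;
 *		int err = my_read_hw_status(priv);	// hypothetical
 *
 *		crypto_finalize_skcipher_request(priv->engine, priv->req, err);
 *	}
 */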

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need
	 * to wait for a while so that the queued requests get pumped out.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
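
/*
 * Sketch (hypothetical values and callback name): hardware with its own
 * command queue can pass retry_support = true, a batch callback and a
 * deeper software queue; do_one_request() may then return -ENOSPC when
 * the hardware queue is full and the request is requeued at the head,
 * preserving order.
 *
 *	engine = crypto_engine_alloc_init_and_set(dev, true, my_kick_hw,
 *						  false, 512);
 */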

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
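
/*
 * Probe-time sketch (hypothetical driver, error handling trimmed): the
 * usual life cycle is to allocate the engine, start it, then register
 * the engine-aware algorithms.
 *
 *	priv->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *	if (!priv->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(priv->engine);
 *	if (ret)
 *		return ret;
 */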

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
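
/*
 * Teardown sketch (hypothetical driver): the remove path mirrors probe,
 * unregistering the algorithms before letting the engine drain and stop.
 *
 *	static void my_remove(struct platform_device *pdev)
 *	{
 *		struct my_priv *priv = platform_get_drvdata(pdev);
 *
 *		crypto_engine_unregister_skcipher(&my_alg);
 *		crypto_engine_exit(priv->engine);
 *	}
 */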

int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
	crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_aead(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_aeads(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
	crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_ahashes(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
	crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
	crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);

void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_skciphers(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
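
/*
 * Registration sketch (hypothetical names): a driver wraps its algorithm
 * in an *_engine_alg so the engine can reach do_one_request() through the
 * CRYPTO_ALG_ENGINE flag that crypto_engine_register_skcipher() sets.
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		return my_start_hw_op(engine, req);	// hypothetical
 *	}
 *
 *	static struct skcipher_engine_alg my_alg = {
 *		.base.base.cra_name	= "cbc(aes)",
 *		// remaining skcipher_alg fields elided
 *		.op.do_one_request	= my_do_one_request,
 *	};
 *
 * A driver would then call crypto_engine_register_skcipher(&my_alg) at
 * probe time and crypto_engine_unregister_skcipher(&my_alg) on remove.
 */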

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");