1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handle async block request by crypto hardware engine.
4  *
5  * Copyright (C) 2016 Linaro, Inc.
6  *
7  * Author: Baolin Wang <baolin.wang@linaro.org>
8  */
9 
10 #include <crypto/internal/aead.h>
11 #include <crypto/internal/akcipher.h>
12 #include <crypto/internal/engine.h>
13 #include <crypto/internal/hash.h>
14 #include <crypto/internal/kpp.h>
15 #include <crypto/internal/skcipher.h>
16 #include <linux/err.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <uapi/linux/sched/types.h>
22 #include "internal.h"
23 
24 #define CRYPTO_ENGINE_MAX_QLEN 10
25 
26 struct crypto_engine_alg {
27 	struct crypto_alg base;
28 	struct crypto_engine_op op;
29 };
30 
31 /**
32  * crypto_finalize_request - finalize one request if the request is done
33  * @engine: the hardware engine
34  * @req: the request that needs to be finalized
35  * @err: error number
36  */
37 static void crypto_finalize_request(struct crypto_engine *engine,
38 				    struct crypto_async_request *req, int err)
39 {
40 	unsigned long flags;
41 
42 	/*
43 	 * If the hardware cannot enqueue more requests
44 	 * and the retry mechanism is not supported,
45 	 * make sure we are completing the current request.
46 	 */
47 	if (!engine->retry_support) {
48 		spin_lock_irqsave(&engine->queue_lock, flags);
49 		if (engine->cur_req == req) {
50 			engine->cur_req = NULL;
51 		}
52 		spin_unlock_irqrestore(&engine->queue_lock, flags);
53 	}
54 
55 	lockdep_assert_in_softirq();
56 	crypto_request_complete(req, err);
57 
58 	kthread_queue_work(engine->kworker, &engine->pump_requests);
59 }
60 
61 /**
62  * crypto_pump_requests - dequeue one request from engine queue to process
63  * @engine: the hardware engine
64  * @in_kthread: true if we are in the context of the request pump thread
65  *
66  * This function checks if there is any request in the engine queue that
67  * needs processing and, if so, calls out to the driver to initialize the
68  * hardware and handle each request.
69  */
70 static void crypto_pump_requests(struct crypto_engine *engine,
71 				 bool in_kthread)
72 {
73 	struct crypto_async_request *async_req, *backlog;
74 	struct crypto_engine_alg *alg;
75 	struct crypto_engine_op *op;
76 	unsigned long flags;
77 	bool was_busy = false;
78 	int ret;
79 
80 	spin_lock_irqsave(&engine->queue_lock, flags);
81 
82 	/* Make sure we are not already running a request */
83 	if (!engine->retry_support && engine->cur_req)
84 		goto out;
85 
86 	/* If another context is idling then defer */
87 	if (engine->idling) {
88 		kthread_queue_work(engine->kworker, &engine->pump_requests);
89 		goto out;
90 	}
91 
92 	/* Check if the engine queue is idle */
93 	if (!crypto_queue_len(&engine->queue) || !engine->running) {
94 		if (!engine->busy)
95 			goto out;
96 
97 		/* Only do teardown in the thread */
98 		if (!in_kthread) {
99 			kthread_queue_work(engine->kworker,
100 					   &engine->pump_requests);
101 			goto out;
102 		}
103 
104 		engine->busy = false;
105 		engine->idling = true;
106 		spin_unlock_irqrestore(&engine->queue_lock, flags);
107 
108 		if (engine->unprepare_crypt_hardware &&
109 		    engine->unprepare_crypt_hardware(engine))
110 			dev_err(engine->dev, "failed to unprepare crypt hardware\n");
111 
112 		spin_lock_irqsave(&engine->queue_lock, flags);
113 		engine->idling = false;
114 		goto out;
115 	}
116 
117 start_request:
118 	/* Get the first request from the engine queue to handle */
119 	backlog = crypto_get_backlog(&engine->queue);
120 	async_req = crypto_dequeue_request(&engine->queue);
121 	if (!async_req)
122 		goto out;
123 
124 	/*
125 	 * If hardware doesn't support the retry mechanism,
126 	 * keep track of the request we are processing now.
127 	 * We'll need it on completion (crypto_finalize_request).
128 	 */
129 	if (!engine->retry_support)
130 		engine->cur_req = async_req;
131 
132 	if (engine->busy)
133 		was_busy = true;
134 	else
135 		engine->busy = true;
136 
137 	spin_unlock_irqrestore(&engine->queue_lock, flags);
138 
139 	/* At this point we have successfully dequeued a request to process */
140 	if (!was_busy && engine->prepare_crypt_hardware) {
141 		ret = engine->prepare_crypt_hardware(engine);
142 		if (ret) {
143 			dev_err(engine->dev, "failed to prepare crypt hardware\n");
144 			goto req_err_1;
145 		}
146 	}
147 
148 	alg = container_of(async_req->tfm->__crt_alg,
149 			   struct crypto_engine_alg, base);
150 	op = &alg->op;
151 	ret = op->do_one_request(engine, async_req);
152 
153 	/* Request unsuccessfully executed by hardware */
154 	if (ret < 0) {
155 		/*
156 		 * If hardware queue is full (-ENOSPC), requeue request
157 		 * regardless of backlog flag.
158 		 * Otherwise, unprepare and complete the request.
159 		 */
160 		if (!engine->retry_support ||
161 		    (ret != -ENOSPC)) {
162 			dev_err(engine->dev,
163 				"Failed to do one request from queue: %d\n",
164 				ret);
165 			goto req_err_1;
166 		}
167 		spin_lock_irqsave(&engine->queue_lock, flags);
168 		/*
169 		 * If hardware was unable to execute request, enqueue it
170 		 * back in front of crypto-engine queue, to keep the order
171 		 * of requests.
172 		 */
173 		crypto_enqueue_request_head(&engine->queue, async_req);
174 
175 		kthread_queue_work(engine->kworker, &engine->pump_requests);
176 		goto out;
177 	}
178 
179 	goto retry;
180 
181 req_err_1:
182 	crypto_request_complete(async_req, ret);
183 
184 retry:
185 	if (backlog)
186 		crypto_request_complete(backlog, -EINPROGRESS);
187 
188 	/* If retry mechanism is supported, send new requests to engine */
189 	if (engine->retry_support) {
190 		spin_lock_irqsave(&engine->queue_lock, flags);
191 		goto start_request;
192 	}
193 	return;
194 
195 out:
196 	spin_unlock_irqrestore(&engine->queue_lock, flags);
197 
198 	/*
199 	 * Batching requests is possible only if
200 	 * the hardware can enqueue multiple requests
201 	 */
202 	if (engine->do_batch_requests) {
203 		ret = engine->do_batch_requests(engine);
204 		if (ret)
205 			dev_err(engine->dev, "failed to do batch requests: %d\n",
206 				ret);
207 	}
208 
209 	return;
210 }
211 
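/*
 * Hypothetical driver-side sketch (not part of the original file): a
 * minimal do_one_request() implementation illustrating the contract that
 * crypto_pump_requests() above relies on.  struct my_dev, my_engine_to_dev(),
 * my_hw_queue_full() and my_hw_submit() are made-up driver details; only the
 * engine behaviour described in the comments is taken from the code above.
 */
static int my_skcipher_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	struct my_dev *mydev = my_engine_to_dev(engine); /* hypothetical */

	/*
	 * With retry_support, returning -ENOSPC asks the engine to put the
	 * request back at the head of its queue and try again later.
	 */
	if (my_hw_queue_full(mydev))
		return -ENOSPC;

	/*
	 * Returning 0 means the request was accepted; the driver completes
	 * it later through crypto_finalize_skcipher_request().
	 */
	my_hw_submit(mydev, req);
	return 0;
}
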
212 static void crypto_pump_work(struct kthread_work *work)
213 {
214 	struct crypto_engine *engine =
215 		container_of(work, struct crypto_engine, pump_requests);
216 
217 	crypto_pump_requests(engine, true);
218 }
219 
220 /**
221  * crypto_transfer_request - transfer the new request into the engine queue
222  * @engine: the hardware engine
223  * @req: the request that needs to be put on the engine queue
224  * @need_pump: indicates whether to queue the request pump work on the kworker
225  */
226 static int crypto_transfer_request(struct crypto_engine *engine,
227 				   struct crypto_async_request *req,
228 				   bool need_pump)
229 {
230 	unsigned long flags;
231 	int ret;
232 
233 	spin_lock_irqsave(&engine->queue_lock, flags);
234 
235 	if (!engine->running) {
236 		spin_unlock_irqrestore(&engine->queue_lock, flags);
237 		return -ESHUTDOWN;
238 	}
239 
240 	ret = crypto_enqueue_request(&engine->queue, req);
241 
242 	if (!engine->busy && need_pump)
243 		kthread_queue_work(engine->kworker, &engine->pump_requests);
244 
245 	spin_unlock_irqrestore(&engine->queue_lock, flags);
246 	return ret;
247 }
248 
249 /**
250  * crypto_transfer_request_to_engine - transfer one request into
251  * the engine queue
252  * @engine: the hardware engine
253  * @req: the request that needs to be put on the engine queue
254  */
255 static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
256 					     struct crypto_async_request *req)
257 {
258 	return crypto_transfer_request(engine, req, true);
259 }
260 
261 /**
262  * crypto_transfer_aead_request_to_engine - transfer one aead_request
263  * into the engine queue
264  * @engine: the hardware engine
265  * @req: the request that needs to be put on the engine queue
266  */
267 int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
268 					   struct aead_request *req)
269 {
270 	return crypto_transfer_request_to_engine(engine, &req->base);
271 }
272 EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
273 
274 /**
275  * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
276  * into the engine queue
277  * @engine: the hardware engine
278  * @req: the request that needs to be put on the engine queue
279  */
280 int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
281 					       struct akcipher_request *req)
282 {
283 	return crypto_transfer_request_to_engine(engine, &req->base);
284 }
285 EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
286 
287 /**
288  * crypto_transfer_hash_request_to_engine - transfer one ahash_request
289  * into the engine queue
290  * @engine: the hardware engine
291  * @req: the request that needs to be put on the engine queue
292  */
293 int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
294 					   struct ahash_request *req)
295 {
296 	return crypto_transfer_request_to_engine(engine, &req->base);
297 }
298 EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
299 
300 /**
301  * crypto_transfer_kpp_request_to_engine - transfer one kpp_request into
302  * the engine queue
303  * @engine: the hardware engine
304  * @req: the request that needs to be put on the engine queue
305  */
306 int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
307 					  struct kpp_request *req)
308 {
309 	return crypto_transfer_request_to_engine(engine, &req->base);
310 }
311 EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
312 
313 /**
314  * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
315  * into the engine queue
316  * @engine: the hardware engine
317  * @req: the request that needs to be put on the engine queue
318  */
319 int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
320 					       struct skcipher_request *req)
321 {
322 	return crypto_transfer_request_to_engine(engine, &req->base);
323 }
324 EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
325 
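/*
 * Hypothetical driver-side sketch (not part of the original file): an
 * skcipher .encrypt() entry point that simply hands the request to the
 * engine.  struct my_dev and my_dev_from_tfm() are made-up; the request
 * is then picked up asynchronously by crypto_pump_requests().
 */
static int my_skcipher_encrypt(struct skcipher_request *req)
{
	struct my_dev *mydev = my_dev_from_tfm(crypto_skcipher_reqtfm(req));

	/* The enqueue status (normally -EINPROGRESS) is returned to the caller. */
	return crypto_transfer_skcipher_request_to_engine(mydev->engine, req);
}
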
326 /**
327  * crypto_finalize_aead_request - finalize one aead_request if
328  * the request is done
329  * @engine: the hardware engine
330  * @req: the request that needs to be finalized
331  * @err: error number
332  */
333 void crypto_finalize_aead_request(struct crypto_engine *engine,
334 				  struct aead_request *req, int err)
335 {
336 	return crypto_finalize_request(engine, &req->base, err);
337 }
338 EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
339 
340 /**
341  * crypto_finalize_akcipher_request - finalize one akcipher_request if
342  * the request is done
343  * @engine: the hardware engine
344  * @req: the request that needs to be finalized
345  * @err: error number
346  */
347 void crypto_finalize_akcipher_request(struct crypto_engine *engine,
348 				      struct akcipher_request *req, int err)
349 {
350 	return crypto_finalize_request(engine, &req->base, err);
351 }
352 EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
353 
354 /**
355  * crypto_finalize_hash_request - finalize one ahash_request if
356  * the request is done
357  * @engine: the hardware engine
358  * @req: the request that needs to be finalized
359  * @err: error number
360  */
361 void crypto_finalize_hash_request(struct crypto_engine *engine,
362 				  struct ahash_request *req, int err)
363 {
364 	return crypto_finalize_request(engine, &req->base, err);
365 }
366 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
367 
368 /**
369  * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
370  * @engine: the hardware engine
371  * @req: the request that needs to be finalized
372  * @err: error number
373  */
374 void crypto_finalize_kpp_request(struct crypto_engine *engine,
375 				 struct kpp_request *req, int err)
376 {
377 	return crypto_finalize_request(engine, &req->base, err);
378 }
379 EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
380 
381 /**
382  * crypto_finalize_skcipher_request - finalize one skcipher_request if
383  * the request is done
384  * @engine: the hardware engine
385  * @req: the request that needs to be finalized
386  * @err: error number
387  */
388 void crypto_finalize_skcipher_request(struct crypto_engine *engine,
389 				      struct skcipher_request *req, int err)
390 {
391 	return crypto_finalize_request(engine, &req->base, err);
392 }
393 EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
394 
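/*
 * Hypothetical driver-side sketch (not part of the original file):
 * finishing a request from the driver's completion path (e.g. a tasklet
 * run after the hardware interrupt).  struct my_dev, its cur_req field
 * and my_hw_read_status() are made-up.
 */
static void my_complete_request(struct my_dev *mydev)
{
	struct skcipher_request *req = mydev->cur_req; /* saved at submit time */
	int err = my_hw_read_status(mydev) ? -EIO : 0;

	/*
	 * Runs the request's completion callback and kicks the pump so the
	 * next queued request gets dispatched.
	 */
	crypto_finalize_skcipher_request(mydev->engine, req, err);
}
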
395 /**
396  * crypto_engine_start - start the hardware engine
397  * @engine: the hardware engine that needs to be started
398  *
399  * Return 0 on success, else a negative error code on failure.
400  */
401 int crypto_engine_start(struct crypto_engine *engine)
402 {
403 	unsigned long flags;
404 
405 	spin_lock_irqsave(&engine->queue_lock, flags);
406 
407 	if (engine->running || engine->busy) {
408 		spin_unlock_irqrestore(&engine->queue_lock, flags);
409 		return -EBUSY;
410 	}
411 
412 	engine->running = true;
413 	spin_unlock_irqrestore(&engine->queue_lock, flags);
414 
415 	kthread_queue_work(engine->kworker, &engine->pump_requests);
416 
417 	return 0;
418 }
419 EXPORT_SYMBOL_GPL(crypto_engine_start);
420 
421 /**
422  * crypto_engine_stop - stop the hardware engine
423  * @engine: the hardware engine that needs to be stopped
424  *
425  * Return 0 on success, else a negative error code on failure.
426  */
427 int crypto_engine_stop(struct crypto_engine *engine)
428 {
429 	unsigned long flags;
430 	unsigned int limit = 500;
431 	int ret = 0;
432 
433 	spin_lock_irqsave(&engine->queue_lock, flags);
434 
435 	/*
436 	 * If the engine queue is not empty or the engine is busy,
437 	 * wait for a while so the queued requests can be pumped and completed.
438 	 */
439 	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
440 		spin_unlock_irqrestore(&engine->queue_lock, flags);
441 		msleep(20);
442 		spin_lock_irqsave(&engine->queue_lock, flags);
443 	}
444 
445 	if (crypto_queue_len(&engine->queue) || engine->busy)
446 		ret = -EBUSY;
447 	else
448 		engine->running = false;
449 
450 	spin_unlock_irqrestore(&engine->queue_lock, flags);
451 
452 	if (ret)
453 		dev_warn(engine->dev, "could not stop engine\n");
454 
455 	return ret;
456 }
457 EXPORT_SYMBOL_GPL(crypto_engine_stop);
458 
459 /**
460  * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
461  * and initialize it by setting the maximum number of entries in the software
462  * crypto-engine queue.
463  * @dev: the device to which the hardware engine is attached
464  * @retry_support: whether the hardware supports the retry mechanism
465  * @cbk_do_batch: pointer to a callback function to be invoked when executing
466  *                a batch of requests.
467  *                This has the form:
468  *                callback(struct crypto_engine *engine)
469  *                where:
470  *                engine: the crypto engine structure.
471  * @rt: whether this queue is set to run as a realtime task
472  * @qlen: maximum size of the crypto-engine queue
473  *
474  * This must be called from context that can sleep.
475  * Return: the crypto engine structure on success, else NULL.
476  */
477 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
478 						       bool retry_support,
479 						       int (*cbk_do_batch)(struct crypto_engine *engine),
480 						       bool rt, int qlen)
481 {
482 	struct crypto_engine *engine;
483 
484 	if (!dev)
485 		return NULL;
486 
487 	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
488 	if (!engine)
489 		return NULL;
490 
491 	engine->dev = dev;
492 	engine->rt = rt;
493 	engine->running = false;
494 	engine->busy = false;
495 	engine->idling = false;
496 	engine->retry_support = retry_support;
497 	engine->priv_data = dev;
498 	/*
499 	 * Batching requests is possible only if
500 	 * the hardware supports the retry mechanism.
501 	 */
502 	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
503 
504 	snprintf(engine->name, sizeof(engine->name),
505 		 "%s-engine", dev_name(dev));
506 
507 	crypto_init_queue(&engine->queue, qlen);
508 	spin_lock_init(&engine->queue_lock);
509 
510 	engine->kworker = kthread_run_worker(0, "%s", engine->name);
511 	if (IS_ERR(engine->kworker)) {
512 		dev_err(dev, "failed to create crypto request pump task\n");
513 		return NULL;
514 	}
515 	kthread_init_work(&engine->pump_requests, crypto_pump_work);
516 
517 	if (engine->rt) {
518 		dev_info(dev, "will run requests pump with realtime priority\n");
519 		sched_set_fifo(engine->kworker->task);
520 	}
521 
522 	return engine;
523 }
524 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
525 
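/*
 * Hypothetical driver-side sketch (not part of the original file):
 * allocating an engine for hardware that can accept several requests at
 * once.  Passing retry_support = true lets do_one_request() return
 * -ENOSPC when the hardware queue is full, and the made-up my_batch_cb()
 * is invoked by crypto_pump_requests() when a dispatch pass finishes, so
 * the driver can kick off everything submitted so far.  my_engine_to_dev()
 * and my_hw_kick() are made-up helpers.
 */
static int my_batch_cb(struct crypto_engine *engine)
{
	struct my_dev *mydev = my_engine_to_dev(engine);

	return my_hw_kick(mydev);
}

static int my_alloc_engine(struct my_dev *mydev, struct device *dev)
{
	/* Queue deeper than the default CRYPTO_ENGINE_MAX_QLEN of 10. */
	mydev->engine = crypto_engine_alloc_init_and_set(dev, true,
							 my_batch_cb,
							 false, 128);
	return mydev->engine ? 0 : -ENOMEM;
}
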
526 /**
527  * crypto_engine_alloc_init - allocate crypto hardware engine structure and
528  * initialize it.
529  * @dev: the device to which the hardware engine is attached
530  * @rt: whether this queue is set to run as a realtime task
531  *
532  * This must be called from context that can sleep.
533  * Return: the crypto engine structure on success, else NULL.
534  */
535 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
536 {
537 	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
538 						CRYPTO_ENGINE_MAX_QLEN);
539 }
540 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
541 
542 /**
543  * crypto_engine_exit - free the resources of the hardware engine on exit
544  * @engine: the hardware engine that needs to be freed
545  */
546 void crypto_engine_exit(struct crypto_engine *engine)
547 {
548 	int ret;
549 
550 	ret = crypto_engine_stop(engine);
551 	if (ret)
552 		return;
553 
554 	kthread_destroy_worker(engine->kworker);
555 }
556 EXPORT_SYMBOL_GPL(crypto_engine_exit);
557 
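/*
 * Hypothetical driver-side sketch (not part of the original file): the
 * usual engine life cycle in a platform driver.  struct my_dev and the
 * surrounding driver (including <linux/platform_device.h>) are made-up;
 * only the crypto_engine_* calls come from this file.
 */
static int my_probe(struct platform_device *pdev)
{
	struct my_dev *mydev;
	int ret;

	mydev = devm_kzalloc(&pdev->dev, sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	/* One engine with the default queue length, non-realtime pump. */
	mydev->engine = crypto_engine_alloc_init(&pdev->dev, false);
	if (!mydev->engine)
		return -ENOMEM;

	ret = crypto_engine_start(mydev->engine);
	if (ret) {
		crypto_engine_exit(mydev->engine);
		return ret;
	}

	platform_set_drvdata(pdev, mydev);
	return 0;
}

static void my_remove(struct platform_device *pdev)
{
	struct my_dev *mydev = platform_get_drvdata(pdev);

	/* Drains and stops the engine, then destroys its kworker. */
	crypto_engine_exit(mydev->engine);
}
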
558 int crypto_engine_register_aead(struct aead_engine_alg *alg)
559 {
560 	if (!alg->op.do_one_request)
561 		return -EINVAL;
562 	return crypto_register_aead(&alg->base);
563 }
564 EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
565 
566 void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
567 {
568 	crypto_unregister_aead(&alg->base);
569 }
570 EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
571 
572 int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
573 {
574 	int i, ret;
575 
576 	for (i = 0; i < count; i++) {
577 		ret = crypto_engine_register_aead(&algs[i]);
578 		if (ret)
579 			goto err;
580 	}
581 
582 	return 0;
583 
584 err:
585 	crypto_engine_unregister_aeads(algs, i);
586 
587 	return ret;
588 }
589 EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
590 
591 void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
592 {
593 	int i;
594 
595 	for (i = count - 1; i >= 0; --i)
596 		crypto_engine_unregister_aead(&algs[i]);
597 }
598 EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
599 
600 int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
601 {
602 	if (!alg->op.do_one_request)
603 		return -EINVAL;
604 	return crypto_register_ahash(&alg->base);
605 }
606 EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
607 
608 void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
609 {
610 	crypto_unregister_ahash(&alg->base);
611 }
612 EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
613 
614 int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
615 {
616 	int i, ret;
617 
618 	for (i = 0; i < count; i++) {
619 		ret = crypto_engine_register_ahash(&algs[i]);
620 		if (ret)
621 			goto err;
622 	}
623 
624 	return 0;
625 
626 err:
627 	crypto_engine_unregister_ahashes(algs, i);
628 
629 	return ret;
630 }
631 EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
632 
633 void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
634 				      int count)
635 {
636 	int i;
637 
638 	for (i = count - 1; i >= 0; --i)
639 		crypto_engine_unregister_ahash(&algs[i]);
640 }
641 EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
642 
643 int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
644 {
645 	if (!alg->op.do_one_request)
646 		return -EINVAL;
647 	return crypto_register_akcipher(&alg->base);
648 }
649 EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
650 
651 void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
652 {
653 	crypto_unregister_akcipher(&alg->base);
654 }
655 EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
656 
657 int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
658 {
659 	if (!alg->op.do_one_request)
660 		return -EINVAL;
661 	return crypto_register_kpp(&alg->base);
662 }
663 EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
664 
665 void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
666 {
667 	crypto_unregister_kpp(&alg->base);
668 }
669 EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
670 
671 int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
672 {
673 	if (!alg->op.do_one_request)
674 		return -EINVAL;
675 	return crypto_register_skcipher(&alg->base);
676 }
677 EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
678 
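/*
 * Hypothetical driver-side sketch (not part of the original file): tying
 * a driver algorithm to its engine op and registering it.
 * my_skcipher_encrypt() and my_skcipher_do_one() follow the sketches
 * above; my_skcipher_setkey() and my_skcipher_decrypt() are further
 * made-up callbacks.  A real skcipher_alg would also set .cra_ctxsize,
 * init/exit hooks, etc.  Assumes <crypto/aes.h> for the AES_* constants.
 */
static struct skcipher_engine_alg my_cbc_aes_alg = {
	.base = {
		.base = {
			.cra_name	 = "cbc(aes)",
			.cra_driver_name = "cbc-aes-mydev",
			.cra_priority	 = 300,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = AES_BLOCK_SIZE,
			.cra_module	 = THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= my_skcipher_setkey,
		.encrypt	= my_skcipher_encrypt,
		.decrypt	= my_skcipher_decrypt,
	},
	.op = {
		.do_one_request = my_skcipher_do_one,
	},
};

static int my_register_algs(struct my_dev *mydev)
{
	/* Typically called from probe(), after crypto_engine_start(). */
	return crypto_engine_register_skcipher(&my_cbc_aes_alg);
}
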
679 void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
680 {
681 	return crypto_unregister_skcipher(&alg->base);
682 }
683 EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
684 
685 int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
686 				     int count)
687 {
688 	int i, ret;
689 
690 	for (i = 0; i < count; i++) {
691 		ret = crypto_engine_register_skcipher(&algs[i]);
692 		if (ret)
693 			goto err;
694 	}
695 
696 	return 0;
697 
698 err:
699 	crypto_engine_unregister_skciphers(algs, i);
700 
701 	return ret;
702 }
703 EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
704 
705 void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
706 					int count)
707 {
708 	int i;
709 
710 	for (i = count - 1; i >= 0; --i)
711 		crypto_engine_unregister_skcipher(&algs[i]);
712 }
713 EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
714 
715 MODULE_LICENSE("GPL");
716 MODULE_DESCRIPTION("Crypto hardware engine framework");
717