xref: /linux/drivers/crypto/caam/caampkc.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

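/*
 * Job descriptor buffer sizes: two CAAM command words (descriptor header
 * and the RSA protocol operation) plus the corresponding protocol data
 * block (PDB).
 */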
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))

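/*
 * Unmap the request's source/destination scatterlists and, if one was used,
 * the hardware S/G table.
 */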
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

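/* Unmap the public key material (modulus n, exponent e) referenced by the PDB */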
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

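/* Unmap the form 1 private key material (modulus n, exponent d) referenced by the PDB */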
static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

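/*
 * Allocate the extended descriptor: the software state (rsa_edesc), the
 * hardware job descriptor and, when source or destination spans more than
 * one scatterlist entry, a sec4 S/G table, all in a single DMA-able buffer.
 */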
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

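/*
 * Fill the RSA Encrypt PDB: DMA addresses of the key (n, e), the input f
 * and the output g, plus the e/n sizes and the input length.
 */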
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

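/*
 * Fill the RSA Decrypt (private key form 1) PDB: DMA addresses of the key
 * (n, d), the input g and the output f, plus the d/n sizes.
 */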
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

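/*
 * caam_rsa_enc - RSA public-key operation, g = f^e mod n, executed on the
 * CAAM PKHA through a single job descriptor.
 */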
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

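/*
 * caam_rsa_dec - RSA private-key operation (form 1), f = g^d mod n, executed
 * on the CAAM PKHA through a single job descriptor.
 */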
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

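/* Free the key components; the private exponent is zeroized before being freed */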
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kfree(key->e);
	kfree(key->n);
	key->d = NULL;
	key->e = NULL;
	key->n = NULL;
	key->d_sz = 0;
	key->e_sz = 0;
	key->n_sz = 0;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read; updated to the length left after
 *          the leading zeros have been skipped
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	while (!*buf && *nbytes) {
		buf++;
		(*nbytes)--;
	}

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}

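/* Reject RSA moduli larger than 4096 bits */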
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

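/*
 * Parse the BER-encoded public key and copy its components into DMA-able
 * (GFP_DMA) buffers owned by the driver context.
 */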
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The CAAM descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as the output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

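/*
 * Parse the BER-encoded private key and copy n, e and d into DMA-able
 * (GFP_DMA) buffers owned by the driver context.
 */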
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption
	 * descriptor expects a positive integer for the RSA modulus and uses
	 * its length as the decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

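/* Report the modulus size as the maximum output size for this key */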
static int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	return (key->n) ? key->n_sz : -EINVAL;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

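/*
 * Raw RSA transform registered with the crypto API. Sign and verify are the
 * same modular exponentiations as decrypt and encrypt, so the respective
 * handlers are reused for both.
 */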
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");