// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
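
/*
 * A reading of the length macros above (not an extra hardware constraint):
 * each RSA job descriptor is a header command plus one PROTOCOL operation
 * command (2 * CAAM_CMD_SZ), with the form-specific Protocol Data Block
 * in between.
 */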

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * flag used to avoid a double free of resources in case the
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request is completed
	 * directly by CAAM rather than by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request is completed
	 * directly by CAAM rather than by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes of a given
 *                                scatterlist, so they can be stripped
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than the given number of bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
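
/*
 * Usage sketch (illustrative only): this mirrors how rsa_edesc_alloc()
 * below strips excess leading zeros from an input that is longer than
 * the modulus.
 *
 *	struct scatterlist sg;
 *	u8 buf[4] = { 0x00, 0x00, 0x12, 0x34 };
 *	int lz;
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	lz = caam_rsa_count_leading_zeros(&sg, 2, 0);
 *	// lz == 2: both excess bytes are zero and can be skipped
 */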

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * count the excess leading zeros so that
		 * scatterwalk_ffwd() below can skip them
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
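
/*
 * Worked example for the sgf packing above (values assumed, e.g. a
 * 2048-bit key with a 3-byte public exponent): with e_sz = 3 and
 * n_sz = 256, the final assignments leave sgf holding
 * (3 << RSA_PDB_E_SHIFT) | 256 in addition to any RSA_PDB_SGF_* flags,
 * while f_len carries the (possibly zero-stripped) input length.
 */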

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlogged requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR has
	 * up to 1024 entries (more than the 10 entries of the crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
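
/*
 * Sketch of how a caller opts in to the backlog path above (standard
 * crypto API usage, not specific to this driver; my_complete_cb and
 * my_ctx are hypothetical):
 *
 *	akcipher_request_set_callback(req,
 *				      CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      my_complete_cb, my_ctx);
 *
 * Requests without CRYPTO_TFM_REQ_MAY_BACKLOG go straight to the Job
 * Ring and may be rejected when it is full.
 */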

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

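/*
 * Private key form selection, as set up by caam_rsa_set_priv_key_form()
 * below: FORM1 uses (n, d), FORM2 adds the primes (p, q), and FORM3 uses
 * the full CRT quintuple (p, q, dP, dQ, qInv). caam_rsa_dec() dispatches
 * to the richest form the parsed key could populate.
 */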
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}
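
/*
 * Worked example (illustrative values): for ptr -> {0x00, 0x00, 0x01,
 * 0x02} and nbytes = 4, the helper leaves ptr advanced by two bytes and
 * nbytes = 2, i.e. the minimal big-endian representation.
 */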

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q
 * length, as BER encoding requires that the minimum number of bytes be
 * used to encode the integer. The decoded dP, dQ, qInv values therefore
 * have to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
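
/*
 * Worked example (illustrative values): a 2-byte dP = {0x01, 0x23} with
 * a 4-byte prime p (dstlen = 4) yields the zero-padded buffer
 * {0x00, 0x00, 0x01, 0x23}, matching the fixed-width operands the CAAM
 * protocol data blocks expect.
 */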

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining
 * data to a buffer allocated in the GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_KERNEL);
}
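
/*
 * Worked example (illustrative values): for buf = {0x00, 0x80, 0x01}
 * and *nbytes = 3, the function returns a 2-byte copy {0x80, 0x01} and
 * sets *nbytes = 2; an all-zero buffer yields NULL.
 */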

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}
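
/*
 * Callers pass raw_key.n_sz << 3, i.e. the modulus size converted from
 * bytes to bits, so the 4096-bit limit here lines up with the 512-byte
 * CAAM_RSA_MAX_INPUT_SIZE defined at the top of this file.
 */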

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as the decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned int aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as the decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	}
};
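
/*
 * With cra_priority 3000 this implementation outranks the generic
 * software RSA, so a plain allocation picks it when the hardware is
 * present. Minimal usage sketch (standard crypto API, error handling
 * elided):
 *
 *	struct crypto_akcipher *tfm;
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_free_akcipher(tfm);
 */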

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}