/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/ioctl.h>
#include <sys/crypto/dca.h>


static void dca_rsaverifydone(dca_request_t *, int);
static void dca_rsadone(dca_request_t *, int);

/* Exported function prototypes */
int dca_rsastart(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t, int);
int dca_rsainit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, int);
void dca_rsactxfree(void *);
int dca_rsaatomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    int, crypto_req_handle_t, int);

/* Local function prototypes */
static int dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen,
    int private);
static int dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode);
static int dca_x509_padding(caddr_t buf, int flen, int tlen);
static int dca_x509_unpadding(char *buf, int tlen, int flen, int mode);
static int decrypt_error_code(int mode, int decrypt, int verify, int def);

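/*
 * dca_rsastart() performs the common setup for all RSA operations:
 * it validates the input/output lengths against the modulus, reverses
 * the input into the pre-mapped DMA buffer, applies PKCS#1 or X.509
 * padding for encrypt/sign/sign-recover, and submits the job to MCR2.
 * "mode" is one of DCA_RSA_ENC, DCA_RSA_DEC, DCA_RSA_SIGN,
 * DCA_RSA_SIGNR, DCA_RSA_VRFY or DCA_RSA_VRFYR.
 */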
int
dca_rsastart(crypto_ctx_t *ctx, crypto_data_t *in, crypto_data_t *out,
    crypto_req_handle_t req, int mode)
{
	dca_request_t		*reqp = ctx->cc_provider_private;
	dca_t			*dca = ctx->cc_provider;
	caddr_t			daddr;
	int			rv = CRYPTO_QUEUED;
	int			len;

	/*
	 * In-place operations (in == out) are indicated by a NULL output;
	 * in that case set out to point to in.  Note that this only works
	 * for CKM_RSA_X_509, which applies no padding.
	 */
	if (!out) {
		DBG(dca, DWARN, "Using inline since output buffer is NULL.");
		out = in;
	}

	/* We don't support non-contiguous buffers for RSA */
	if (dca_sgcheck(dca, in, DCA_SG_CONTIG) ||
	    dca_sgcheck(dca, out, DCA_SG_CONTIG)) {
		rv = CRYPTO_NOT_SUPPORTED;
		goto errout;
	}

	len = dca_length(in);

	/* Extracting the key attributes is now done in dca_rsainit(). */
	if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
	    mode == DCA_RSA_SIGNR) {
		/*
		 * Return the length needed to store the output.
		 * For sign, sign-recover, and encrypt, the output buffer
		 * must not be smaller than modlen, since PKCS#1 or X.509
		 * padding will be applied.
		 */
		if (dca_length(out) < reqp->dr_ctx.modlen) {
			DBG(dca, DWARN,
			    "dca_rsastart: output buffer too short (%d < %d)",
			    dca_length(out), reqp->dr_ctx.modlen);
			out->cd_length = reqp->dr_ctx.modlen;
			rv = CRYPTO_BUFFER_TOO_SMALL;
			goto errout;
		}
	}
	if (out != in && out->cd_length > reqp->dr_ctx.modlen)
		out->cd_length = reqp->dr_ctx.modlen;

	/* The input length must not exceed the modulus length */
	if (len > reqp->dr_ctx.modlen) {
		rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
		    CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
		goto errout;
	}

	/*
	 * For decrypt, verify, and verify-recover, the input length must
	 * not be less than the modulus length.
	 */
	if (len < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
	    mode == DCA_RSA_VRFY || mode == DCA_RSA_VRFYR)) {
		rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
		    CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
		goto errout;
	}

	/*
	 * For decrypt and verify-recover, the output buffer must not be
	 * smaller than the modulus length.
	 */
	if (out->cd_length < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
	    mode == DCA_RSA_VRFYR) &&
	    reqp->dr_ctx.ctx_cm_type == RSA_X_509_MECH_INFO_TYPE) {
		out->cd_length = reqp->dr_ctx.modlen;
		rv = CRYPTO_BUFFER_TOO_SMALL;
		goto errout;
	}

	/* For decrypt and verify, the input should not be less than output */
	if (out && len < out->cd_length) {
		if ((rv = decrypt_error_code(mode,
		    CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
		    CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_SUCCESS)) !=
		    CRYPTO_SUCCESS)
			goto errout;
	}

	if ((daddr = dca_bufdaddr(in)) == NULL && len > 0) {
		rv = CRYPTO_ARGUMENTS_BAD;
		goto errout;
	}

	if (dca_numcmp(daddr, len, (char *)reqp->dr_ctx.mod,
	    reqp->dr_ctx.modlen) > 0) {
		DBG(dca, DWARN,
		    "dca_rsastart: input larger (numerically) than modulus!");
		rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
		    CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);
		goto errout;
	}

	reqp->dr_byte_stat = -1;
	reqp->dr_in = in;
	reqp->dr_out = out;
	reqp->dr_kcf_req = req;
	if (mode == DCA_RSA_VRFY)
		reqp->dr_callback = dca_rsaverifydone;
	else
		reqp->dr_callback = dca_rsadone;

	dca_reverse(daddr, reqp->dr_ibuf_kaddr, len, reqp->dr_pkt_length);
	if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
	    mode == DCA_RSA_SIGNR) {
		/*
		 * Needs to pad appropriately for encrypt, sign, and
		 * sign_recover
		 */
		if (reqp->dr_ctx.ctx_cm_type == RSA_PKCS_MECH_INFO_TYPE) {
			if ((rv = dca_pkcs1_padding(dca, reqp->dr_ibuf_kaddr,
			    len, reqp->dr_ctx.modlen, reqp->dr_ctx.pqfix)) !=
			    CRYPTO_QUEUED)
				goto errout;
		} else if (reqp->dr_ctx.ctx_cm_type ==
		    RSA_X_509_MECH_INFO_TYPE) {
			if ((rv = dca_x509_padding(reqp->dr_ibuf_kaddr,
			    len, reqp->dr_pkt_length)) != CRYPTO_QUEUED)
				goto errout;
		}
	}
	reqp->dr_ctx.mode = mode;

	/*
	 * Since the max RSA input size is 256 bytes (2048 bits), the first
	 * page (at least 4096 bytes) in the pre-mapped buffer is large enough.
	 * Therefore, we use this first page for RSA.
	 */
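	/*
	 * For example, a 2048-bit key has modlen = 256, and dr_pkt_length
	 * (modfix or 2 * pqfix, set in dca_rsainit()) remains well below
	 * 4096 bytes, so the single pre-mapped page is sufficient.
	 */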
	reqp->dr_in_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
	reqp->dr_in_next = 0;
	reqp->dr_in_len = reqp->dr_pkt_length;
	reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
	reqp->dr_out_next = 0;
	reqp->dr_out_len = reqp->dr_pkt_length;

	/* schedule the work by doing a submit */
	rv = dca_start(dca, reqp, MCR2, 1);


errout:
	if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL)
		(void) dca_free_context(ctx);

	return (rv);
}

void
dca_rsadone(dca_request_t *reqp, int errno)
{
	if (errno == CRYPTO_SUCCESS) {
		int	outsz = reqp->dr_out->cd_length;
		caddr_t	daddr;

		(void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, reqp->dr_out_len,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			reqp->destroy = TRUE;
			errno = CRYPTO_DEVICE_ERROR;
			goto errout;
		}

		if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
		    reqp->dr_ctx.mode == DCA_RSA_VRFY ||
		    reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
			/*
			 * Needs to unpad appropriately for decrypt, verify,
			 * and verify_recover
			 */
			if (reqp->dr_ctx.ctx_cm_type ==
			    RSA_PKCS_MECH_INFO_TYPE) {
				errno = dca_pkcs1_unpadding(
				    reqp->dr_obuf_kaddr, &outsz,
				    reqp->dr_ctx.modlen, reqp->dr_ctx.mode);

				/* check for bad data errors */
				if (errno != CRYPTO_SUCCESS &&
				    errno != CRYPTO_BUFFER_TOO_SMALL) {
					goto errout;
				}
				if (dca_bufdaddr(reqp->dr_out) == NULL) {
					errno = CRYPTO_BUFFER_TOO_SMALL;
				}
				if (errno == CRYPTO_BUFFER_TOO_SMALL) {
					reqp->dr_out->cd_length = outsz;
					goto errout;
				}
				/* Reset the output data length */
				reqp->dr_out->cd_length = outsz;
			} else if (reqp->dr_ctx.ctx_cm_type ==
			    RSA_X_509_MECH_INFO_TYPE) {
				if ((errno = dca_x509_unpadding(
				    reqp->dr_obuf_kaddr, outsz,
				    reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
				    CRYPTO_SUCCESS)
					goto errout;
			}
		}

		if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
			DBG(reqp->dr_dca, DINTR,
			    "dca_rsadone: reqp->dr_out is bad");
			errno = CRYPTO_ARGUMENTS_BAD;
			goto errout;
		}
		/*
		 * Note that there may be some number of null bytes
		 * at the end of the source (result), but we don't care
		 * about them -- they are placeholders only and are
		 * truncated here.
		 */
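		/*
		 * For example, a PKCS#1 decrypt of a 256-byte block may
		 * yield outsz < 256; only the low-order outsz bytes of
		 * dr_obuf_kaddr are reversed into the caller's buffer.
		 */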
		dca_reverse(reqp->dr_obuf_kaddr, daddr, outsz, outsz);
	}
errout:
	ASSERT(reqp->dr_kcf_req != NULL);

	/* notify framework that request is completed */
	crypto_op_notification(reqp->dr_kcf_req, errno);
	DBG(reqp->dr_dca, DINTR,
	    "dca_rsadone: returning 0x%x to the kef via crypto_op_notification",
	    errno);

	/*
	 * For non-atomic operations, reqp will be freed in the kCF
	 * callback function since it may be needed again if
	 * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
	 */
	if (reqp->dr_ctx.atomic) {
		crypto_ctx_t ctx;
		ctx.cc_provider_private = reqp;
		dca_rsactxfree(&ctx);
	}
}

void
dca_rsaverifydone(dca_request_t *reqp, int errno)
{
	if (errno == CRYPTO_SUCCESS) {
		char	scratch[RSA_MAX_KEY_LEN];
		int	outsz = reqp->dr_out->cd_length;
		caddr_t	daddr;

		/*
		 * ASSUMPTION: the signature length was already
		 * checked on the way in, and it is a valid length.
		 */
		(void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, outsz,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			reqp->destroy = TRUE;
			errno = CRYPTO_DEVICE_ERROR;
			goto errout;
		}

		if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
		    reqp->dr_ctx.mode == DCA_RSA_VRFY ||
		    reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
			/*
			 * Needs to unpad appropriately for decrypt, verify,
			 * and verify_recover
			 */
			if (reqp->dr_ctx.ctx_cm_type ==
			    RSA_PKCS_MECH_INFO_TYPE) {
				errno = dca_pkcs1_unpadding(
				    reqp->dr_obuf_kaddr, &outsz,
				    reqp->dr_ctx.modlen, reqp->dr_ctx.mode);

				/* check for bad data errors */
				if (errno != CRYPTO_SUCCESS &&
				    errno != CRYPTO_BUFFER_TOO_SMALL) {
					goto errout;
				}
				if (dca_bufdaddr(reqp->dr_out) == NULL) {
					errno = CRYPTO_BUFFER_TOO_SMALL;
				}
				if (errno == CRYPTO_BUFFER_TOO_SMALL) {
					reqp->dr_out->cd_length = outsz;
					goto errout;
				}
				/* Reset the output data length */
				reqp->dr_out->cd_length = outsz;
			} else if (reqp->dr_ctx.ctx_cm_type ==
			    RSA_X_509_MECH_INFO_TYPE) {
				if ((errno = dca_x509_unpadding(
				    reqp->dr_obuf_kaddr, outsz,
				    reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
				    CRYPTO_SUCCESS)
					goto errout;
			}
		}

		dca_reverse(reqp->dr_obuf_kaddr, scratch, outsz, outsz);

		if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
			errno = CRYPTO_ARGUMENTS_BAD;
			goto errout;
		}
		if (dca_numcmp(daddr, reqp->dr_out->cd_length, scratch,
		    outsz) != 0) {
			/* VERIFY FAILED */
			errno = CRYPTO_SIGNATURE_INVALID;
		}
	}
errout:
	ASSERT(reqp->dr_kcf_req != NULL);

	/* notify framework that request is completed */
	crypto_op_notification(reqp->dr_kcf_req, errno);
	DBG(reqp->dr_dca, DINTR,
	    "dca_rsaverifydone: rtn 0x%x to the kef via crypto_op_notification",
	    errno);

	/*
	 * For non-atomic operations, reqp will be freed in the kCF
	 * callback function since it may be needed again if
	 * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
	 */
	if (reqp->dr_ctx.atomic) {
		crypto_ctx_t ctx;
		ctx.cc_provider_private = reqp;
		dca_rsactxfree(&ctx);
	}
}

/*
 * Set up either a public or a private RSA key for subsequent use.
 */
int
dca_rsainit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, int kmflag)
{
	crypto_object_attribute_t	*attr;
	unsigned			expname = 0;
	void				*attrdata;
	int rv;

	uchar_t			*exp;
	uchar_t			*p;
	uchar_t			*q;
	uchar_t			*dp;
	uchar_t			*dq;
	uchar_t			*pinv;

	unsigned		explen = 0;
	unsigned		plen = 0;
	unsigned		qlen = 0;
	unsigned		dplen = 0;
	unsigned		dqlen = 0;
	unsigned		pinvlen = 0;

	unsigned		modbits, expbits, pbits, qbits;
	unsigned		modfix, expfix, pqfix = 0;
	uint16_t		ctxlen;
	caddr_t			kaddr;
	dca_request_t		*reqp = NULL;
	dca_t			*dca = (dca_t *)ctx->cc_provider;

	DBG(NULL, DENTRY, "dca_rsainit: start");

	if ((reqp = dca_getreq(dca, MCR2, 1)) == NULL) {
		DBG(NULL, DWARN,
		    "dca_rsainit: unable to allocate request for RSA");
		rv = CRYPTO_HOST_MEMORY;
		goto errout;
	}

	reqp->dr_ctx.ctx_cm_type = mechanism->cm_type;
	ctx->cc_provider_private = reqp;

	/*
	 * The key type can be RAW, REFERENCE, or ATTR_LIST (VALUE).
	 * Only ATTR_LIST is supported on Deimos for RSA.
	 */
	if ((attr = dca_get_key_attr(key)) == NULL) {
		DBG(NULL, DWARN, "dca_rsainit: key attributes missing");
		rv = CRYPTO_KEY_TYPE_INCONSISTENT;
		goto errout;
	}

	if (dca_find_attribute(attr, key->ck_count, CKA_PUBLIC_EXPONENT))
		expname = CKA_PUBLIC_EXPONENT;

	/*
	 * An RSA public key has only a public exponent.  An RSA private key
	 * must have a private exponent, though it may also carry the public
	 * exponent.  Thus, the existence of a private exponent indicates a
	 * private key.
	 */
	if (dca_find_attribute(attr, key->ck_count, CKA_PRIVATE_EXPONENT))
		expname = CKA_PRIVATE_EXPONENT;

	if (!expname) {
		DBG(NULL, DWARN, "dca_rsainit: no exponent in key");
		rv = CRYPTO_ARGUMENTS_BAD;
		goto errout;
	}

	/* Modulus */
	if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_MODULUS,
	    &attrdata, &(reqp->dr_ctx.modlen))) != CRYPTO_SUCCESS) {
		DBG(NULL, DWARN, "dca_rsainit: failed to retrieve modulus");
		goto errout;
	}
	if ((reqp->dr_ctx.modlen == 0) ||
	    (reqp->dr_ctx.modlen > RSA_MAX_KEY_LEN)) {
		DBG(NULL, DWARN, "dca_rsainit: bad modulus size");
		rv = CRYPTO_ARGUMENTS_BAD;
		goto errout;
	}
	if ((reqp->dr_ctx.mod = kmem_alloc(reqp->dr_ctx.modlen, kmflag)) ==
	    NULL) {
		rv = CRYPTO_HOST_MEMORY;
		goto errout;
	}
	bcopy(attrdata, reqp->dr_ctx.mod, reqp->dr_ctx.modlen);

	/* Exponent */
	if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, expname,
	    (void **) &exp, &explen)) != CRYPTO_SUCCESS) {
		DBG(NULL, DWARN, "dca_rsainit: failed to retrieve exponent");
		goto errout;
	}
	if ((explen == 0) || (explen > RSA_MAX_KEY_LEN)) {
		DBG(NULL, DWARN, "dca_rsainit: bad exponent size");
		rv = CRYPTO_ARGUMENTS_BAD;
		goto errout;
	}

	/* Lookup private attributes */
	if (expname == CKA_PRIVATE_EXPONENT) {
		/* Prime 1 */
		(void) dca_attr_lookup_uint8_array(attr, key->ck_count,
		    CKA_PRIME_1, (void **)&q, &qlen);

		/* Prime 2 */
		(void) dca_attr_lookup_uint8_array(attr, key->ck_count,
		    CKA_PRIME_2, (void **)&p, &plen);

		/* Exponent 1 */
		(void) dca_attr_lookup_uint8_array(attr, key->ck_count,
		    CKA_EXPONENT_1, (void **)&dq, &dqlen);

		/* Exponent 2 */
		(void) dca_attr_lookup_uint8_array(attr, key->ck_count,
		    CKA_EXPONENT_2, (void **)&dp, &dplen);

		/* Coefficient */
		(void) dca_attr_lookup_uint8_array(attr, key->ck_count,
		    CKA_COEFFICIENT, (void **)&pinv, &pinvlen);
	}

	modbits = dca_bitlen(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);
	expbits = dca_bitlen(exp, explen);

	if ((modfix = dca_padfull(modbits)) == 0) {
		DBG(NULL, DWARN, "dca_rsainit: modulus too long");
		rv = CRYPTO_KEY_SIZE_RANGE;
		goto errout;
	}
	expfix =  ROUNDUP(explen, sizeof (uint32_t));

	if (plen && qlen && dplen && dqlen && pinvlen) {
		unsigned pfix, qfix;
		qbits = dca_bitlen(q, qlen);
		pbits = dca_bitlen(p, plen);
		qfix = dca_padhalf(qbits);
		pfix = dca_padhalf(pbits);
		if (pfix & qfix)
			pqfix = max(pfix, qfix);
	}

	if (pqfix) {
		reqp->dr_job_stat = DS_RSAPRIVATE;
		reqp->dr_pkt_length = 2 * pqfix;
	} else {
		reqp->dr_job_stat = DS_RSAPUBLIC;
		reqp->dr_pkt_length = modfix;
	}

	if (pqfix) {
		/*
		 * NOTE: chip's notion of p vs. q is reversed from
		 * PKCS#11.  We use the chip's notion in our variable
		 * naming.
		 */
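		/*
		 * Private (CRT) context layout: an 8-byte header (CMD,
		 * LENGTH, QLEN, PLEN) followed by five pqfix-sized bignums
		 * (p, q, dp, dq, pinv), hence ctxlen = 8 + 5 * pqfix.
		 */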
		ctxlen = 8 + pqfix * 5;

		/* write out the context structure */
		PUTCTX16(reqp, CTX_CMD, CMD_RSAPRIVATE);
		PUTCTX16(reqp, CTX_LENGTH, ctxlen);
		/* q and p lengths in bits!!! */
		PUTCTX16(reqp, CTX_RSAQLEN, qbits);
		PUTCTX16(reqp, CTX_RSAPLEN, pbits);

		kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;

		/* store the bignums */
		dca_reverse(p, kaddr, plen, pqfix);
		kaddr += pqfix;

		dca_reverse(q, kaddr, qlen, pqfix);
		kaddr += pqfix;

		dca_reverse(dp, kaddr, dplen, pqfix);
		kaddr += pqfix;

		dca_reverse(dq, kaddr, dqlen, pqfix);
		kaddr += pqfix;

		dca_reverse(pinv, kaddr, pinvlen, pqfix);
		kaddr += pqfix;
	} else {
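		/*
		 * Public/non-CRT context layout: an 8-byte header (CMD,
		 * LENGTH, EXPLEN, MODLEN) followed by the modfix-sized
		 * modulus and the expfix-sized exponent.
		 */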
		ctxlen = 8 + modfix + expfix;
		/* write out the context structure */
		PUTCTX16(reqp, CTX_CMD, CMD_RSAPUBLIC);
		PUTCTX16(reqp, CTX_LENGTH, (uint16_t)ctxlen);
		/* exponent and modulus length in bits!!! */
		PUTCTX16(reqp, CTX_RSAEXPLEN, expbits);
		PUTCTX16(reqp, CTX_RSAMODLEN, modbits);

		kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;

		/* store the bignums */
		dca_reverse(reqp->dr_ctx.mod, kaddr, reqp->dr_ctx.modlen,
		    modfix);
		kaddr += modfix;

		dca_reverse(exp, kaddr, explen, expfix);
		kaddr += expfix;
	}

	reqp->dr_ctx.pqfix = pqfix;

errout:
	if (rv != CRYPTO_SUCCESS)
		dca_rsactxfree(ctx);

	return (rv);
}
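
/*
 * Illustrative sketch (not compiled): the kind of CRYPTO_KEY_ATTR_LIST
 * key that dca_rsainit() expects from the framework.  The attribute and
 * field names below are the standard KCF/PKCS#11 ones; the bignum
 * values (n, d, p, q, dp, dq, qinv) are placeholders.
 *
 *	crypto_object_attribute_t attrs[] = {
 *		{ CKA_MODULUS,		(caddr_t)n,	sizeof (n) },
 *		{ CKA_PRIVATE_EXPONENT,	(caddr_t)d,	sizeof (d) },
 *		{ CKA_PRIME_1,		(caddr_t)p,	sizeof (p) },
 *		{ CKA_PRIME_2,		(caddr_t)q,	sizeof (q) },
 *		{ CKA_EXPONENT_1,	(caddr_t)dp,	sizeof (dp) },
 *		{ CKA_EXPONENT_2,	(caddr_t)dq,	sizeof (dq) },
 *		{ CKA_COEFFICIENT,	(caddr_t)qinv,	sizeof (qinv) },
 *	};
 *	crypto_key_t key;
 *
 *	key.ck_format = CRYPTO_KEY_ATTR_LIST;
 *	key.ck_count = sizeof (attrs) / sizeof (attrs[0]);
 *	key.ck_attrs = attrs;
 *
 * With all five CRT attributes present, dca_rsainit() builds the
 * private (CRT) context; otherwise it falls back to the modulus and
 * exponent form.
 */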

void
dca_rsactxfree(void *arg)
{
	crypto_ctx_t	*ctx = (crypto_ctx_t *)arg;
	dca_request_t	*reqp = ctx->cc_provider_private;

	if (reqp == NULL)
		return;

	if (reqp->dr_ctx.mod)
		kmem_free(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);

	reqp->dr_ctx.mode = 0;
	reqp->dr_ctx.ctx_cm_type = 0;
	reqp->dr_ctx.mod = NULL;
	reqp->dr_ctx.modlen = 0;
	reqp->dr_ctx.pqfix = 0;
	reqp->dr_ctx.atomic = 0;

	if (reqp->destroy)
		dca_destroyreq(reqp);
	else
		dca_freereq(reqp);

	ctx->cc_provider_private = NULL;
}

int
dca_rsaatomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *input, crypto_data_t *output,
    int kmflag, crypto_req_handle_t req, int mode)
{
	crypto_ctx_t	ctx;	/* on the stack */
	int		rv;

	ctx.cc_provider = provider;
	ctx.cc_session = session_id;

	rv = dca_rsainit(&ctx, mechanism, key, kmflag);
	if (rv != CRYPTO_SUCCESS) {
		DBG(NULL, DWARN, "dca_rsaatomic: dca_rsainit() failed");
		/* The content of ctx should have been freed already */
		return (rv);
	}

	/*
	 * Set the atomic flag so that the hardware callback function
	 * will free the context.
	 */
	((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;

	rv = dca_rsastart(&ctx, input, output, req, mode);

	/*
	 * The context will be freed in the hardware callback function if it
	 * is queued
	 */
	if (rv != CRYPTO_QUEUED)
		dca_rsactxfree(&ctx);

	return (rv);
}


/*
 * For RSA_PKCS padding and unpadding:
 * 1. The minimum padding is 11 bytes.
 * 2. The first and the last bytes must be 0.
 * 3. The second byte is 1 for private and 2 for public keys.
 * 4. Pad with 0xff for private and non-zero random for public keys.
 */
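/*
 * Illustration: dca_pkcs1_padding() below builds the PKCS#1 v1.5 block
 * EB = 00 || BT || PS || 00 || D directly in device (reversed) order.
 * With data length flen and modulus length tlen the buffer ends up as:
 *
 *	index:    0 .. flen-1   flen   flen+1 .. tlen-3   tlen-2   tlen-1
 *	content:  D (reversed)  0x00   PS (0xff/random)     BT      0x00
 *
 * BT is 0x01 for private-key (sign) operations and 0x02 for public-key
 * (encrypt) operations.
 */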
static int
dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen, int private)
{
	int i;

	DBG(NULL, DENTRY,
	    "dca_pkcs1_padding: tlen: %d, flen: %d: private: %d\n",
	    tlen, flen, private);

	if (flen > tlen - 11)
		return (CRYPTO_DATA_LEN_RANGE);

	if (private) {
		/* Padding for private encrypt */
		buf[flen] = '\0';
		for (i = flen + 1; i < tlen - 2; i++) {
			buf[i] = (unsigned char) 0xff;
		}
		buf[tlen - 2] = 1;
		buf[tlen - 1] = 0;
	} else {
		/* Padding for public encrypt */
		buf[flen] = '\0';

		if (dca_random_buffer(dca, &buf[flen+1], tlen - flen - 3) !=
		    CRYPTO_SUCCESS)
			return (CRYPTO_RANDOM_NO_RNG);

		buf[tlen - 2] = 2;
		buf[tlen - 1] = 0;
	}

	return (CRYPTO_QUEUED);
}

static int
dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode)
{
	int i;
	const unsigned char *p;
	unsigned char type;

	DBG(NULL, DENTRY, "dca_pkcs1_unpadding: tlen: %d, flen: %d\n",
	    *tlen, flen);

	p = (unsigned char *) buf + (flen-1);
	if (*(p--) != 0)
		return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
		    CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);

	/* It is ok if the data length is 0 after removing the padding */
	type = *(p--);
	if (type == 01) {
		for (i = flen - 3; i >= 0; i--) {
			if (*p != 0xff) {
				if (*p == '\0') {
					p--;
					break;
				} else {
					return decrypt_error_code(mode,
					    CRYPTO_ENCRYPTED_DATA_INVALID,
					    CRYPTO_SIGNATURE_INVALID,
					    CRYPTO_DATA_INVALID);
				}
			}
			p--;
		}
	} else if (type == 02) {
		for (i = flen - 3; i >= 0; i--) {
			if (*p == '\0') {
				p--;
				break;
			}
			p--;
		}
	} else {
		return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
		    CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);
	}

	/* i < 0 means did not find the end of the padding */
	if (i < 0)
		return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
		    CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);

	if (i > *tlen) {
		*tlen = i;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	if (flen - i < 11)
		return decrypt_error_code(mode,
		    CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
		    CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);

	/* Return the unpadded length to the caller */
	*tlen = i;

	return (CRYPTO_SUCCESS);
}
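
/*
 * Example: with flen = 128 (a 1024-bit key) and a type-02 block, the
 * scan above walks down from buf[flen - 3] until it finds the 0x00
 * separator at index i; the recovered data is buf[0..i-1], so *tlen is
 * set to i, or CRYPTO_BUFFER_TOO_SMALL is returned when i exceeds the
 * caller-supplied *tlen.
 */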

/*
 * For RSA_X_509 padding and unpadding, pad with 0s before the actual
 * data.  Note that the data is handled in reverse (device) order.
 */
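/*
 * In the reversed (device-order) buffer the data occupies the low
 * indices and the zero padding occupies everything above it, up to the
 * full packet length; dca_x509_unpadding() verifies that the padding
 * region is all zero.
 */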
static int
dca_x509_padding(caddr_t buf, int flen, int tlen)
{
	DBG(NULL, DENTRY, "dca_x509_padding: tlen: %d, flen: %d\n",
	    tlen, flen);

	/* zero-fill the (reversed) high-order bytes above the data */
	bzero(buf+flen, tlen - flen);

	return (CRYPTO_QUEUED);
}

/* ARGSUSED */
static int
dca_x509_unpadding(char *buf, int tlen, int flen, int mode)
{
	int i;
	const unsigned char *p;

	DBG(NULL, DENTRY, "dca_x509_unpadding: tlen: %d, flen: %d\n",
	    tlen, flen);

	p = (unsigned char *) buf + flen;
	for (i = tlen; i < flen; i++) {
		if (*(--p) != 0)
			return (CRYPTO_SIGNATURE_INVALID);
	}

	return (CRYPTO_SUCCESS);
}

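/*
 * Map a generic length/format error onto the mode-specific error code:
 * "decrypt" is returned for decrypt operations, "verify" for verify and
 * verify-recover, and "def" for everything else (encrypt/sign).
 */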
static int
decrypt_error_code(int mode, int decrypt, int verify, int def)
{
	switch (mode) {
	case DCA_RSA_DEC:
		return (decrypt);
	case DCA_RSA_VRFY:
	case DCA_RSA_VRFYR:
		return (verify);
	default:
		return (def);
	}
}