/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/note.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/dca.h>


static void dca_rsaverifydone(dca_request_t *, int);
static void dca_rsadone(dca_request_t *, int);

/* Exported function prototypes */
int dca_rsastart(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t, int);
int dca_rsainit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, int);
void dca_rsactxfree(void *);
int dca_rsaatomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    int, crypto_req_handle_t, int);

/* Local function prototypes */
static int dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen,
    int private);
static int dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode);
static int dca_x509_padding(caddr_t buf, int flen, int tlen);
static int dca_x509_unpadding(char *buf, int tlen, int flen, int mode);
static int decrypt_error_code(int mode, int decrypt, int verify, int def);

int
dca_rsastart(crypto_ctx_t *ctx, crypto_data_t *in, crypto_data_t *out,
    crypto_req_handle_t req, int mode)
{
    dca_request_t *reqp = ctx->cc_provider_private;
    dca_t *dca = ctx->cc_provider;
    caddr_t daddr;
    int rv = CRYPTO_QUEUED;
    int len;

    /* We don't support non-contiguous buffers for RSA */
    if (dca_sgcheck(dca, in, DCA_SG_CONTIG) ||
        dca_sgcheck(dca, out, DCA_SG_CONTIG)) {
        rv = CRYPTO_NOT_SUPPORTED;
        goto errout;
    }

    len = dca_length(in);

    /* Extracting the key attributes is now done in dca_rsainit(). */
    if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
        mode == DCA_RSA_SIGNR) {
        /*
         * Return the length needed to store the output.
         * For sign, sign-recover, and encrypt, the output buffer
         * must be at least modlen bytes, since PKCS#1 or X.509
         * padding will be applied.
         */
        if (dca_length(out) < reqp->dr_ctx.modlen) {
            DBG(dca, DWARN,
                "dca_rsastart: output buffer too short (%d < %d)",
                dca_length(out), reqp->dr_ctx.modlen);
            out->cd_length = reqp->dr_ctx.modlen;
            rv = CRYPTO_BUFFER_TOO_SMALL;
            goto errout;
        }
    }
    if (out != in && out->cd_length > reqp->dr_ctx.modlen)
        out->cd_length = reqp->dr_ctx.modlen;

    /* The input length must not exceed the modulus length */
    if (len > reqp->dr_ctx.modlen) {
        rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
            CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
        goto errout;
    }

    /*
     * For decryption, verify, and verifyRecover, the input length must
     * not be less than the modulus length.
     */
    if (len < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
        mode == DCA_RSA_VRFY || mode == DCA_RSA_VRFYR)) {
        rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
            CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
        goto errout;
    }

    /*
     * For decryption and verifyRecover, the output buffer must not
     * be smaller than the modulus length.
     */
    if (out->cd_length < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
        mode == DCA_RSA_VRFYR) &&
        reqp->dr_ctx.ctx_cm_type == RSA_X_509_MECH_INFO_TYPE) {
        out->cd_length = reqp->dr_ctx.modlen;
        rv = CRYPTO_BUFFER_TOO_SMALL;
        goto errout;
    }

    /* For decrypt and verify, the input must not be shorter than the output */
    if (out && len < out->cd_length) {
        if ((rv = decrypt_error_code(mode,
            CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
            CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_SUCCESS)) !=
            CRYPTO_SUCCESS)
            goto errout;
    }

    if ((daddr = dca_bufdaddr(in)) == NULL && len > 0) {
        rv = CRYPTO_ARGUMENTS_BAD;
        goto errout;
    }

    if (dca_numcmp(daddr, len, (char *)reqp->dr_ctx.mod,
        reqp->dr_ctx.modlen) > 0) {
        DBG(dca, DWARN,
            "dca_rsastart: input larger (numerically) than modulus!");
        rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
            CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);
        goto errout;
    }

    reqp->dr_byte_stat = -1;
    reqp->dr_in = in;
    reqp->dr_out = out;
    reqp->dr_kcf_req = req;
    if (mode == DCA_RSA_VRFY)
        reqp->dr_callback = dca_rsaverifydone;
    else
        reqp->dr_callback = dca_rsadone;

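    /*
     * Copy the input into the pre-mapped kernel buffer in reverse
     * (least significant byte first) order, zero-padded out to the
     * full packet length expected by the chip.
     */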
    dca_reverse(daddr, reqp->dr_ibuf_kaddr, len, reqp->dr_pkt_length);
    if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
        mode == DCA_RSA_SIGNR) {
        /*
         * Needs to pad appropriately for encrypt, sign, and
         * sign_recover
         */
        if (reqp->dr_ctx.ctx_cm_type == RSA_PKCS_MECH_INFO_TYPE) {
            if ((rv = dca_pkcs1_padding(dca, reqp->dr_ibuf_kaddr,
                len, reqp->dr_ctx.modlen, reqp->dr_ctx.pqfix)) !=
                CRYPTO_QUEUED)
                goto errout;
        } else if (reqp->dr_ctx.ctx_cm_type ==
            RSA_X_509_MECH_INFO_TYPE) {
            if ((rv = dca_x509_padding(reqp->dr_ibuf_kaddr,
                len, reqp->dr_pkt_length)) != CRYPTO_QUEUED)
                goto errout;
        }
    }
    reqp->dr_ctx.mode = mode;

    /*
     * Since the max RSA input size is 256 bytes (2048 bits), the first
     * page (at least 4096 bytes) in the pre-mapped buffer is large enough.
     * Therefore, we use this first page for RSA.
     */
    reqp->dr_in_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
    reqp->dr_in_next = 0;
    reqp->dr_in_len = reqp->dr_pkt_length;
    reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
    reqp->dr_out_next = 0;
    reqp->dr_out_len = reqp->dr_pkt_length;

    /* schedule the work by doing a submit */
    rv = dca_start(dca, reqp, MCR2, 1);


errout:
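    /*
     * Keep the context around on CRYPTO_BUFFER_TOO_SMALL so the caller
     * can resubmit with a larger output buffer; anything else that did
     * not get queued is torn down here.
     */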
    if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL)
        (void) dca_free_context(ctx);

    return (rv);
}

void
dca_rsadone(dca_request_t *reqp, int errno)
{
    if (errno == CRYPTO_SUCCESS) {
        int outsz = reqp->dr_out->cd_length;
        caddr_t daddr;

        (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, reqp->dr_out_len,
            DDI_DMA_SYNC_FORKERNEL);
        if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
            reqp->destroy = TRUE;
            errno = CRYPTO_DEVICE_ERROR;
            goto errout;
        }

        if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
            reqp->dr_ctx.mode == DCA_RSA_VRFY ||
            reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
            /*
             * Needs to unpad appropriately for decrypt, verify,
             * and verify_recover
             */
            if (reqp->dr_ctx.ctx_cm_type ==
                RSA_PKCS_MECH_INFO_TYPE) {
                errno = dca_pkcs1_unpadding(
                    reqp->dr_obuf_kaddr, &outsz,
                    reqp->dr_ctx.modlen, reqp->dr_ctx.mode);

                /* check for bad data errors */
                if (errno != CRYPTO_SUCCESS &&
                    errno != CRYPTO_BUFFER_TOO_SMALL) {
                    goto errout;
                }
                if (dca_bufdaddr(reqp->dr_out) == NULL) {
                    errno = CRYPTO_BUFFER_TOO_SMALL;
                }
                if (errno == CRYPTO_BUFFER_TOO_SMALL) {
                    reqp->dr_out->cd_length = outsz;
                    goto errout;
                }
                /* Reset the output data length */
                reqp->dr_out->cd_length = outsz;
            } else if (reqp->dr_ctx.ctx_cm_type ==
                RSA_X_509_MECH_INFO_TYPE) {
                if ((errno = dca_x509_unpadding(
                    reqp->dr_obuf_kaddr, outsz,
                    reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
                    CRYPTO_SUCCESS)
                    goto errout;
            }
        }

        if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
            DBG(reqp->dr_dca, DINTR,
                "dca_rsadone: reqp->dr_out is bad");
            errno = CRYPTO_ARGUMENTS_BAD;
            goto errout;
        }
        /*
         * Note that there may be some number of null bytes
         * at the end of the source (result), but we don't care
         * about them -- they are place holders only and are
         * truncated here.
         */
        dca_reverse(reqp->dr_obuf_kaddr, daddr, outsz, outsz);
    }
errout:
    ASSERT(reqp->dr_kcf_req != NULL);

    /* notify framework that request is completed */
    crypto_op_notification(reqp->dr_kcf_req, errno);
    DBG(reqp->dr_dca, DINTR,
        "dca_rsadone: returning 0x%x to the kef via crypto_op_notification",
        errno);

    /*
     * For non-atomic operations, reqp will be freed in the kCF
     * callback function since it may be needed again if
     * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
     */
    if (reqp->dr_ctx.atomic) {
        crypto_ctx_t ctx;
        ctx.cc_provider_private = reqp;
        dca_rsactxfree(&ctx);
    }
}

void
dca_rsaverifydone(dca_request_t *reqp, int errno)
{
    if (errno == CRYPTO_SUCCESS) {
        char scratch[RSA_MAX_KEY_LEN];
        int outsz = reqp->dr_out->cd_length;
        caddr_t daddr;

        /*
         * ASSUMPTION: the signature length was already
         * checked on the way in, and it is a valid length.
         */
        (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, outsz,
            DDI_DMA_SYNC_FORKERNEL);
        if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
            reqp->destroy = TRUE;
            errno = CRYPTO_DEVICE_ERROR;
            goto errout;
        }

        if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
            reqp->dr_ctx.mode == DCA_RSA_VRFY ||
            reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
            /*
             * Needs to unpad appropriately for decrypt, verify,
             * and verify_recover
             */
            if (reqp->dr_ctx.ctx_cm_type ==
                RSA_PKCS_MECH_INFO_TYPE) {
                errno = dca_pkcs1_unpadding(
                    reqp->dr_obuf_kaddr, &outsz,
                    reqp->dr_ctx.modlen, reqp->dr_ctx.mode);

                /* check for bad data errors */
                if (errno != CRYPTO_SUCCESS &&
                    errno != CRYPTO_BUFFER_TOO_SMALL) {
                    goto errout;
                }
                if (dca_bufdaddr(reqp->dr_out) == NULL) {
                    errno = CRYPTO_BUFFER_TOO_SMALL;
                }
                if (errno == CRYPTO_BUFFER_TOO_SMALL) {
                    reqp->dr_out->cd_length = outsz;
                    goto errout;
                }
                /* Reset the output data length */
                reqp->dr_out->cd_length = outsz;
            } else if (reqp->dr_ctx.ctx_cm_type ==
                RSA_X_509_MECH_INFO_TYPE) {
                if ((errno = dca_x509_unpadding(
                    reqp->dr_obuf_kaddr, outsz,
                    reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
                    CRYPTO_SUCCESS)
                    goto errout;
            }
        }

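        /*
         * Byte-reverse the recovered message into the scratch buffer
         * and compare it against the expected data supplied by the
         * caller in dr_out; any mismatch means the signature did not
         * verify.
         */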
        dca_reverse(reqp->dr_obuf_kaddr, scratch, outsz, outsz);

        if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
            errno = CRYPTO_ARGUMENTS_BAD;
            goto errout;
        }
        if (dca_numcmp(daddr, reqp->dr_out->cd_length, scratch,
            outsz) != 0) {
            /* VERIFY FAILED */
            errno = CRYPTO_SIGNATURE_INVALID;
        }
    }
errout:
    ASSERT(reqp->dr_kcf_req != NULL);

    /* notify framework that request is completed */
    crypto_op_notification(reqp->dr_kcf_req, errno);
    DBG(reqp->dr_dca, DINTR,
        "dca_rsaverifydone: rtn 0x%x to the kef via crypto_op_notification",
        errno);

    /*
     * For non-atomic operations, reqp will be freed in the kCF
     * callback function since it may be needed again if
     * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
     */
    if (reqp->dr_ctx.atomic) {
        crypto_ctx_t ctx;
        ctx.cc_provider_private = reqp;
        dca_rsactxfree(&ctx);
    }
}

/*
 * Setup either a public or a private RSA key for subsequent uses
 */
int
dca_rsainit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, int kmflag)
{
    crypto_object_attribute_t *attr;
    unsigned expname = 0;
    void *attrdata;
    int rv;

    uchar_t *exp;
    uchar_t *p;
    uchar_t *q;
    uchar_t *dp;
    uchar_t *dq;
    uchar_t *pinv;

    unsigned explen = 0;
    unsigned plen = 0;
    unsigned qlen = 0;
    unsigned dplen = 0;
    unsigned dqlen = 0;
    unsigned pinvlen = 0;

    unsigned modbits, expbits, pbits, qbits;
    unsigned modfix, expfix, pqfix = 0;
    uint16_t ctxlen;
    caddr_t kaddr;
    dca_request_t *reqp = NULL;
    dca_t *dca = (dca_t *)ctx->cc_provider;

    DBG(NULL, DENTRY, "dca_rsainit: start");

    if ((reqp = dca_getreq(dca, MCR2, 1)) == NULL) {
        DBG(NULL, DWARN,
            "dca_rsainit: unable to allocate request for RSA");
        rv = CRYPTO_HOST_MEMORY;
        goto errout;
    }

    reqp->dr_ctx.ctx_cm_type = mechanism->cm_type;
    ctx->cc_provider_private = reqp;

    /*
     * Key type can be either RAW, or REFERENCE, or ATTR_LIST (VALUE).
     * Only ATTR_LIST is supported on Deimos for RSA.
     */
    if ((attr = dca_get_key_attr(key)) == NULL) {
        DBG(NULL, DWARN, "dca_rsainit: key attributes missing");
        rv = CRYPTO_KEY_TYPE_INCONSISTENT;
        goto errout;
    }

    if (dca_find_attribute(attr, key->ck_count, CKA_PUBLIC_EXPONENT))
        expname = CKA_PUBLIC_EXPONENT;

    /*
     * An RSA public key has only a public exponent. An RSA private key
     * must have a private exponent, and may also have a public exponent.
     * Thus, the existence of a private exponent indicates a private key.
     */
    if (dca_find_attribute(attr, key->ck_count, CKA_PRIVATE_EXPONENT))
        expname = CKA_PRIVATE_EXPONENT;

    if (!expname) {
        DBG(NULL, DWARN, "dca_rsainit: no exponent in key");
        rv = CRYPTO_ARGUMENTS_BAD;
        goto errout;
    }

    /* Modulus */
    if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_MODULUS,
        &attrdata, &(reqp->dr_ctx.modlen))) != CRYPTO_SUCCESS) {
        DBG(NULL, DWARN, "dca_rsainit: failed to retrieve modulus");
        goto errout;
    }
    if ((reqp->dr_ctx.modlen == 0) ||
        (reqp->dr_ctx.modlen > RSA_MAX_KEY_LEN)) {
        DBG(NULL, DWARN, "dca_rsainit: bad modulus size");
        rv = CRYPTO_ARGUMENTS_BAD;
        goto errout;
    }
    if ((reqp->dr_ctx.mod = kmem_alloc(reqp->dr_ctx.modlen, kmflag)) ==
        NULL) {
        rv = CRYPTO_HOST_MEMORY;
        goto errout;
    }
    bcopy(attrdata, reqp->dr_ctx.mod, reqp->dr_ctx.modlen);

    /* Exponent */
    if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, expname,
        (void **) &exp, &explen)) != CRYPTO_SUCCESS) {
        DBG(NULL, DWARN, "dca_rsainit: failed to retrieve exponent");
        goto errout;
    }
    if ((explen == 0) || (explen > RSA_MAX_KEY_LEN)) {
        DBG(NULL, DWARN, "dca_rsainit: bad exponent size");
        rv = CRYPTO_ARGUMENTS_BAD;
        goto errout;
    }

    /* Lookup private attributes */
    if (expname == CKA_PRIVATE_EXPONENT) {
        /* Prime 1 */
        (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
            CKA_PRIME_1, (void **)&q, &qlen);

        /* Prime 2 */
        (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
            CKA_PRIME_2, (void **)&p, &plen);

        /* Exponent 1 */
        (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
            CKA_EXPONENT_1, (void **)&dq, &dqlen);

        /* Exponent 2 */
        (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
            CKA_EXPONENT_2, (void **)&dp, &dplen);

        /* Coefficient */
        (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
            CKA_COEFFICIENT, (void **)&pinv, &pinvlen);
    }

    modbits = dca_bitlen(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);
    expbits = dca_bitlen(exp, explen);

    if ((modfix = dca_padfull(modbits)) == 0) {
        DBG(NULL, DWARN, "dca_rsainit: modulus too long");
        rv = CRYPTO_KEY_SIZE_RANGE;
        goto errout;
    }
    expfix = ROUNDUP(explen, sizeof (uint32_t));

    if (plen && qlen && dplen && dqlen && pinvlen) {
        unsigned pfix, qfix;
        qbits = dca_bitlen(q, qlen);
        pbits = dca_bitlen(p, plen);
        qfix = dca_padhalf(qbits);
        pfix = dca_padhalf(pbits);
        if (pfix & qfix)
            pqfix = max(pfix, qfix);
    }

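    /*
     * A non-zero pqfix means a complete CRT private key (p, q, dp, dq,
     * pinv) was supplied, so the chip's RSA private-key path is used;
     * otherwise the RSAPUBLIC command is used with the modulus and
     * whichever exponent was supplied.
     */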
    if (pqfix) {
        reqp->dr_job_stat = DS_RSAPRIVATE;
        reqp->dr_pkt_length = 2 * pqfix;
    } else {
        reqp->dr_job_stat = DS_RSAPUBLIC;
        reqp->dr_pkt_length = modfix;
    }

    if (pqfix) {
        /*
         * NOTE: chip's notion of p vs. q is reversed from
         * PKCS#11. We use the chip's notion in our variable
         * naming.
         */
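        /*
         * The private-key context written below is an 8-byte header
         * (command, length, q and p bit lengths) followed by the five
         * CRT bignums p, q, dp, dq and pinv, each padded out to pqfix
         * bytes.
         */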
        ctxlen = 8 + pqfix * 5;

        /* write out the context structure */
        PUTCTX16(reqp, CTX_CMD, CMD_RSAPRIVATE);
        PUTCTX16(reqp, CTX_LENGTH, ctxlen);
        /* exponent and modulus length in bits!!! */
        PUTCTX16(reqp, CTX_RSAQLEN, qbits);
        PUTCTX16(reqp, CTX_RSAPLEN, pbits);

        kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;

        /* store the bignums */
        dca_reverse(p, kaddr, plen, pqfix);
        kaddr += pqfix;

        dca_reverse(q, kaddr, qlen, pqfix);
        kaddr += pqfix;

        dca_reverse(dp, kaddr, dplen, pqfix);
        kaddr += pqfix;

        dca_reverse(dq, kaddr, dqlen, pqfix);
        kaddr += pqfix;

        dca_reverse(pinv, kaddr, pinvlen, pqfix);
        kaddr += pqfix;
    } else {
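        /*
         * The public-key context is an 8-byte header (command, length,
         * exponent and modulus bit lengths) followed by the modulus
         * padded to modfix bytes and the exponent padded to expfix
         * bytes.
         */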
        ctxlen = 8 + modfix + expfix;
        /* write out the context structure */
        PUTCTX16(reqp, CTX_CMD, CMD_RSAPUBLIC);
        PUTCTX16(reqp, CTX_LENGTH, (uint16_t)ctxlen);
        /* exponent and modulus length in bits!!! */
        PUTCTX16(reqp, CTX_RSAEXPLEN, expbits);
        PUTCTX16(reqp, CTX_RSAMODLEN, modbits);

        kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;

        /* store the bignums */
        dca_reverse(reqp->dr_ctx.mod, kaddr, reqp->dr_ctx.modlen,
            modfix);
        kaddr += modfix;

        dca_reverse(exp, kaddr, explen, expfix);
        kaddr += expfix;
    }

    reqp->dr_ctx.pqfix = pqfix;

errout:
    if (rv != CRYPTO_SUCCESS)
        dca_rsactxfree(ctx);

    return (rv);
}

void
dca_rsactxfree(void *arg)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)arg;
    dca_request_t *reqp = ctx->cc_provider_private;

    if (reqp == NULL)
        return;

    if (reqp->dr_ctx.mod)
        kmem_free(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);

    reqp->dr_ctx.mode = 0;
    reqp->dr_ctx.ctx_cm_type = 0;
    reqp->dr_ctx.mod = NULL;
    reqp->dr_ctx.modlen = 0;
    reqp->dr_ctx.pqfix = 0;
    reqp->dr_ctx.atomic = 0;

    if (reqp->destroy)
        dca_destroyreq(reqp);
    else
        dca_freereq(reqp);

    ctx->cc_provider_private = NULL;
}

int
dca_rsaatomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *input, crypto_data_t *output,
    int kmflag, crypto_req_handle_t req, int mode)
{
    crypto_ctx_t ctx;	/* on the stack */
    int rv;

    ctx.cc_provider = provider;
    ctx.cc_session = session_id;

    rv = dca_rsainit(&ctx, mechanism, key, kmflag);
    if (rv != CRYPTO_SUCCESS) {
        DBG(NULL, DWARN, "dca_rsaatomic: dca_rsainit() failed");
        /* The content of ctx should have been freed already */
        return (rv);
    }

    /*
     * Set the atomic flag so that the hardware callback function
     * will free the context.
     */
    ((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;

    /* check for inplace ops */
    if (input == output) {
        ((dca_request_t *)ctx.cc_provider_private)->dr_flags
            |= DR_INPLACE;
    }

    rv = dca_rsastart(&ctx, input, output, req, mode);

    /*
     * The context will be freed in the hardware callback function if it
     * is queued
     */
    if (rv != CRYPTO_QUEUED)
        dca_rsactxfree(&ctx);

    return (rv);
}

/*
 * For RSA_PKCS padding and unpadding:
 * 1. The minimum padding is 11 bytes.
 * 2. The first and the last bytes must be 0.
 * 3. The second byte is 1 for private and 2 for public keys.
 * 4. Pad with 0xff for private and non-zero random for public keys.
 */
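/*
 * Because the input has already been byte-reversed for the chip, the
 * padded block built by dca_pkcs1_padding() is laid out in memory as:
 *
 *   buf[0 .. flen-1]        reversed input data
 *   buf[flen]               0x00 separator
 *   buf[flen+1 .. tlen-3]   0xff filler (private) or non-zero random (public)
 *   buf[tlen-2]             block type: 0x01 (private) or 0x02 (public)
 *   buf[tlen-1]             0x00
 *
 * Read back most-significant-byte first, this is the usual PKCS#1 v1.5
 * block 0x00 || BT || PS || 0x00 || D.
 */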
static int
dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen, int private)
{
    int i;

    DBG(NULL, DENTRY,
        "dca_pkcs1_padding: tlen: %d, flen: %d: private: %d\n",
        tlen, flen, private);

    if (flen > tlen - 11)
        return (CRYPTO_DATA_LEN_RANGE);

    if (private) {
        /* Padding for private encrypt */
        buf[flen] = '\0';
        for (i = flen + 1; i < tlen - 2; i++) {
            buf[i] = (unsigned char) 0xff;
        }
        buf[tlen - 2] = 1;
        buf[tlen - 1] = 0;
    } else {
        /* Padding for public encrypt */
        buf[flen] = '\0';

        if (dca_random_buffer(dca, &buf[flen+1], tlen - flen - 3) !=
            CRYPTO_SUCCESS)
            return (CRYPTO_RANDOM_NO_RNG);

        buf[tlen - 2] = 2;
        buf[tlen - 1] = 0;
    }

    return (CRYPTO_QUEUED);
}

static int
dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode)
{
    int i;
    const unsigned char *p;
    unsigned char type;

    DBG(NULL, DENTRY, "dca_pkcs1_unpadding: tlen: %d, flen: %d\n",
        *tlen, flen);

    p = (unsigned char *) buf + (flen-1);
    if (*(p--) != 0)
        return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
            CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);

    /* It is ok if the data length is 0 after removing the padding */
    type = *(p--);
    if (type == 01) {
        for (i = flen - 3; i >= 0; i--) {
            if (*p != 0xff) {
                if (*p == '\0') {
                    p--;
                    break;
                } else {
                    return decrypt_error_code(mode,
                        CRYPTO_ENCRYPTED_DATA_INVALID,
                        CRYPTO_SIGNATURE_INVALID,
                        CRYPTO_DATA_INVALID);
                }
            }
            p--;
        }
    } else if (type == 02) {
        for (i = flen - 3; i >= 0; i--) {
            if (*p == '\0') {
                p--;
                break;
            }
            p--;
        }
    } else {
        return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
            CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);
    }

    /* i < 0 means did not find the end of the padding */
    if (i < 0)
        return decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
            CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);

    if (i > *tlen) {
        *tlen = i;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    if (flen - i < 11)
        return decrypt_error_code(mode,
            CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
            CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);

    /* Return the unpadded length to the caller */
    *tlen = i;

    return (CRYPTO_SUCCESS);
}

/*
 * For RSA_X_509 padding and unpadding, pad with 0s before the actual data.
 * Note that the data will be in reverse order.
 */
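/*
 * In the reversed layout used here, the input data occupies
 * buf[0 .. flen-1] and the zero padding fills buf[flen .. tlen-1],
 * which corresponds to leading zero bytes once the result is read
 * back most-significant-byte first.
 */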
static int
dca_x509_padding(caddr_t buf, int flen, int tlen)
{
    DBG(NULL, DENTRY, "dca_x509_padding: tlen: %d, flen: %d\n",
        tlen, flen);

    /* Zero the tail of the (reversed) buffer, i.e. buf[flen .. tlen-1]. */
    bzero(buf+flen, tlen - flen);

    return (CRYPTO_QUEUED);
}

/* ARGSUSED */
static int
dca_x509_unpadding(char *buf, int tlen, int flen, int mode)
{
    int i;
    const unsigned char *p;

    DBG(NULL, DENTRY, "dca_x509_unpadding: tlen: %d, flen: %d\n",
        tlen, flen);

    p = (unsigned char *) buf + flen;
    for (i = tlen; i < flen; i++) {
        if (*(--p) != 0)
            return (CRYPTO_SIGNATURE_INVALID);
    }

    return (CRYPTO_SUCCESS);
}

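/*
 * Map a generic failure onto the error code appropriate for the current
 * operation: decrypt, verify/verify-recover, or the supplied default.
 */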
static int decrypt_error_code(int mode, int decrypt, int verify, int def)
{
    switch (mode) {
    case DCA_RSA_DEC:
        return (decrypt);
    case DCA_RSA_VRFY:
    case DCA_RSA_VRFYR:
        return (verify);
    default:
        return (def);
    }
}