xref: /titanic_41/usr/src/uts/common/crypto/io/dca_3des.c (revision 4b56a00321e0ce508e55cc5e43e3ad7b00005a39)
1 
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 /*
29  * Deimos - cryptographic acceleration based upon Broadcom 582x.
30  */
31 
32 #include <sys/types.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/kmem.h>
36 #include <sys/note.h>
37 #include <sys/crypto/common.h>
38 #include <sys/crypto/spi.h>
39 #include <sys/crypto/dca.h>
40 
41 #if defined(__i386) || defined(__amd64)
42 #include <sys/byteorder.h>
43 #define	UNALIGNED_POINTERS_PERMITTED
44 #endif
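
/*
 * UNALIGNED_POINTERS_PERMITTED is defined only for x86, where unaligned
 * 32-bit loads are allowed.  On those platforms the IV and key bytes below
 * are read through uint32_t pointers and converted with htonl(); elsewhere
 * the bytes are assembled with shifts.  Both paths yield the same values:
 * the raw bytes interpreted as big-endian 32-bit words.
 */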
45 
46 /*
47  * 3DES implementation.
48  */
49 
50 static int dca_3desstart(dca_t *, uint32_t, dca_request_t *);
51 static void dca_3desdone(dca_request_t *, int);
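
/*
 * Flow: dca_3desctxinit() loads the DES/3DES key and IV into a
 * dca_request_t and programs the hardware context; dca_3des(),
 * dca_3desupdate() and dca_3desatomic() validate and stage the data;
 * dca_3desstart() binds or copies the buffers and submits the job to MCR1;
 * dca_3desdone() runs as the completion callback, scatters the result,
 * saves the next IV and resubmits any remaining input.
 */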
52 
53 
54 int
55 dca_3des(crypto_ctx_t *ctx, crypto_data_t *in,
56     crypto_data_t *out, crypto_req_handle_t req, int flags)
57 {
58 	int			len;
59 	int			rv;
60 	dca_request_t		*reqp = ctx->cc_provider_private;
61 	dca_request_t		*des_ctx = ctx->cc_provider_private;
62 	dca_t			*dca = ctx->cc_provider;
63 	crypto_data_t		*nin = &reqp->dr_ctx.in_dup;
64 
65 	len = dca_length(in);
66 	if (len % DESBLOCK) {
67 		DBG(dca, DWARN, "input not an integral number of DES blocks");
68 		(void) dca_free_context(ctx);
69 		if (flags & DR_DECRYPT) {
70 			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
71 		} else {
72 			return (CRYPTO_DATA_LEN_RANGE);
73 		}
74 	}
75 
76 	/*
77 	 * If cd_miscdata is non-null, it contains the IV.
78 	 */
79 	if (in->cd_miscdata != NULL) {
80 #ifdef UNALIGNED_POINTERS_PERMITTED
81 		uint32_t	*p = (uint32_t *)in->cd_miscdata;
82 		des_ctx->dr_ctx.iv[0] = htonl(p[0]);
83 		des_ctx->dr_ctx.iv[1] = htonl(p[1]);
84 #else
85 		uchar_t	*p = (uchar_t *)in->cd_miscdata;
86 		des_ctx->dr_ctx.iv[0] = p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];
87 		des_ctx->dr_ctx.iv[1] = p[4]<<24 | p[5]<<16 | p[6]<<8 | p[7];
88 #endif	/* UNALIGNED_POINTERS_PERMITTED */
89 	}
90 
91 	if (len > dca_length(out)) {
92 		DBG(dca, DWARN, "inadequate output space (need %d, got %d)",
93 		    len, dca_length(out));
94 		out->cd_length = len;
95 		/* Do not free the context since the app will call again */
96 		return (CRYPTO_BUFFER_TOO_SMALL);
97 	}
98 
99 	if ((rv = dca_verifyio(in, out)) != CRYPTO_SUCCESS) {
100 		(void) dca_free_context(ctx);
101 		return (rv);
102 	}
103 
104 	/* special handling for null-sized input buffers */
105 	if (len == 0) {
106 		out->cd_length = 0;
107 		(void) dca_free_context(ctx);
108 		return (CRYPTO_SUCCESS);
109 	}
110 
111 	/*
112 	 * Make a local copy of the input crypto_data_t structure. This
113 	 * allows it to be manipulated locally and lets us handle in-place
114 	 * data (i.e., in == out). Note that "nin" has been pre-allocated;
115 	 * only the fields are copied, not the actual data.
116 	 */
117 	if ((rv = dca_dupcrypto(in, nin)) != CRYPTO_SUCCESS) {
118 		(void) dca_free_context(ctx);
119 		return (rv);
120 	}
121 
122 	/* Set output to zero ready to take the processed data */
123 	out->cd_length = 0;
124 
125 	reqp->dr_kcf_req = req;
126 	reqp->dr_in = nin;
127 	reqp->dr_out = out;
128 	reqp->dr_job_stat = DS_3DESJOBS;
129 	reqp->dr_byte_stat = DS_3DESBYTES;
130 
131 	rv = dca_3desstart(dca, flags, reqp);
132 
133 	/* Otherwise the context will be freed in the KCF callback function */
134 	if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL) {
135 		(void) dca_free_context(ctx);
136 	}
137 	return (rv);
138 }
139 
140 
141 void
142 dca_3desctxfree(void *arg)
143 {
144 	crypto_ctx_t	*ctx = (crypto_ctx_t *)arg;
145 	dca_request_t	*des_ctx = ctx->cc_provider_private;
146 
147 	if (des_ctx == NULL)
148 		return;
149 
150 	des_ctx->dr_ctx.atomic = 0;
151 	des_ctx->dr_ctx.ctx_cm_type = 0;
152 	ctx->cc_provider_private = NULL;
153 
154 	if (des_ctx->destroy)
155 		dca_destroyreq(des_ctx);
156 	else
157 		/* Return it to the pool */
158 		dca_freereq(des_ctx);
159 }
160 
161 int
162 dca_3desupdate(crypto_ctx_t *ctx, crypto_data_t *in,
163     crypto_data_t *out, crypto_req_handle_t req, int flags)
164 {
165 	int			len;
166 	int			rawlen;
167 	int			rv;
168 	dca_request_t		*reqp = ctx->cc_provider_private;
169 	dca_request_t		*des_ctx = ctx->cc_provider_private;
170 	dca_t			*dca = ctx->cc_provider;
171 	crypto_data_t		*nin = &reqp->dr_ctx.in_dup;
172 
173 	rawlen = dca_length(in) + des_ctx->dr_ctx.residlen;
174 
175 	len = ROUNDDOWN(rawlen, DESBLOCK);
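	/*
	 * Worked example: with 5 residual bytes held over from the previous
	 * update and a 20-byte input, rawlen is 25; len rounds down to 24
	 * (three whole DES blocks) and the single leftover byte becomes the
	 * new residual for the next update or the final call.
	 */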
176 	/*
177 	 * If cd_miscdata is non-null, it contains the IV.
178 	 */
179 	if (in->cd_miscdata != NULL) {
180 #ifdef UNALIGNED_POINTERS_PERMITTED
181 		uint32_t	*p = (uint32_t *)in->cd_miscdata;
182 		des_ctx->dr_ctx.iv[0] = htonl(p[0]);
183 		des_ctx->dr_ctx.iv[1] = htonl(p[1]);
184 #else
185 		uchar_t	*p = (uchar_t *)in->cd_miscdata;
186 		des_ctx->dr_ctx.iv[0] = p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];
187 		des_ctx->dr_ctx.iv[1] = p[4]<<24 | p[5]<<16 | p[6]<<8 | p[7];
188 #endif	/* UNALIGNED_POINTERS_PERMITTED */
189 	}
190 
191 	if (len > dca_length(out)) {
192 		DBG(dca, DWARN, "not enough output space (need %d, got %d)",
193 		    len, dca_length(out));
194 		out->cd_length = len;
195 		/* Do not free the context since the app will call again */
196 		return (CRYPTO_BUFFER_TOO_SMALL);
197 	}
198 
199 	if ((rv = dca_verifyio(in, out)) != CRYPTO_SUCCESS) {
200 		(void) dca_free_context(ctx);
201 		return (rv);
202 	}
203 
204 	reqp->dr_kcf_req = req;
205 
206 	/*
207 	 * From here on out, we are committed.
208 	 */
209 
210 	if (len == 0) {
211 		/*
212 		 * No blocks being encrypted, so we just accumulate the
213 		 * input for the next pass and return.
214 		 */
215 		if ((rv = dca_getbufbytes(in, 0,
216 		    (rawlen % DESBLOCK) - des_ctx->dr_ctx.residlen,
217 		    des_ctx->dr_ctx.resid + des_ctx->dr_ctx.residlen)) !=
218 		    CRYPTO_SUCCESS) {
219 			DBG(dca, DWARN,
220 	    "dca_3desupdate: dca_getbufbytes() failed for residual only pass");
221 			dca_freereq(reqp);
222 			return (rv);
223 		}
224 		des_ctx->dr_ctx.residlen = rawlen % DESBLOCK;
225 
226 		out->cd_length = 0;
227 		/*
228 		 * Do not free the context here since it will be done
229 		 * in the final function
230 		 */
231 		return (CRYPTO_SUCCESS);
232 	}
233 
234 	/*
235 	 * Set up rbuf for previous residual data.
236 	 */
237 	if (des_ctx->dr_ctx.residlen) {
238 		bcopy(des_ctx->dr_ctx.resid, des_ctx->dr_ctx.activeresid,
239 		    des_ctx->dr_ctx.residlen);
240 		des_ctx->dr_ctx.activeresidlen = des_ctx->dr_ctx.residlen;
241 	}
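	/*
	 * Two residual buffers are in play here: dr_ctx.resid holds the tail
	 * bytes of this request that do not fill a whole DES block and are
	 * carried into the next update, while dr_ctx.activeresid holds the
	 * bytes carried over from the previous update, which dca_3desstart()
	 * prepends (via dca_resid_gather()) to the current input.
	 */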
242 
243 	/*
244 	 * Locate and save residual data for next encrypt_update.
245 	 */
246 	if ((rv = dca_getbufbytes(in, len - des_ctx->dr_ctx.residlen,
247 	    rawlen % DESBLOCK, des_ctx->dr_ctx.resid)) != CRYPTO_SUCCESS) {
248 		DBG(dca, DWARN, "dca_3desupdate: dca_getbufbytes() failed");
249 		(void) dca_free_context(ctx);
250 		return (rv);
251 	}
252 
253 	/* Calculate new residual length. */
254 	des_ctx->dr_ctx.residlen = rawlen % DESBLOCK;
255 
256 	/*
257 	 * Make a local copy of the input crypto_data_t structure. This
258 	 * allows it to be manipulated locally and lets us handle in-place
259 	 * data (i.e., in == out).
260 	 */
261 	if ((rv = dca_dupcrypto(in, nin)) != CRYPTO_SUCCESS) {
262 		(void) dca_free_context(ctx);
263 		return (rv);
264 	}
265 
266 	/* Set output to zero ready to take the processed data */
267 	out->cd_length = 0;
268 
269 	reqp->dr_in = nin;
270 	reqp->dr_out = out;
271 	reqp->dr_job_stat = DS_3DESJOBS;
272 	reqp->dr_byte_stat = DS_3DESBYTES;
273 
274 	rv = dca_3desstart(dca, flags, reqp);
275 
276 	/*
277 	 * As this is multi-part the context is cleared on success
278 	 * (CRYPTO_QUEUED) in dca_3desfinal().
279 	 */
280 
281 	if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL) {
282 		(void) dca_free_context(ctx);
283 	}
284 	return (rv);
285 }
286 
287 int
288 dca_3desfinal(crypto_ctx_t *ctx, crypto_data_t *out, int mode)
289 {
290 	dca_request_t	*des_ctx = ctx->cc_provider_private;
291 	dca_t		*dca = ctx->cc_provider;
292 	int		rv = CRYPTO_SUCCESS;
293 
294 	ASSERT(ctx->cc_provider_private != NULL);
295 	/*
296 	 * There must be no unprocessed ciphertext/plaintext.
297 	 * This happens if the length of the last data is
298 	 * A residual is left unprocessed if the total data length
299 	 * is not a multiple of the DES block length.
300 	if (des_ctx->dr_ctx.residlen != 0) {
301 		DBG(dca, DWARN, "dca_3desfinal: invalid nonzero residual");
302 		if (mode & DR_DECRYPT) {
303 			rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
304 		} else {
305 			rv = CRYPTO_DATA_LEN_RANGE;
306 		}
307 	}
308 	(void) dca_free_context(ctx);
309 	out->cd_length = 0;
310 	return (rv);
311 }
312 
313 int
314 dca_3desatomic(crypto_provider_handle_t provider,
315     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
316     crypto_key_t *key, crypto_data_t *input, crypto_data_t *output,
317     int kmflag, crypto_req_handle_t req, int mode)
318 {
319 	crypto_ctx_t	ctx;	/* on the stack */
320 	int		rv;
321 
322 	ctx.cc_provider = provider;
323 	ctx.cc_session = session_id;
324 
325 	/*
326 	 * Input must be a multiple of the block size. This test only
327 	 * works for non-padded mechanisms when the blocksize is 2^N.
328 	 */
329 	if ((dca_length(input) & (DESBLOCK - 1)) != 0) {
330 		DBG(NULL, DWARN, "dca_3desatomic: input not multiple of BS");
331 		if (mode & DR_DECRYPT) {
332 			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
333 		} else {
334 			return (CRYPTO_DATA_LEN_RANGE);
335 		}
336 	}
337 
338 	rv = dca_3desctxinit(&ctx, mechanism, key, kmflag, mode);
339 	if (rv != CRYPTO_SUCCESS) {
340 		DBG(NULL, DWARN, "dca_3desatomic: dca_3desctxinit() failed");
341 		return (rv);
342 	}
343 
344 	/*
345 	 * Set the atomic flag so that the hardware callback function
346 	 * will free the context.
347 	 */
348 	((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;
349 
350 	/* check for inplace ops */
351 	if (input == output) {
352 		((dca_request_t *)ctx.cc_provider_private)->dr_flags
353 		    |= DR_INPLACE;
354 	}
355 
356 	rv = dca_3des(&ctx, input, output, req, mode);
357 	if ((rv != CRYPTO_QUEUED) && (rv != CRYPTO_SUCCESS)) {
358 		DBG(NULL, DWARN, "dca_3desatomic: dca_3des() failed");
359 		output->cd_length = 0;
360 	}
361 
362 	/*
363 	 * The features of dca_3desfinal() are implemented within
364 	 * dca_3desdone() due to the asynchronous nature of dca_3des().
365 	 */
366 
367 	/*
368 	 * The context will be freed in the hardware callback function if it
369 	 * is queued
370 	 */
371 	if (rv != CRYPTO_QUEUED)
372 		dca_3desctxfree(&ctx);
373 
374 	return (rv);
375 }
376 
377 int
378 dca_3desstart(dca_t *dca, uint32_t flags, dca_request_t *reqp)
379 {
380 	size_t		len;
381 	crypto_data_t	*in = reqp->dr_in;
382 	int		rv;
383 	dca_request_t	*ctx = reqp;
384 	uint32_t	iv[2];
385 
386 	/*
387 	 * Preconditions:
388 	 * 1) in and out point to the "right" buffers.
389 	 * 2) in->b_bcount - in->b_resid == initial offset
390 	 * 3) likewise for out
391 	 * 4) there is enough space in the output
392 	 * 5) we perform a block for block encrypt
393 	 */
394 	len = ctx->dr_ctx.activeresidlen + dca_length(in);
395 	len = ROUNDDOWN(min(len, MAXPACKET), DESBLOCK);
396 	reqp->dr_pkt_length = (uint16_t)len;
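	/*
	 * A single pass is capped at MAXPACKET bytes and rounded down to a
	 * whole number of DES blocks; anything beyond that is left in dr_in
	 * and picked up by dca_3desdone(), which resubmits via
	 * dca_3desstart() until the input is consumed.
	 */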
397 
398 	/* collect IVs for this pass */
399 	iv[0] = ctx->dr_ctx.iv[0];
400 	iv[1] = ctx->dr_ctx.iv[1];
401 
402 	/*
403 	 * For decrypt, also collect the IV for the next pass.  The IV
404 	 * must be collected BEFORE decryption, or else we will lose it.
405 	 * (For encrypt, we grab the IV AFTER encryption, in
406 	 * dca_3desdone.)
407 	 */
408 	if (flags & DR_DECRYPT) {
409 		uchar_t		ivstore[DESBLOCK];
410 #ifdef UNALIGNED_POINTERS_PERMITTED
411 		uint32_t	*ivp = (uint32_t *)ivstore;
412 #else
413 		uchar_t		*ivp = ivstore;
414 #endif	/* UNALIGNED_POINTERS_PERMITTED */
415 
416 		/* get last 8 bytes of ciphertext for IV of next op */
417 		/*
418 		 * If we're processing only a DESBLOCKS worth of data
419 		 * If we're processing only a DESBLOCK's worth of data
420 		 * needed for the IV also.
421 		 */
422 		if ((len == DESBLOCK) && ctx->dr_ctx.activeresidlen) {
423 			/* Bring the active residual into play */
424 			bcopy(ctx->dr_ctx.activeresid, ivstore,
425 			    ctx->dr_ctx.activeresidlen);
426 			rv = dca_getbufbytes(in, 0,
427 			    DESBLOCK - ctx->dr_ctx.activeresidlen,
428 			    ivstore + ctx->dr_ctx.activeresidlen);
429 		} else {
430 			rv = dca_getbufbytes(in,
431 			    len - DESBLOCK - ctx->dr_ctx.activeresidlen,
432 			    DESBLOCK, ivstore);
433 		}
434 
435 		if (rv != CRYPTO_SUCCESS) {
436 			DBG(dca, DWARN,
437 			    "dca_3desstart: dca_getbufbytes() failed");
438 			return (rv);
439 		}
440 
441 		/* store as a pair of native 32-bit values */
442 #ifdef UNALIGNED_POINTERS_PERMITTED
443 		ctx->dr_ctx.iv[0] = htonl(ivp[0]);
444 		ctx->dr_ctx.iv[1] = htonl(ivp[1]);
445 #else
446 		ctx->dr_ctx.iv[0] =
447 		    ivp[0]<<24 | ivp[1]<<16 | ivp[2]<<8 | ivp[3];
448 		ctx->dr_ctx.iv[1] =
449 		    ivp[4]<<24 | ivp[5]<<16 | ivp[6]<<8 | ivp[7];
450 #endif	/* UNALIGNED_POINTERS_PERMITTED */
451 	}
452 
453 	/* Decide whether we must pull the data up or can DMA it directly. */
454 	reqp->dr_flags &= ~(DR_SCATTER | DR_GATHER);
455 	if ((len < dca_mindma) || (ctx->dr_ctx.activeresidlen > 0) ||
456 	    dca_sgcheck(dca, reqp->dr_in, DCA_SG_CONTIG) ||
457 	    dca_sgcheck(dca, reqp->dr_out, DCA_SG_WALIGN)) {
458 		reqp->dr_flags |= DR_SCATTER | DR_GATHER;
459 	}
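	/*
	 * When DR_GATHER/DR_SCATTER are set we take the pull-up path: the
	 * input is copied into the driver's dr_ibuf bounce buffer and the
	 * result is copied back out of dr_obuf in dca_3desdone().  When the
	 * flags are clear, the caller's buffers are bound for direct DMA by
	 * dca_bindchains() below.
	 */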
460 
461 	/* Try to do direct DMA. */
462 	if (!(reqp->dr_flags & (DR_SCATTER | DR_GATHER))) {
463 		if (dca_bindchains(reqp, len, len) == DDI_SUCCESS) {
464 			reqp->dr_in->cd_offset += len;
465 			reqp->dr_in->cd_length -= len;
466 		} else {
467 			DBG(dca, DWARN,
468 			    "dca_3desstart: dca_bindchains() failed");
469 			return (CRYPTO_DEVICE_ERROR);
470 		}
471 	}
472 
473 	/* gather the data into the device */
474 	if (reqp->dr_flags & DR_GATHER) {
475 		rv = dca_resid_gather(in, (char *)ctx->dr_ctx.activeresid,
476 		    &ctx->dr_ctx.activeresidlen, reqp->dr_ibuf_kaddr, len);
477 		if (rv != CRYPTO_SUCCESS) {
478 			DBG(dca, DWARN,
479 			    "dca_3desstart: dca_resid_gather() failed");
480 			return (rv);
481 		}
482 		/*
483 		 * Set up the input chain for the data just gathered.
484 		 * The input buffer is a multi-entry chain for x86 and
485 		 * a single entry chain for Sparc.
486 		 * Use the actual length if the first entry is sufficient.
487 		 */
488 		(void) ddi_dma_sync(reqp->dr_ibuf_dmah, 0, len,
489 		    DDI_DMA_SYNC_FORDEV);
490 		if (dca_check_dma_handle(dca, reqp->dr_ibuf_dmah,
491 		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
492 			reqp->destroy = TRUE;
493 			return (CRYPTO_DEVICE_ERROR);
494 		}
495 
496 		reqp->dr_in_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
497 		reqp->dr_in_next = reqp->dr_ibuf_head.dc_next_paddr;
498 		if (len > reqp->dr_ibuf_head.dc_buffer_length)
499 			reqp->dr_in_len = reqp->dr_ibuf_head.dc_buffer_length;
500 		else
501 			reqp->dr_in_len = len;
502 	}
503 	/*
504 	 * Setup for scattering the result back out
505 	 * The output buffer is a multi-entry chain for x86 and
506 	 * a single entry chain for Sparc.
507 	 * Use the actual length if the first entry is sufficient.
508 	 */
509 	if (reqp->dr_flags & DR_SCATTER) {
510 		reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
511 		reqp->dr_out_next = reqp->dr_obuf_head.dc_next_paddr;
512 		if (len > reqp->dr_obuf_head.dc_buffer_length)
513 			reqp->dr_out_len = reqp->dr_obuf_head.dc_buffer_length;
514 		else
515 			reqp->dr_out_len = len;
516 	}
517 
518 	reqp->dr_flags |= flags;
519 	reqp->dr_callback = dca_3desdone;
520 
521 	/* write out the context structure */
522 	PUTCTX32(reqp, CTX_3DESIVHI, iv[0]);
523 	PUTCTX32(reqp, CTX_3DESIVLO, iv[1]);
524 
525 	/* schedule the work by doing a submit */
526 	return (dca_start(dca, reqp, MCR1, 1));
527 }
528 
529 void
530 dca_3desdone(dca_request_t *reqp, int errno)
531 {
532 	crypto_data_t	*out = reqp->dr_out;
533 	dca_request_t	*ctx = reqp;
534 	ASSERT(ctx != NULL);
535 
536 	if (errno == CRYPTO_SUCCESS) {
537 		size_t		off;
538 		/*
539 		 * Save the offset: this has to be done *before* dca_scatter
540 		 * modifies the buffer.  We add the amount already placed in
541 		 * the output to the size of this packet and back up one
542 		 * block to find the offset of the final block.
543 		 */
544 		off = dca_length(out) + reqp->dr_pkt_length - DESBLOCK;
545 
546 		if (reqp->dr_flags & DR_SCATTER) {
547 			(void) ddi_dma_sync(reqp->dr_obuf_dmah, 0,
548 			    reqp->dr_out_len, DDI_DMA_SYNC_FORKERNEL);
549 			if (dca_check_dma_handle(reqp->dr_dca,
550 			    reqp->dr_obuf_dmah, DCA_FM_ECLASS_NONE) !=
551 			    DDI_SUCCESS) {
552 				reqp->destroy = TRUE;
553 				errno = CRYPTO_DEVICE_ERROR;
554 				goto errout;
555 			}
556 
557 			errno = dca_scatter(reqp->dr_obuf_kaddr,
558 			    reqp->dr_out, reqp->dr_out_len, 0);
559 			if (errno != CRYPTO_SUCCESS) {
560 				DBG(NULL, DWARN,
561 				    "dca_3desdone: dca_scatter() failed");
562 				goto errout;
563 			}
564 
565 		} else {
566 			/* we've processed some more data */
567 			out->cd_length += reqp->dr_pkt_length;
568 		}
569 
570 
571 		/*
572 		 * For encryption only, we have to grab the IV for the
573 		 * next pass AFTER encryption.
574 		 */
575 		if (reqp->dr_flags & DR_ENCRYPT) {
576 			uchar_t		ivstore[DESBLOCK];
577 #ifdef UNALIGNED_POINTERS_PERMITTED
578 			uint32_t	*iv = (uint32_t *)ivstore;
579 #else
580 			uchar_t		*iv = ivstore;
581 #endif	/* UNALIGNED_POINTERS_PERMITTED */
582 
583 			/* get last 8 bytes for IV of next op */
584 			errno = dca_getbufbytes(out, off, DESBLOCK,
585 			    (uchar_t *)iv);
586 			if (errno != CRYPTO_SUCCESS) {
587 				DBG(NULL, DWARN,
588 				    "dca_3desdone: dca_getbufbytes() failed");
589 				goto errout;
590 			}
591 
592 			/* store as a pair of native 32-bit values */
593 #ifdef UNALIGNED_POINTERS_PERMITTED
594 			ctx->dr_ctx.iv[0] = htonl(iv[0]);
595 			ctx->dr_ctx.iv[1] = htonl(iv[1]);
596 #else
597 			ctx->dr_ctx.iv[0] =
598 			    iv[0]<<24 | iv[1]<<16 | iv[2]<<8 | iv[3];
599 			ctx->dr_ctx.iv[1] =
600 			    iv[4]<<24 | iv[5]<<16 | iv[6]<<8 | iv[7];
601 #endif	/* UNALIGNED_POINTERS_PERMITTED */
602 		}
603 
604 		/*
605 		 * If there is more to do, then reschedule another
606 		 * pass.
607 		 */
608 		if (dca_length(reqp->dr_in) >= 8) {
609 			errno = dca_3desstart(reqp->dr_dca, reqp->dr_flags,
610 			    reqp);
611 			if (errno == CRYPTO_QUEUED) {
612 				return;
613 			}
614 		}
615 	}
616 
617 errout:
618 
619 	/*
620 	 * If this is an atomic operation, perform the final function
621 	 * tasks (equivalent to dca_3desfinal()).
622 	 */
623 	if (reqp->dr_ctx.atomic) {
624 		if ((errno == CRYPTO_SUCCESS) && (ctx->dr_ctx.residlen != 0)) {
625 			DBG(NULL, DWARN,
626 			    "dca_3desdone: invalid nonzero residual");
627 			if (reqp->dr_flags & DR_DECRYPT) {
628 				errno = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
629 			} else {
630 				errno = CRYPTO_DATA_LEN_RANGE;
631 			}
632 		}
633 	}
634 
635 	ASSERT(reqp->dr_kcf_req != NULL);
636 	/* notify framework that request is completed */
637 	crypto_op_notification(reqp->dr_kcf_req, errno);
638 	DBG(NULL, DINTR,
639 	    "dca_3desdone: returning %d to the kef via crypto_op_notification",
640 	    errno);
641 
642 	/* This has to be done after notifying the framework */
643 	if (reqp->dr_ctx.atomic) {
644 		reqp->dr_context = NULL;
645 		reqp->dr_ctx.atomic = 0;
646 		reqp->dr_ctx.ctx_cm_type = 0;
647 		if (reqp->destroy)
648 			dca_destroyreq(reqp);
649 		else
650 			dca_freereq(reqp);
651 	}
652 }
653 
654 /* ARGSUSED */
655 int
656 dca_3desctxinit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
657     crypto_key_t *key, int kmflag, int flags)
658 {
659 	dca_request_t	*des_ctx;
660 	dca_t		*dca = ctx->cc_provider;
661 #ifdef UNALIGNED_POINTERS_PERMITTED
662 	uint32_t	*param;
663 	uint32_t	*value32;
664 #else
665 	uchar_t		*param;
666 #endif	/* UNALIGNED_POINTERS_PERMITTED */
667 	uchar_t		*value;
668 	size_t		paramsz;
669 	unsigned	len;
670 	int		i, j;
671 
672 	paramsz = mechanism->cm_param_len;
673 #ifdef UNALIGNED_POINTERS_PERMITTED
674 	param = (uint32_t *)mechanism->cm_param;
675 #else
676 	param = (uchar_t *)mechanism->cm_param;
677 #endif	/* UNALIGNED_POINTERS_PERMITTED */
678 
679 	if ((paramsz != 0) && (paramsz != DES_IV_LEN)) {
680 		DBG(NULL, DWARN,
681 		    "dca_3desctxinit: parameter(IV) length not %d (%d)",
682 		    DES_IV_LEN, paramsz);
683 		return (CRYPTO_MECHANISM_PARAM_INVALID);
684 	}
685 
686 	if ((des_ctx = dca_getreq(dca, MCR1, 1)) == NULL) {
687 		dca_error(dca, "unable to allocate request for 3DES");
688 		return (CRYPTO_HOST_MEMORY);
689 	}
690 	/*
691 	 * Identify and store the IV as a pair of native 32-bit words.
692 	 *
693 	 * If cm_param == NULL then the IV comes from the cd_miscdata field
694 	 * in the crypto_data structure.
695 	 */
696 	if (param != NULL) {
697 		ASSERT(paramsz == DES_IV_LEN);
698 #ifdef UNALIGNED_POINTERS_PERMITTED
699 		des_ctx->dr_ctx.iv[0] = htonl(param[0]);
700 		des_ctx->dr_ctx.iv[1] = htonl(param[1]);
701 #else
702 		des_ctx->dr_ctx.iv[0] = param[0]<<24 | param[1]<<16 |
703 		    param[2]<<8 | param[3];
704 		des_ctx->dr_ctx.iv[1] = param[4]<<24 | param[5]<<16 |
705 		    param[6]<<8 | param[7];
706 #endif	/* UNALIGNED_POINTERS_PERMITTED */
707 	}
708 	des_ctx->dr_ctx.residlen = 0;
709 	des_ctx->dr_ctx.activeresidlen = 0;
710 	des_ctx->dr_ctx.ctx_cm_type = mechanism->cm_type;
711 	ctx->cc_provider_private = des_ctx;
712 
713 	if (key->ck_format != CRYPTO_KEY_RAW) {
714 		DBG(NULL, DWARN,
715 	"dca_3desctxinit: only raw crypto key type supported with DES/3DES");
716 		dca_3desctxfree(ctx);
717 		return (CRYPTO_KEY_TYPE_INCONSISTENT);
718 	}
719 
720 	len = key->ck_length;
721 	value = (uchar_t *)key->ck_data;
722 
723 	if (flags & DR_TRIPLE) {
724 		/* 3DES */
725 		switch (len) {
726 		case 192:
727 			for (i = 0; i < 6; i++) {
728 				des_ctx->dr_ctx.key[i] = 0;
729 				for (j = 0; j < 4; j++) {
730 					des_ctx->dr_ctx.key[i] <<= 8;
731 					des_ctx->dr_ctx.key[i] |= *value;
732 					value++;
733 				}
734 			}
735 			break;
736 
737 		case 128:
738 			for (i = 0; i < 4; i++) {
739 				des_ctx->dr_ctx.key[i] = 0;
740 				for (j = 0; j < 4; j++) {
741 					des_ctx->dr_ctx.key[i] <<= 8;
742 					des_ctx->dr_ctx.key[i] |= *value;
743 					value++;
744 				}
745 			}
746 			des_ctx->dr_ctx.key[4] = des_ctx->dr_ctx.key[0];
747 			des_ctx->dr_ctx.key[5] = des_ctx->dr_ctx.key[1];
748 			break;
749 
750 		default:
751 			DBG(NULL, DWARN, "Incorrect 3DES keysize (%d)", len);
752 			dca_3desctxfree(ctx);
753 			return (CRYPTO_KEY_SIZE_RANGE);
754 		}
755 	} else {
756 		/* single DES */
757 		if (len != 64) {
758 			DBG(NULL, DWARN, "Incorrect DES keysize (%d)", len);
759 			dca_3desctxfree(ctx);
760 			return (CRYPTO_KEY_SIZE_RANGE);
761 		}
762 
763 #ifdef UNALIGNED_POINTERS_PERMITTED
764 		value32 = (uint32_t *)value;
765 		des_ctx->dr_ctx.key[0] = htonl(value32[0]);
766 		des_ctx->dr_ctx.key[1] = htonl(value32[1]);
767 #else
768 		des_ctx->dr_ctx.key[0] =
769 		    value[0]<<24 | value[1]<<16 | value[2]<<8 | value[3];
770 		des_ctx->dr_ctx.key[1] =
771 		    value[4]<<24 | value[5]<<16 | value[6]<<8 | value[7];
772 #endif	/* UNALIGNED_POINTERS_PERMITTED */
773 
774 		/* for single DES just repeat the DES key */
775 		des_ctx->dr_ctx.key[4] =
776 		    des_ctx->dr_ctx.key[2] = des_ctx->dr_ctx.key[0];
777 		des_ctx->dr_ctx.key[5] =
778 		    des_ctx->dr_ctx.key[3] = des_ctx->dr_ctx.key[1];
779 	}
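	/*
	 * At this point dr_ctx.key[0..5] holds the three DES keys K1, K2 and
	 * K3 as big-endian 32-bit word pairs: a 192-bit key supplies all
	 * three, a 128-bit key reuses K1 as K3, and single DES replicates the
	 * one key into all three slots (3DES-EDE with equal keys reduces to
	 * plain DES).
	 */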
780 
781 	/*
782 	 * Set up the context here so that we do not need to set it up
783 	 * for every update.
784 	 */
785 	PUTCTX16(des_ctx, CTX_LENGTH, CTX_3DES_LENGTH);
786 	PUTCTX16(des_ctx, CTX_CMD, CMD_3DES);
787 	PUTCTX32(des_ctx, CTX_3DESDIRECTION,
788 	    flags & DR_ENCRYPT ? CTX_3DES_ENCRYPT : CTX_3DES_DECRYPT);
789 	PUTCTX32(des_ctx, CTX_3DESKEY1HI, des_ctx->dr_ctx.key[0]);
790 	PUTCTX32(des_ctx, CTX_3DESKEY1LO, des_ctx->dr_ctx.key[1]);
791 	PUTCTX32(des_ctx, CTX_3DESKEY2HI, des_ctx->dr_ctx.key[2]);
792 	PUTCTX32(des_ctx, CTX_3DESKEY2LO, des_ctx->dr_ctx.key[3]);
793 	PUTCTX32(des_ctx, CTX_3DESKEY3HI, des_ctx->dr_ctx.key[4]);
794 	PUTCTX32(des_ctx, CTX_3DESKEY3LO, des_ctx->dr_ctx.key[5]);
795 
796 	return (CRYPTO_SUCCESS);
797 }
798