xref: /titanic_50/usr/src/uts/common/crypto/io/dca.c (revision c77a61a72b5ecdc507d6cf104142edd371a16c84)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Deimos - cryptographic acceleration based upon Broadcom 582x.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/modctl.h>
35 #include <sys/conf.h>
36 #include <sys/devops.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/cmn_err.h>
40 #include <sys/varargs.h>
41 #include <sys/file.h>
42 #include <sys/stat.h>
43 #include <sys/kmem.h>
44 #include <sys/ioccom.h>
45 #include <sys/open.h>
46 #include <sys/cred.h>
47 #include <sys/kstat.h>
48 #include <sys/strsun.h>
49 #include <sys/note.h>
50 #include <sys/crypto/common.h>
51 #include <sys/crypto/spi.h>
52 #include <sys/ddifm.h>
53 #include <sys/fm/protocol.h>
54 #include <sys/fm/util.h>
55 #include <sys/fm/io/ddi.h>
56 #include <sys/crypto/dca.h>
57 
58 /*
59  * Core Deimos driver.
60  */
61 
62 static void		dca_enlist2(dca_listnode_t *, dca_listnode_t *,
63     kmutex_t *);
64 static void		dca_rmlist2(dca_listnode_t *node, kmutex_t *);
65 static dca_listnode_t	*dca_delist2(dca_listnode_t *q, kmutex_t *);
66 static void		dca_free_context_list(dca_t *dca);
67 static int		dca_free_context_low(crypto_ctx_t *ctx);
68 static int		dca_attach(dev_info_t *, ddi_attach_cmd_t);
69 static int		dca_detach(dev_info_t *, ddi_detach_cmd_t);
70 static int		dca_suspend(dca_t *);
71 static int		dca_resume(dca_t *);
72 static int		dca_init(dca_t *);
73 static int		dca_reset(dca_t *, int);
74 static int		dca_initworklist(dca_t *, dca_worklist_t *);
75 static void		dca_uninit(dca_t *);
76 static void		dca_initq(dca_listnode_t *);
77 static void		dca_enqueue(dca_listnode_t *, dca_listnode_t *);
78 static dca_listnode_t	*dca_dequeue(dca_listnode_t *);
79 static dca_listnode_t	*dca_unqueue(dca_listnode_t *);
80 static dca_request_t	*dca_newreq(dca_t *);
81 static dca_work_t	*dca_getwork(dca_t *, int);
82 static void		dca_freework(dca_work_t *);
83 static dca_work_t	*dca_newwork(dca_t *);
84 static void		dca_destroywork(dca_work_t *);
85 static void		dca_schedule(dca_t *, int);
86 static void		dca_reclaim(dca_t *, int);
87 static uint_t		dca_intr(char *);
88 static void		dca_failure(dca_t *, ddi_fault_location_t,
89 			    dca_fma_eclass_t index, uint64_t, int, char *, ...);
90 static void		dca_jobtimeout(void *);
91 static int		dca_drain(dca_t *);
92 static void		dca_undrain(dca_t *);
93 static void		dca_rejectjobs(dca_t *);
94 
95 #ifdef	SCHEDDELAY
96 static void		dca_schedtimeout(void *);
97 #endif
98 
99 /*
100  * We want these inlined for performance.
101  */
102 #ifndef	DEBUG
103 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
104 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
105 #pragma inline(dca_reverse, dca_length)
106 #endif
107 
108 /*
109  * Device operations.
110  */
111 static struct dev_ops devops = {
112 	DEVO_REV,		/* devo_rev */
113 	0,			/* devo_refcnt */
114 	nodev,			/* devo_getinfo */
115 	nulldev,		/* devo_identify */
116 	nulldev,		/* devo_probe */
117 	dca_attach,		/* devo_attach */
118 	dca_detach,		/* devo_detach */
119 	nodev,			/* devo_reset */
120 	NULL,			/* devo_cb_ops */
121 	NULL,			/* devo_bus_ops */
122 	ddi_power		/* devo_power */
123 };
124 
125 #define	IDENT		"PCI Crypto Accelerator 2.0"
126 #define	IDENT_SYM	"Crypto Accel Sym 2.0"
127 #define	IDENT_ASYM	"Crypto Accel Asym 2.0"
128 
129 /* Space-padded, will be filled in dynamically during registration */
130 #define	IDENT3	"PCI Crypto Accelerator Mod 2.0"
131 
132 #define	VENDOR	"Sun Microsystems, Inc."
133 
134 #define	STALETIME	(30 * SECOND)
135 
136 #define	crypto_prov_notify	crypto_provider_notification
137 		/* A 28 char function name doesn't leave much line space */
138 
139 /*
140  * Module linkage.
141  */
142 static struct modldrv modldrv = {
143 	&mod_driverops,		/* drv_modops */
144 	IDENT,			/* drv_linkinfo */
145 	&devops,		/* drv_dev_ops */
146 };
147 
148 extern struct mod_ops mod_cryptoops;
149 
150 static struct modlcrypto modlcrypto = {
151 	&mod_cryptoops,
152 	IDENT3
153 };
154 
155 static struct modlinkage modlinkage = {
156 	MODREV_1,		/* ml_rev */
157 	&modldrv,		/* ml_linkage */
158 	&modlcrypto,
159 	NULL
160 };
161 
162 /*
163  * CSPI information (entry points, provider info, etc.)
164  */
165 
166 /* Mechanisms for the symmetric cipher provider */
167 static crypto_mech_info_t dca_mech_info_tab1[] = {
168 	/* DES-CBC */
169 	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
170 	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
171 	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
172 	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
173 	/* 3DES-CBC */
174 	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
175 	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
176 	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
177 	    DES3_KEY_LEN, DES3_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
178 };
179 
180 /* Mechanisms for the asymmetric cipher provider */
181 static crypto_mech_info_t dca_mech_info_tab2[] = {
182 	/* DSA */
183 	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
184 	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
185 	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
186 	    DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8,
187 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
188 
189 	/* RSA */
190 	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
191 	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
192 	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
193 	    CRYPTO_FG_VERIFY_RECOVER |
194 	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
195 	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
196 	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
197 	    RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
198 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
199 	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
200 	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
201 	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
202 	    CRYPTO_FG_VERIFY_RECOVER |
203 	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
204 	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
205 	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
206 	    RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
207 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
208 };
209 
210 static void dca_provider_status(crypto_provider_handle_t, uint_t *);
211 
212 static crypto_control_ops_t dca_control_ops = {
213 	dca_provider_status
214 };
215 
216 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
217     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
218 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
219     crypto_req_handle_t);
220 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
221     crypto_data_t *, crypto_req_handle_t);
222 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
223     crypto_req_handle_t);
224 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
225     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
226     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
227 
228 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
229     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
230 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
231     crypto_req_handle_t);
232 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
233     crypto_data_t *, crypto_req_handle_t);
234 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
235     crypto_req_handle_t);
236 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
237     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
238     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
239 
240 static crypto_cipher_ops_t dca_cipher_ops = {
241 	dca_encrypt_init,
242 	dca_encrypt,
243 	dca_encrypt_update,
244 	dca_encrypt_final,
245 	dca_encrypt_atomic,
246 	dca_decrypt_init,
247 	dca_decrypt,
248 	dca_decrypt_update,
249 	dca_decrypt_final,
250 	dca_decrypt_atomic
251 };
252 
253 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
254     crypto_spi_ctx_template_t, crypto_req_handle_t);
255 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
256     crypto_req_handle_t);
257 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
258     crypto_req_handle_t);
259 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
260     crypto_req_handle_t);
261 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
262     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
263     crypto_spi_ctx_template_t, crypto_req_handle_t);
264 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
265     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
266 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
267     crypto_req_handle_t);
268 static int dca_sign_recover_atomic(crypto_provider_handle_t,
269     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
270     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
271 
272 static crypto_sign_ops_t dca_sign_ops = {
273 	dca_sign_init,
274 	dca_sign,
275 	dca_sign_update,
276 	dca_sign_final,
277 	dca_sign_atomic,
278 	dca_sign_recover_init,
279 	dca_sign_recover,
280 	dca_sign_recover_atomic
281 };
282 
283 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
284     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
285 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
286     crypto_req_handle_t);
287 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
288     crypto_req_handle_t);
289 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
290     crypto_req_handle_t);
291 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
292     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
293     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
294 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
295     crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
296 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
297     crypto_data_t *, crypto_req_handle_t);
298 static int dca_verify_recover_atomic(crypto_provider_handle_t,
299     crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
300     crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
301 
302 static crypto_verify_ops_t dca_verify_ops = {
303 	dca_verify_init,
304 	dca_verify,
305 	dca_verify_update,
306 	dca_verify_final,
307 	dca_verify_atomic,
308 	dca_verify_recover_init,
309 	dca_verify_recover,
310 	dca_verify_recover_atomic
311 };
312 
313 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
314     uchar_t *, size_t, crypto_req_handle_t);
315 
316 static crypto_random_number_ops_t dca_random_number_ops = {
317 	NULL,
318 	dca_generate_random
319 };
320 
321 static int ext_info_sym(crypto_provider_handle_t prov,
322     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
323 static int ext_info_asym(crypto_provider_handle_t prov,
324     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
325 static int ext_info_base(crypto_provider_handle_t prov,
326     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
327 
328 static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
329 	ext_info_sym,		/* ext_info */
330 	NULL,			/* init_token */
331 	NULL,			/* init_pin */
332 	NULL			/* set_pin */
333 };
334 
335 static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
336 	ext_info_asym,		/* ext_info */
337 	NULL,			/* init_token */
338 	NULL,			/* init_pin */
339 	NULL			/* set_pin */
340 };
341 
342 int dca_free_context(crypto_ctx_t *);
343 
344 static crypto_ctx_ops_t dca_ctx_ops = {
345 	NULL,
346 	dca_free_context
347 };
348 
349 /* Operations for the symmetric cipher provider */
350 static crypto_ops_t dca_crypto_ops1 = {
351 	&dca_control_ops,
352 	NULL,				/* digest_ops */
353 	&dca_cipher_ops,
354 	NULL,				/* mac_ops */
355 	NULL,				/* sign_ops */
356 	NULL,				/* verify_ops */
357 	NULL,				/* dual_ops */
358 	NULL,				/* cipher_mac_ops */
359 	NULL,				/* random_number_ops */
360 	NULL,				/* session_ops */
361 	NULL,				/* object_ops */
362 	NULL,				/* key_ops */
363 	&dca_provmanage_ops_1,		/* management_ops */
364 	&dca_ctx_ops
365 };
366 
367 /* Operations for the asymmetric cipher provider */
368 static crypto_ops_t dca_crypto_ops2 = {
369 	&dca_control_ops,
370 	NULL,				/* digest_ops */
371 	&dca_cipher_ops,
372 	NULL,				/* mac_ops */
373 	&dca_sign_ops,
374 	&dca_verify_ops,
375 	NULL,				/* dual_ops */
376 	NULL,				/* cipher_mac_ops */
377 	&dca_random_number_ops,
378 	NULL,				/* session_ops */
379 	NULL,				/* object_ops */
380 	NULL,				/* key_ops */
381 	&dca_provmanage_ops_2,		/* management_ops */
382 	&dca_ctx_ops
383 };
384 
385 /* Provider information for the symmetric cipher provider */
386 static crypto_provider_info_t dca_prov_info1 = {
387 	CRYPTO_SPI_VERSION_1,
388 	NULL,				/* pi_provider_description */
389 	CRYPTO_HW_PROVIDER,
390 	NULL,				/* pi_provider_dev */
391 	NULL,				/* pi_provider_handle */
392 	&dca_crypto_ops1,
393 	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
394 	dca_mech_info_tab1,
395 	0,				/* pi_logical_provider_count */
396 	NULL				/* pi_logical_providers */
397 };
398 
399 /* Provider information for the asymmetric cipher provider */
400 static crypto_provider_info_t dca_prov_info2 = {
401 	CRYPTO_SPI_VERSION_1,
402 	NULL,				/* pi_provider_description */
403 	CRYPTO_HW_PROVIDER,
404 	NULL,				/* pi_provider_dev */
405 	NULL,				/* pi_provider_handle */
406 	&dca_crypto_ops2,
407 	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
408 	dca_mech_info_tab2,
409 	0,				/* pi_logical_provider_count */
410 	NULL				/* pi_logical_providers */
411 };
412 
413 /* Convenience macros */
414 /* Retrieve the softc and instance number from a SPI crypto context */
415 #define	DCA_SOFTC_FROM_CTX(ctx, softc, instance) {		\
416 	(softc) = (dca_t *)(ctx)->cc_provider;			\
417 	(instance) = ddi_get_instance((softc)->dca_dip);	\
418 }
419 
420 #define	DCA_MECH_FROM_CTX(ctx) \
421 	(((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
422 
423 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
424     caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
425     dca_chain_t *head, int *n_chain);
426 static uint64_t dca_ena(uint64_t ena);
427 static caddr_t dca_bufdaddr_out(crypto_data_t *data);
428 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
429 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
430     dca_fma_eclass_t eclass_index);
431 
432 static void dca_fma_init(dca_t *dca);
433 static void dca_fma_fini(dca_t *dca);
434 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
435     const void *impl_data);
436 
437 
438 static dca_device_t dca_devices[] = {
439 	/* Broadcom vanilla variants */
440 	{	0x14e4, 0x5820, "Broadcom 5820" },
441 	{	0x14e4, 0x5821, "Broadcom 5821" },
442 	{	0x14e4, 0x5822, "Broadcom 5822" },
443 	{	0x14e4, 0x5825, "Broadcom 5825" },
444 	/* Sun specific OEMd variants */
445 	{	0x108e, 0x5454, "SCA" },
446 	{	0x108e, 0x5455, "SCA 1000" },
447 	{	0x108e, 0x5457, "SCA 500" },
448 	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
449 	{	0x108e, 0x1, "SCA 500" },
450 };
451 
452 /*
453  * Device attributes.
454  */
455 static struct ddi_device_acc_attr dca_regsattr = {
456 	DDI_DEVICE_ATTR_V0,
457 	DDI_STRUCTURE_LE_ACC,
458 	DDI_STRICTORDER_ACC,
459 	DDI_FLAGERR_ACC
460 };
461 
462 static struct ddi_device_acc_attr dca_devattr = {
463 	DDI_DEVICE_ATTR_V0,
464 	DDI_STRUCTURE_LE_ACC,
465 	DDI_STRICTORDER_ACC,
466 	DDI_FLAGERR_ACC
467 };
468 
469 #if !defined(i386) && !defined(__i386)
470 static struct ddi_device_acc_attr dca_bufattr = {
471 	DDI_DEVICE_ATTR_V0,
472 	DDI_NEVERSWAP_ACC,
473 	DDI_STRICTORDER_ACC,
474 	DDI_FLAGERR_ACC
475 };
476 #endif
477 
478 static struct ddi_dma_attr dca_dmaattr = {
479 	DMA_ATTR_V0,		/* dma_attr_version */
480 	0x0,			/* dma_attr_addr_lo */
481 	0xffffffffUL,		/* dma_attr_addr_hi */
482 	0x00ffffffUL,		/* dma_attr_count_max */
483 	0x40,			/* dma_attr_align */
484 	0x40,			/* dma_attr_burstsizes */
485 	0x1,			/* dma_attr_minxfer */
486 	0x00ffffffUL,		/* dma_attr_maxxfer */
487 	0xffffffffUL,		/* dma_attr_seg */
488 #if defined(i386) || defined(__i386) || defined(__amd64)
489 	512,			/* dma_attr_sgllen */
490 #else
491 	1,			/* dma_attr_sgllen */
492 #endif
493 	1,			/* dma_attr_granular */
494 	DDI_DMA_FLAGERR		/* dma_attr_flags */
495 };
496 
497 static void	*dca_state = NULL;
498 int	dca_mindma = 2500;
499 
500 /*
501  * FMA eclass string definitions. Note that these string arrays must be
502  * consistent with the dca_fma_eclass_t enum.
503  */
504 static char *dca_fma_eclass_sca1000[] = {
505 	"sca1000.hw.device",
506 	"sca1000.hw.timeout",
507 	"sca1000.none"
508 };
509 
510 static char *dca_fma_eclass_sca500[] = {
511 	"sca500.hw.device",
512 	"sca500.hw.timeout",
513 	"sca500.none"
514 };
515 
516 /*
517  * DDI entry points.
518  */
519 int
520 _init(void)
521 {
522 	int rv;
523 
524 	DBG(NULL, DMOD, "dca: in _init");
525 
526 	if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
527 		/* this should *never* happen! */
528 		return (rv);
529 	}
530 
531 	if ((rv = mod_install(&modlinkage)) != 0) {
532 		/* cleanup here */
533 		ddi_soft_state_fini(&dca_state);
534 		return (rv);
535 	}
536 
537 	return (0);
538 }
539 
540 int
541 _fini(void)
542 {
543 	int rv;
544 
545 	DBG(NULL, DMOD, "dca: in _fini");
546 
547 	if ((rv = mod_remove(&modlinkage)) == 0) {
548 		/* cleanup here */
549 		ddi_soft_state_fini(&dca_state);
550 	}
551 	return (rv);
552 }
553 
/*
 * Loadable-module info entry point: report module information via the
 * standard mod_info(9F) helper.
 */
int
_info(struct modinfo *modinfop)
{
	DBG(NULL, DMOD, "dca: in _info");

	return (mod_info(&modlinkage, modinfop));
}
561 
562 int
563 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
564 {
565 	ddi_acc_handle_t	pci;
566 	int			instance;
567 	ddi_iblock_cookie_t	ibc;
568 	int			intr_added = 0;
569 	dca_t			*dca;
570 	ushort_t		venid;
571 	ushort_t		devid;
572 	ushort_t		revid;
573 	ushort_t		subsysid;
574 	ushort_t		subvenid;
575 	int			i;
576 	int			ret;
577 	char			ID[64];
578 	static char		*unknowndev = "Unknown device";
579 
580 #if DEBUG
581 	/* these are only used for debugging */
582 	ushort_t		pcicomm;
583 	ushort_t		pcistat;
584 	uchar_t			cachelinesz;
585 	uchar_t			mingnt;
586 	uchar_t			maxlat;
587 	uchar_t			lattmr;
588 #endif
589 
590 	instance = ddi_get_instance(dip);
591 
592 	DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
593 
594 	switch (cmd) {
595 	case DDI_RESUME:
596 		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
597 			dca_diperror(dip, "no soft state in detach");
598 			return (DDI_FAILURE);
599 		}
600 		/* assumption: we won't be DDI_DETACHed until we return */
601 		return (dca_resume(dca));
602 	case DDI_ATTACH:
603 		break;
604 	default:
605 		return (DDI_FAILURE);
606 	}
607 
608 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
609 		dca_diperror(dip, "slot does not support PCI bus-master");
610 		return (DDI_FAILURE);
611 	}
612 
613 	if (ddi_intr_hilevel(dip, 0) != 0) {
614 		dca_diperror(dip, "hilevel interrupts not supported");
615 		return (DDI_FAILURE);
616 	}
617 
618 	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
619 		dca_diperror(dip, "unable to setup PCI config handle");
620 		return (DDI_FAILURE);
621 	}
622 
623 	/* common PCI attributes */
624 	venid = pci_config_get16(pci, PCI_VENID);
625 	devid = pci_config_get16(pci, PCI_DEVID);
626 	revid = pci_config_get8(pci, PCI_REVID);
627 	subvenid = pci_config_get16(pci, PCI_SUBVENID);
628 	subsysid = pci_config_get16(pci, PCI_SUBSYSID);
629 
630 	/*
631 	 * Broadcom-specific timings.
632 	 * We disable these timers/counters since they can cause
633 	 * incorrect false failures when the bus is just a little
634 	 * bit slow, or busy.
635 	 */
636 	pci_config_put8(pci, PCI_TRDYTO, 0);
637 	pci_config_put8(pci, PCI_RETRIES, 0);
638 
639 	/* initialize PCI access settings */
640 	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
641 	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
642 
643 	/* set up our PCI latency timer */
644 	pci_config_put8(pci, PCI_LATTMR, 0x40);
645 
646 #if DEBUG
647 	/* read registers (for debugging) */
648 	pcicomm = pci_config_get16(pci, PCI_COMM);
649 	pcistat = pci_config_get16(pci, PCI_STATUS);
650 	cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
651 	mingnt = pci_config_get8(pci, PCI_MINGNT);
652 	maxlat = pci_config_get8(pci, PCI_MAXLAT);
653 	lattmr = pci_config_get8(pci, PCI_LATTMR);
654 #endif
655 
656 	pci_config_teardown(&pci);
657 
658 	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
659 		dca_diperror(dip, "unable to get iblock cookie");
660 		return (DDI_FAILURE);
661 	}
662 
663 	if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
664 		dca_diperror(dip, "unable to allocate soft state");
665 		return (DDI_FAILURE);
666 	}
667 
668 	dca = ddi_get_soft_state(dca_state, instance);
669 	ASSERT(dca != NULL);
670 	dca->dca_dip = dip;
671 	WORKLIST(dca, MCR1)->dwl_prov = NULL;
672 	WORKLIST(dca, MCR2)->dwl_prov = NULL;
673 	/* figure pagesize */
674 	dca->dca_pagesize = ddi_ptob(dip, 1);
675 
676 	/*
677 	 * Search for the device in our supported devices table.  This
678 	 * is here for two reasons.  First, we want to ensure that
679 	 * only Sun-qualified (and presumably Sun-labeled) devices can
680 	 * be used with this driver.  Second, some devices have
681 	 * specific differences.  E.g. the 5821 has support for a
682 	 * special mode of RC4, deeper queues, power management, and
683 	 * other changes.  Also, the export versions of some of these
684 	 * chips don't support RC4 or 3DES, so we catch that here.
685 	 *
686 	 * Note that we only look at the upper nibble of the device
687 	 * id, which is used to distinguish export vs. domestic
688 	 * versions of the chip.  (The lower nibble is used for
689 	 * stepping information.)
690 	 */
691 	for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
692 		/*
693 		 * Try to match the subsystem information first.
694 		 */
695 		if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
696 		    subsysid && (subsysid == dca_devices[i].dd_device_id)) {
697 			dca->dca_model = dca_devices[i].dd_model;
698 			dca->dca_devid = dca_devices[i].dd_device_id;
699 			break;
700 		}
701 		/*
702 		 * Failing that, try the generic vendor and device id.
703 		 * Even if we find a match, we keep searching anyway,
704 		 * since we would prefer to find a match based on the
705 		 * subsystem ids.
706 		 */
707 		if ((venid == dca_devices[i].dd_vendor_id) &&
708 		    (devid == dca_devices[i].dd_device_id)) {
709 			dca->dca_model = dca_devices[i].dd_model;
710 			dca->dca_devid = dca_devices[i].dd_device_id;
711 		}
712 	}
713 	/* try and handle an unrecognized device */
714 	if (dca->dca_model == NULL) {
715 		dca->dca_model = unknowndev;
716 		dca_error(dca, "device not recognized, not supported");
717 		DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
718 		    i, venid, devid, revid);
719 	}
720 
721 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
722 	    dca->dca_model) != DDI_SUCCESS) {
723 		dca_error(dca, "unable to create description property");
724 		return (DDI_FAILURE);
725 	}
726 
727 	DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
728 	    pcicomm, pcistat, cachelinesz);
729 	DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
730 	    mingnt, maxlat, lattmr);
731 
732 	/*
733 	 * initialize locks, etc.
734 	 */
735 	(void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
736 
737 	/* use RNGSHA1 by default */
738 	if (ddi_getprop(DDI_DEV_T_ANY, dip,
739 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
740 		dca->dca_flags |= DCA_RNGSHA1;
741 	}
742 
743 	/* initialize FMA */
744 	dca_fma_init(dca);
745 
746 	/* initialize some key data structures */
747 	if (dca_init(dca) != DDI_SUCCESS) {
748 		goto failed;
749 	}
750 
751 	/* initialize kstats */
752 	dca_ksinit(dca);
753 
754 	/* setup access to registers */
755 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
756 	    0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
757 		dca_error(dca, "unable to map registers");
758 		goto failed;
759 	}
760 
761 	DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
762 	DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
763 	DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
764 	DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
765 	DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
766 
767 	/* reset the chip */
768 	if (dca_reset(dca, 0) < 0) {
769 		goto failed;
770 	}
771 
772 	/* initialize the chip */
773 	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
774 	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
775 	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
776 		goto failed;
777 	}
778 
779 	/* add the interrupt */
780 	if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
781 	    (void *)dca) != DDI_SUCCESS) {
782 		DBG(dca, DWARN, "ddi_add_intr failed");
783 		goto failed;
784 	} else {
785 		intr_added = 1;
786 	}
787 
788 	/* enable interrupts on the device */
789 	/*
790 	 * XXX: Note, 5820A1 errata indicates that this may clobber
791 	 * bits 24 and 23, which affect the speed of the RNG.  Since
792 	 * we always want to run in full-speed mode, this should be
793 	 * harmless.
794 	 */
795 	if (dca->dca_devid == 0x5825) {
796 		/* for 5825 - increase the DMA read size */
797 		SETBIT(dca, CSR_DMACTL,
798 		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
799 	} else {
800 		SETBIT(dca, CSR_DMACTL,
801 		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
802 	}
803 	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
804 	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
805 		goto failed;
806 	}
807 
808 	/* register MCR1 with the crypto framework */
809 	/* Be careful not to exceed 32 chars */
810 	(void) sprintf(ID, "%s/%d %s",
811 	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
812 	dca_prov_info1.pi_provider_description = ID;
813 	dca_prov_info1.pi_provider_dev.pd_hw = dip;
814 	dca_prov_info1.pi_provider_handle = dca;
815 	if ((ret = crypto_register_provider(&dca_prov_info1,
816 	    &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
817 		cmn_err(CE_WARN,
818 		    "crypto_register_provider() failed (%d) for MCR1", ret);
819 		goto failed;
820 	}
821 
822 	/* register MCR2 with the crypto framework */
823 	/* Be careful not to exceed 32 chars */
824 	(void) sprintf(ID, "%s/%d %s",
825 	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
826 	dca_prov_info2.pi_provider_description = ID;
827 	dca_prov_info2.pi_provider_dev.pd_hw = dip;
828 	dca_prov_info2.pi_provider_handle = dca;
829 	if ((ret = crypto_register_provider(&dca_prov_info2,
830 	    &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
831 		cmn_err(CE_WARN,
832 		    "crypto_register_provider() failed (%d) for MCR2", ret);
833 		goto failed;
834 	}
835 
836 	crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
837 		CRYPTO_PROVIDER_READY);
838 	crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
839 		CRYPTO_PROVIDER_READY);
840 
841 	/* Initialize the local random number pool for this instance */
842 	if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
843 		goto failed;
844 	}
845 
846 	mutex_enter(&dca->dca_intrlock);
847 	dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
848 	    drv_usectohz(SECOND));
849 	mutex_exit(&dca->dca_intrlock);
850 
851 	ddi_set_driver_private(dip, (caddr_t)dca);
852 
853 	ddi_report_dev(dip);
854 
855 	if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
856 		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
857 	}
858 
859 	return (DDI_SUCCESS);
860 
861 failed:
862 	/* unregister from the crypto framework */
863 	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
864 	    (void) crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov);
865 	}
866 	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
867 	    (void) crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov);
868 	}
869 	if (intr_added) {
870 		CLRBIT(dca, CSR_DMACTL,
871 		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
872 		/* unregister intr handler */
873 		ddi_remove_intr(dip, 0, dca->dca_icookie);
874 	}
875 	if (dca->dca_regs_handle) {
876 		ddi_regs_map_free(&dca->dca_regs_handle);
877 	}
878 	if (dca->dca_intrstats) {
879 		kstat_delete(dca->dca_intrstats);
880 	}
881 	if (dca->dca_ksp) {
882 		kstat_delete(dca->dca_ksp);
883 	}
884 	dca_uninit(dca);
885 
886 	/* finalize FMA */
887 	dca_fma_fini(dca);
888 
889 	mutex_destroy(&dca->dca_intrlock);
890 	ddi_soft_state_free(dca_state, instance);
891 	return (DDI_FAILURE);
892 
893 }
894 
/*
 * Detach entry point.  Handles both DDI_SUSPEND and DDI_DETACH.
 *
 * For DDI_DETACH the teardown order is significant: the crypto
 * framework providers must be unregistered first (so no new jobs can
 * arrive), then contexts/jobs are drained, then the job timeout is
 * cancelled, and only then are device interrupts disabled and the
 * remaining resources (regs mapping, kstats, locks, FMA, soft state)
 * released.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	dca_t		*dca;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
	    if (crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov) !=
		CRYPTO_SUCCESS) {
		    dca_error(dca, "unable to unregister MCR1 from kcf");
		    return (DDI_FAILURE);
	    }
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
	    if (crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov) !=
		CRYPTO_SUCCESS) {
		    dca_error(dca, "unable to unregister MCR2 from kcf");
		    return (DDI_FAILURE);
	    }
	}

	/*
	 * Cleanup the private context list. Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/* untimeout the timeouts */
	/* grab the tid under the lock, then cancel outside it */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	/* interrupts are gone, so the lock can be destroyed now */
	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}
994 
/*
 * DDI_RESUME handler: reprogram PCI configuration space, reset the
 * device, restore the card CSRs (endianness and interrupt enables),
 * and resume job scheduling.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t	pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}
1055 
/*
 * DDI_SUSPEND handler: drain all outstanding jobs from the device,
 * then reset it.  Fails (DDI_FAILURE) if the drain cannot complete,
 * leaving the instance operational.
 */
int
dca_suspend(dca_t *dca)
{
	if ((dca_drain(dca)) != 0) {
		return (DDI_FAILURE);
	}
	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during suspend");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1068 
1069 /*
1070  * Hardware access stuff.
1071  */
/*
 * Reset the device: write DMACTL_RESET and poll (100 iterations of
 * 100us each, ~10ms total) for the bit to self-clear.  When failreset
 * is nonzero, FMA access-handle checks and failure reporting are
 * suppressed (used when resetting an already-faulted device).
 * Returns 0 on success, -1 on failure.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	/* no regs handle means the device was never mapped */
	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}

	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t	dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		/* RESET self-clears when the engine has reset */
		if ((dmactl & DMACTL_RESET) == 0) {
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}
	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}
1110 
/*
 * Initialize one worklist: its locks, condition variable, queues, and
 * preallocated work and request structures.  The caller must have set
 * wlp->dwl_hiwater beforehand, since it sizes the request pool.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * NOTE(review): on failure, structures already placed on the free
 * lists are presumably reclaimed later by dca_uninit() -- confirm
 * against the attach failure path.
 */
int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
	int	i;
	int	reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

	/*
	 * Set up work queue.
	 */
	mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
	    dca->dca_icookie);
	mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

	mutex_enter(&wlp->dwl_lock);

	dca_initq(&wlp->dwl_freereqs);
	dca_initq(&wlp->dwl_waitq);
	dca_initq(&wlp->dwl_freework);
	dca_initq(&wlp->dwl_runq);

	/* preallocate the per-MCR work structures */
	for (i = 0; i < MAXWORK; i++) {
		dca_work_t		*workp;

		if ((workp = dca_newwork(dca)) == NULL) {
			dca_error(dca, "unable to allocate work");
			mutex_exit(&wlp->dwl_lock);
			return (DDI_FAILURE);
		}
		workp->dw_wlp = wlp;
		dca_freework(workp);
	}
	mutex_exit(&wlp->dwl_lock);

	/* preallocate the request pool */
	for (i = 0; i < reqprealloc; i++) {
		dca_request_t *reqp;

		if ((reqp = dca_newreq(dca)) == NULL) {
			dca_error(dca, "unable to allocate request");
			return (DDI_FAILURE);
		}
		reqp->dr_dca = dca;
		reqp->dr_wlp = wlp;
		dca_freereq(reqp);
	}
	return (DDI_SUCCESS);
}
1159 
/*
 * Initialize driver soft state: the private context list and both
 * worklists (MCR1 and MCR2), with low/high water marks and MCR sizing
 * taken from driver properties.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_init(dca_t *dca)
{
	dca_worklist_t		*wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	/* name is "dcaN:mcr1"; bounded, so sprintf cannot overflow here */
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
		ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	/* never allow more requests per MCR than the hardware maximum */
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
		ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1212 
1213 /*
1214  * Uninitialize worklists.  This routine should only be called when no
1215  * active jobs (hence DMA mappings) exist.  One way to ensure this is
1216  * to unregister from kCF before calling this routine.  (This is done
1217  * e.g. in detach(9e).)
1218  */
void
dca_uninit(dca_t *dca)
{
	int	mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		dca_request_t	*reqp;

		/*
		 * NOTE(review): a NULL regs handle means the device was
		 * never mapped, in which case the worklists presumably
		 * were never initialized -- confirm against attach.
		 */
		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		/* drain and destroy all preallocated work structures */
		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		/* drain and destroy the request pool (no hard allocation) */
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		mutex_destroy(&wlp->dwl_freelock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = NULL;
	}
}
1251 
1252 static void
1253 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1254 {
1255 	if (!q || !node)
1256 		return;
1257 
1258 	mutex_enter(lock);
1259 	node->dl_next2 = q;
1260 	node->dl_prev2 = q->dl_prev2;
1261 	node->dl_next2->dl_prev2 = node;
1262 	node->dl_prev2->dl_next2 = node;
1263 	mutex_exit(lock);
1264 }
1265 
1266 static void
1267 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1268 {
1269 	if (!node)
1270 		return;
1271 
1272 	mutex_enter(lock);
1273 	node->dl_next2->dl_prev2 = node->dl_prev2;
1274 	node->dl_prev2->dl_next2 = node->dl_next2;
1275 	node->dl_next2 = NULL;
1276 	node->dl_prev2 = NULL;
1277 	mutex_exit(lock);
1278 }
1279 
1280 static dca_listnode_t *
1281 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1282 {
1283 	dca_listnode_t *node;
1284 
1285 	mutex_enter(lock);
1286 	if ((node = q->dl_next2) == q) {
1287 		mutex_exit(lock);
1288 		return (NULL);
1289 	}
1290 
1291 	node->dl_next2->dl_prev2 = node->dl_prev2;
1292 	node->dl_prev2->dl_next2 = node->dl_next2;
1293 	node->dl_next2 = NULL;
1294 	node->dl_prev2 = NULL;
1295 	mutex_exit(lock);
1296 
1297 	return (node);
1298 }
1299 
1300 void
1301 dca_initq(dca_listnode_t *q)
1302 {
1303 	q->dl_next = q;
1304 	q->dl_prev = q;
1305 	q->dl_next2 = q;
1306 	q->dl_prev2 = q;
1307 }
1308 
1309 void
1310 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1311 {
1312 	/*
1313 	 * Enqueue submits at the "tail" of the list, i.e. just
1314 	 * behind the sentinel.
1315 	 */
1316 	node->dl_next = q;
1317 	node->dl_prev = q->dl_prev;
1318 	node->dl_next->dl_prev = node;
1319 	node->dl_prev->dl_next = node;
1320 }
1321 
1322 void
1323 dca_rmqueue(dca_listnode_t *node)
1324 {
1325 	node->dl_next->dl_prev = node->dl_prev;
1326 	node->dl_prev->dl_next = node->dl_next;
1327 	node->dl_next = NULL;
1328 	node->dl_prev = NULL;
1329 }
1330 
1331 dca_listnode_t *
1332 dca_dequeue(dca_listnode_t *q)
1333 {
1334 	dca_listnode_t *node;
1335 	/*
1336 	 * Dequeue takes from the "head" of the list, i.e. just after
1337 	 * the sentinel.
1338 	 */
1339 	if ((node = q->dl_next) == q) {
1340 		/* queue is empty */
1341 		return (NULL);
1342 	}
1343 	dca_rmqueue(node);
1344 	return (node);
1345 }
1346 
1347 /* this is the opposite of dequeue, it takes things off in LIFO order */
1348 dca_listnode_t *
1349 dca_unqueue(dca_listnode_t *q)
1350 {
1351 	dca_listnode_t *node;
1352 	/*
1353 	 * unqueue takes from the "tail" of the list, i.e. just before
1354 	 * the sentinel.
1355 	 */
1356 	if ((node = q->dl_prev) == q) {;
1357 		/* queue is empty */
1358 		return (NULL);
1359 	}
1360 	dca_rmqueue(node);
1361 	return (node);
1362 }
1363 
1364 dca_listnode_t *
1365 dca_peekqueue(dca_listnode_t *q)
1366 {
1367 	dca_listnode_t *node;
1368 
1369 	if ((node = q->dl_next) == q) {
1370 		return (NULL);
1371 	} else {
1372 		return (node);
1373 	}
1374 }
1375 
1376 /*
1377  * Interrupt service routine.
1378  */
uint_t
dca_intr(char *arg)
{
	dca_t		*dca = (dca_t *)arg;
	uint32_t	status;

	mutex_enter(&dca->dca_intrlock);
	/* read and acknowledge all asserted interrupt conditions */
	status = GETCSR(dca, CSR_DMASTAT);
	PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		mutex_exit(&dca->dca_intrlock);
		return ((uint_t)DDI_FAILURE);
	}

	DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

	if ((status & DMASTAT_INTERRUPTS) == 0) {
		/* increment spurious interrupt kstat */
		if (dca->dca_intrstats) {
			KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
		}
		mutex_exit(&dca->dca_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	if (dca->dca_intrstats) {
		KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
	}
	/* MCR1 completion: reclaim finished jobs, schedule more work */
	if (status & DMASTAT_MCR1INT) {
		DBG(dca, DINTR, "MCR1 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
		dca_schedule(dca, MCR1);
		dca_reclaim(dca, MCR1);
		mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
	}

	/* MCR2 completion: same treatment as MCR1 */
	if (status & DMASTAT_MCR2INT) {
		DBG(dca, DINTR, "MCR2 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
		dca_schedule(dca, MCR2);
		dca_reclaim(dca, MCR2);
		mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
	}

	if (status & DMASTAT_ERRINT) {
		uint32_t	erraddr;
		erraddr = GETCSR(dca, CSR_DMAEA);
		/* drop the lock before reporting the failure */
		mutex_exit(&dca->dca_intrlock);

		/*
		 * bit 1 of the error address indicates failure during
		 * read if set, during write otherwise.
		 */
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "DMA master access error %s address 0x%x",
		    erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
		return (DDI_INTR_CLAIMED);
	}

	mutex_exit(&dca->dca_intrlock);

	return (DDI_INTR_CLAIMED);
}
1444 
1445 /*
1446  * Reverse a string of bytes from s1 into s2.  The reversal happens
1447  * from the tail of s1.  If len1 < len2, then null bytes will be
1448  * padded to the end of s2.  If len2 < len1, then (presumably null)
1449  * bytes will be dropped from the start of s1.
1450  *
1451  * The rationale here is that when s1 (source) is shorter, then we
1452  * are reversing from big-endian ordering, into device ordering, and
1453  * want to add some extra nulls to the tail (MSB) side of the device.
1454  *
1455  * Similarly, when s2 (dest) is shorter, then we are truncating what
1456  * are presumably null MSB bits from the device.
1457  *
1458  * There is an expectation when reversing from the device back into
1459  * big-endian, that the number of bytes to reverse and the target size
1460  * will match, and no truncation or padding occurs.
1461  */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
	char	*from;
	char	*to;
	int	remaining;

	/* An empty source simply zero-fills the destination. */
	if (len1 == 0) {
		if (len2) {
			bzero(s2, len2);
		}
		return;
	}

	from = (char *)s1 + len1;
	to = (char *)s2;
	remaining = len2;

	/* Walk backward through s1 while filling s2 forward. */
	while (from > (char *)s1 && remaining > 0) {
		*to++ = *--from;
		remaining--;
	}

	/* If s2 is longer than s1, pad the tail (MSB side) with nulls. */
	while (remaining > 0) {
		*to++ = 0;
		remaining--;
	}
}
1484 
1485 uint16_t
1486 dca_padfull(int num)
1487 {
1488 	if (num <= 512) {
1489 		return (BITS2BYTES(512));
1490 	}
1491 	if (num <= 768) {
1492 		return (BITS2BYTES(768));
1493 	}
1494 	if (num <= 1024) {
1495 		return (BITS2BYTES(1024));
1496 	}
1497 	if (num <= 1536) {
1498 		return (BITS2BYTES(1536));
1499 	}
1500 	if (num <= 2048) {
1501 		return (BITS2BYTES(2048));
1502 	}
1503 	return (0);
1504 }
1505 
1506 uint16_t
1507 dca_padhalf(int num)
1508 {
1509 	if (num <= 256) {
1510 		return (BITS2BYTES(256));
1511 	}
1512 	if (num <= 384) {
1513 		return (BITS2BYTES(384));
1514 	}
1515 	if (num <= 512) {
1516 		return (BITS2BYTES(512));
1517 	}
1518 	if (num <= 768) {
1519 		return (BITS2BYTES(768));
1520 	}
1521 	if (num <= 1024) {
1522 		return (BITS2BYTES(1024));
1523 	}
1524 	return (0);
1525 }
1526 
/*
 * Allocate a work structure (one MCR) and its DMA resources: a DMA
 * handle, page-rounded consistent memory for the MCR, and the address
 * binding.  Returns NULL on any failure; partially constructed work
 * is released via dca_destroywork().
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t		*workp;
	size_t			size;
	ddi_dma_cookie_t	c;
	unsigned		nc;
	int			rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	/* whole pages for driver hardening */
	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	/* remember the device-visible address of the MCR */
	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}
1568 
/*
 * Tear down a work structure in the reverse order of dca_newwork().
 * Tolerates partially constructed work: NULL/zero fields are skipped,
 * so this is safe to call from dca_newwork()'s error paths.
 */
void
dca_destroywork(dca_work_t *workp)
{
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}
1583 
/*
 * Allocate a request structure and all of its DMA resources:
 *
 *   - one consistent region holding the context followed by the
 *     input/output descriptor chains (dr_ctx_*),
 *   - scratch input/output buffers (dr_ibuf_*, dr_obuf_*), and
 *   - two handles for dynamically binding user buffers later
 *     (dr_chain_in_dmah, dr_chain_out_dmah).
 *
 * Returns NULL on any failure; partial allocations are released via
 * dca_destroyreq().
 */
dca_request_t *
dca_newreq(dca_t *dca)
{
	dca_request_t		*reqp;
	size_t			size;
	ddi_dma_cookie_t	c;
	unsigned		nc;
	int			rv;
	int			n_chain = 0;

	size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

	reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

	reqp->dr_dca = dca;

	/*
	 * Setup the DMA region for the context and descriptors.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
	    NULL, &reqp->dr_ctx_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/* for driver hardening, allocate in whole pages */
	rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
	    ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
	    &reqp->dr_ctx_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
	    reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
	    DDI_DMA_SLEEP, 0, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "failed binding request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ctx_paddr = c.dmac_address;

	reqp->dr_dma_size = size;

	/*
	 * Set up the dma for our scratch/shared buffers.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating ibuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating obuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_in DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_out DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/*
	 * for driver hardening, allocate in whole pages.
	 */
	size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
	/*
	 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
	 * may fail on x86 platform if a physically contiguous memory chunk
	 * cannot be found. From initial testing, we did not see performance
	 * degradation as seen on Sparc.
	 */
	if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request ibuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
	if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request obuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#else
	/*
	 * We could kmem_alloc for sparc too. However, it gives worse
	 * performance when transferring more than one page data. For example,
	 * using 4 threads and 12032 byte data and 3DES on 900MHZ sparc system,
	 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
	 * the same throughput.
	 */
	rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
	    &size, &reqp->dr_ibuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
	    &size, &reqp->dr_obuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#endif

	/* Skip the used portion in the context page */
	reqp->dr_offset = CTX_MAXLENGTH;
	/* pre-build the descriptor chain for the scratch input buffer */
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
	/* Skip the space used by the input buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	/* pre-build the descriptor chain for the scratch output buffer */
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
	    DDI_DMA_READ | DDI_DMA_STREAMING,
	    &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
	/* Skip the space used by the output buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
	    reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
	return (reqp);
}
1746 
/*
 * Tear down a request structure in the reverse order of dca_newreq().
 * Tolerates partially constructed requests (NULL/zero fields are
 * skipped), so it is safe from dca_newreq()'s error paths.
 */
void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
	dca_t		*dca = reqp->dr_dca;
	size_t		size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

	/*
	 * Clean up DMA for the context structure.
	 */
	if (reqp->dr_ctx_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
	}

	if (reqp->dr_ctx_acch) {
		ddi_dma_mem_free(&reqp->dr_ctx_acch);
	}

	if (reqp->dr_ctx_dmah) {
		ddi_dma_free_handle(&reqp->dr_ctx_dmah);
	}

	/*
	 * Clean up DMA for the scratch buffer.
	 */
#if defined(i386) || defined(__i386)
	/* x86: scratch buffers came from kmem_alloc (see dca_newreq) */
	if (reqp->dr_ibuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}

	kmem_free(reqp->dr_ibuf_kaddr, size);
	kmem_free(reqp->dr_obuf_kaddr, size);
#else
	/* sparc: scratch buffers came from ddi_dma_mem_alloc */
	if (reqp->dr_ibuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
	}

	if (reqp->dr_ibuf_acch) {
		ddi_dma_mem_free(&reqp->dr_ibuf_acch);
	}
	if (reqp->dr_obuf_acch) {
		ddi_dma_mem_free(&reqp->dr_obuf_acch);
	}

	if (reqp->dr_ibuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}
#endif
	/*
	 * These two DMA handles should have been unbinded in
	 * dca_unbindchains() function
	 */
	if (reqp->dr_chain_in_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
	}
	if (reqp->dr_chain_out_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
	}

	kmem_free(reqp, sizeof (dca_request_t));
}
1820 
1821 dca_work_t *
1822 dca_getwork(dca_t *dca, int mcr)
1823 {
1824 	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
1825 	dca_work_t	*workp;
1826 
1827 	mutex_enter(&wlp->dwl_freelock);
1828 	workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1829 	mutex_exit(&wlp->dwl_freelock);
1830 	if (workp) {
1831 		int	nreqs;
1832 		bzero(workp->dw_mcr_kaddr, 8);
1833 
1834 		/* clear out old requests */
1835 		for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1836 			workp->dw_reqs[nreqs] = NULL;
1837 		}
1838 	}
1839 	return (workp);
1840 }
1841 
1842 void
1843 dca_freework(dca_work_t *workp)
1844 {
1845 	mutex_enter(&workp->dw_wlp->dwl_freelock);
1846 	dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1847 	mutex_exit(&workp->dw_wlp->dwl_freelock);
1848 }
1849 
1850 dca_request_t *
1851 dca_getreq(dca_t *dca, int mcr, int tryhard)
1852 {
1853 	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
1854 	dca_request_t	*reqp;
1855 
1856 	mutex_enter(&wlp->dwl_freereqslock);
1857 	reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1858 	mutex_exit(&wlp->dwl_freereqslock);
1859 	if (reqp) {
1860 		reqp->dr_flags = 0;
1861 		reqp->dr_callback = NULL;
1862 	} else if (tryhard) {
1863 		/*
1864 		 * failed to get a free one, try an allocation, the hard way.
1865 		 * XXX: Kstat desired here.
1866 		 */
1867 		if ((reqp = dca_newreq(dca)) != NULL) {
1868 			reqp->dr_wlp = wlp;
1869 			reqp->dr_dca = dca;
1870 			reqp->dr_flags = 0;
1871 			reqp->dr_callback = NULL;
1872 		}
1873 	}
1874 	return (reqp);
1875 }
1876 
1877 void
1878 dca_freereq(dca_request_t *reqp)
1879 {
1880 	reqp->dr_kcf_req = NULL;
1881 	if (!(reqp->dr_flags & DR_NOCACHE)) {
1882 		mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1883 		dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1884 		    (dca_listnode_t *)reqp);
1885 		mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1886 	}
1887 }
1888 
/*
 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
 * is mapped to a single physical address. On x86, a user buffer is mapped
 * to multiple physical addresses. These physical addresses are chained
 * using the method specified in the Broadcom BCM5820 specification.
 */
/*
 * Bind the request's crypto data buffers (dr_in/dr_out) to the chain
 * DMA handles and record the head of each resulting descriptor chain
 * in the request for the MCR.  For DR_INPLACE requests the output
 * fields alias the input chain.  Returns DDI_SUCCESS or an error;
 * on error any partial bindings are released.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
	int			rv;
	caddr_t			kaddr;
	uint_t			flags;
	int			n_chain = 0;

	if (reqp->dr_flags & DR_INPLACE) {
		flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
	} else {
		flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
	}

	/* first the input */
	if (incnt) {
		if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			return (DDI_FAILURE);
		}
		if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
		    kaddr, reqp->dr_chain_in_dmah, flags,
		    &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (rv);
		}

		/*
		 * The offset and length are altered by the calling routine
		 * reqp->dr_in->cd_offset += incnt;
		 * reqp->dr_in->cd_length -= incnt;
		 */
		/* Save the first one in the chain for MCR */
		reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
		reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
		reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
	} else {
		reqp->dr_in_paddr = NULL;
		reqp->dr_in_next = 0;
		reqp->dr_in_len = 0;
	}

	/* in-place: the device writes back over the input chain */
	if (reqp->dr_flags & DR_INPLACE) {
		reqp->dr_out_paddr = reqp->dr_in_paddr;
		reqp->dr_out_len = reqp->dr_in_len;
		reqp->dr_out_next = reqp->dr_in_next;
		return (DDI_SUCCESS);
	}

	/* then the output */
	if (outcnt) {
		flags = DDI_DMA_READ | DDI_DMA_STREAMING;
		if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}
		/* output descriptors follow the input chain's descriptors */
		rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
		    n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
		    flags, &reqp->dr_chain_out_head, &n_chain);
		if (rv != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}

		/* Save the first one in the chain for MCR */
		reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
		reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
		reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
	} else {
		reqp->dr_out_paddr = NULL;
		reqp->dr_out_next = 0;
		reqp->dr_out_len = 0;
	}

	return (DDI_SUCCESS);
}
1972 
1973 /*
1974  * Unbind the user buffers from the DMA handles.
1975  */
1976 int
1977 dca_unbindchains(dca_request_t *reqp)
1978 {
1979 	int rv = DDI_SUCCESS;
1980 	int rv1 = DDI_SUCCESS;
1981 
1982 	/* Clear the input chain */
1983 	if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
1984 		(void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1985 		reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1986 	}
1987 
1988 	/* Clear the output chain */
1989 	if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
1990 		(void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1991 		reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1992 	}
1993 
1994 	return ((rv != DDI_SUCCESS)? rv : rv1);
1995 }
1996 
/*
 * Build either the input chain or the output chain. It is a single-item
 * chain for Sparc, and possibly a multiple-item chain for x86.
 */
/*
 * Bind one buffer (kaddr/cnt) to "handle" and write a descriptor
 * chain for the resulting DMA cookies into the request's context
 * region at byte offset dr_offset.  "head" receives the physical
 * address/length of the first buffer and the physical address of the
 * second descriptor (0 if single-cookie); *n_chain receives the
 * cookie count.  Returns DDI_SUCCESS or an error code; on a failed
 * post-sync FMA check the request is marked for destruction.
 */
static int
dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain)
{
	ddi_dma_cookie_t	c;
	uint_t			nc;
	int			rv;
	caddr_t			chain_kaddr_pre;
	caddr_t			chain_kaddr;
	uint32_t		chain_paddr;
	int 			i;

	/* Advance past the context structure to the starting address */
	chain_paddr = reqp->dr_ctx_paddr + dr_offset;
	chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;

	/*
	 * Bind the kernel address to the DMA handle. On x86, the actual
	 * buffer is mapped into multiple physical addresses. On Sparc,
	 * the actual buffer is mapped into a single address.
	 */
	rv = ddi_dma_addr_bind_handle(handle,
	    NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		return (DDI_FAILURE);
	}

	(void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
	if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
	    DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
		reqp->destroy = TRUE;
		return (rv);
	}

	*n_chain = nc;

	/* Setup the data buffer chain for DMA transfer */
	chain_kaddr_pre = NULL;
	head->dc_buffer_paddr = 0;
	head->dc_next_paddr = 0;
	head->dc_buffer_length = 0;
	for (i = 0; i < nc; i++) {
		/* PIO */
		PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
		PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
		PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);

		/* Remember the head of the chain */
		if (head->dc_buffer_paddr == 0) {
			head->dc_buffer_paddr = c.dmac_address;
			head->dc_buffer_length = c.dmac_size;
		}

		/* Link to the previous one if one exists */
		if (chain_kaddr_pre) {
			PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
			    chain_paddr);
			if (head->dc_next_paddr == 0)
				head->dc_next_paddr = chain_paddr;
		}
		chain_kaddr_pre = chain_kaddr;

		/* Maintain pointers */
		chain_paddr += DESC_SIZE;
		chain_kaddr += DESC_SIZE;

		/* Retrieve the next cookie if there is one */
		if (i < nc-1)
			ddi_dma_nextcookie(handle, &c);
	}

	/* Set the next pointer in the last entry to NULL */
	PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);

	return (DDI_SUCCESS);
}
2078 
2079 /*
2080  * Schedule some work.
2081  */
int
dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);

	mutex_enter(&wlp->dwl_lock);

	DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
	    reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
	    reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
	DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
	    reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
	/* sync out the entire context and descriptor chains */
	(void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
	if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		/* DMA fault: fail the request before queueing it */
		reqp->destroy = TRUE;
		mutex_exit(&wlp->dwl_lock);
		return (CRYPTO_DEVICE_ERROR);
	}

	/* Queue the request; dca_schedule() pulls from dwl_waitq. */
	dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
	wlp->dwl_count++;
	wlp->dwl_lastsubmit = ddi_get_lbolt();
	reqp->dr_wlp = wlp;

	if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
		/* we are fully loaded now, let kCF know */

		wlp->dwl_flowctl++;
		wlp->dwl_busy = 1;

		crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
	}

	if (dosched) {
#ifdef	SCHEDDELAY
		/* possibly wait for more work to arrive */
		if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
			dca_schedule(dca, mcr);
		} else if (!wlp->dwl_schedtid) {
			/* wait 1 msec for more work before doing it */
			wlp->dwl_schedtid = timeout(dca_schedtimeout,
			    (void *)wlp, drv_usectohz(MSEC));
		}
#else
		dca_schedule(dca, mcr);
#endif
	}
	mutex_exit(&wlp->dwl_lock);

	/* Always asynchronous: completion is reported via dca_done(). */
	return (CRYPTO_QUEUED);
}
2135 
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;	/* MCR submission register for this ring */
	int		full;	/* DMASTAT bit meaning "MCR register full" */
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	/*
	 * Keep building and posting MCRs until either the device's MCR
	 * register is full or the waitq is empty.
	 */
	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
			drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		/* Pack up to dwl_reqspermcr requests into this MCR. */
		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		/* Timestamp for the stale-job check in dca_jobtimeout(). */
		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
			DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		/* Writing the MCR's physical address kicks the device. */
		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}
2316 
2317 /*
2318  * Reclaim completed work, called in interrupt context.
2319  */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_work_t	*workp;
	ushort_t	flags;
	int		nreclaimed = 0;
	int		i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR in the submitted (runq), we check to see if
	 * it has been processed.  If so, then we note each individual
	 * job in the MCR, and do the completion processing for
	 * each of such job.
	 */
	for (;;) {

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			break;
		}

		/* only sync the MCR flags, since that's all we need */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
			DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_rmqueue((dca_listnode_t *)workp);
			dca_destroywork(workp);
			return;
		}

		flags = GETMCR16(workp, MCR_FLAGS);
		if ((flags & MCRFLAG_FINISHED) == 0) {
			/* chip is still working on it */
			DBG(dca, DRECLAIM,
			    "chip still working on it (MCR%d)", mcr);
			break;
		}

		/* its really for us, so remove it from the queue */
		dca_rmqueue((dca_listnode_t *)workp);

		/* if we were draining, signal on the cv */
		if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
			cv_signal(&wlp->dwl_cv);
		}

		/* update statistics, done under the lock */
		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];
			if (reqp == NULL) {
				continue;
			}
			if (reqp->dr_byte_stat >= 0) {
				dca->dca_stats[reqp->dr_byte_stat] +=
				    reqp->dr_pkt_length;
			}
			if (reqp->dr_job_stat >= 0) {
				dca->dca_stats[reqp->dr_job_stat]++;
			}
		}
		/*
		 * Drop the worklist lock across the callbacks: dca_done()
		 * may re-enter the framework / requeue work.
		 */
		mutex_exit(&wlp->dwl_lock);

		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];

			if (reqp == NULL) {
				continue;
			}

			/* Do the callback. */
			workp->dw_reqs[i] = NULL;
			dca_done(reqp, CRYPTO_SUCCESS);

			nreclaimed++;
		}

		/* now we can release the work */
		dca_freework(workp);

		/* reacquire the lock before looking at the runq again */
		mutex_enter(&wlp->dwl_lock);
	}
	DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
}
2407 
2408 int
2409 dca_length(crypto_data_t *cdata)
2410 {
2411 	return (cdata->cd_length);
2412 }
2413 
2414 /*
2415  * This is the callback function called from the interrupt when a kCF job
2416  * completes.  It does some driver-specific things, and then calls the
2417  * kCF-provided callback.  Finally, it cleans up the state for the work
2418  * request and drops the reference count to allow for DR.
2419  */
2420 void
2421 dca_done(dca_request_t *reqp, int err)
2422 {
2423 	uint64_t	ena = 0;
2424 
2425 	/* unbind any chains we were using */
2426 	if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2427 		/* DMA failure */
2428 		ena = dca_ena(ena);
2429 		dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2430 		    DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2431 		    "fault on buffer DMA handle");
2432 		if (err == CRYPTO_SUCCESS) {
2433 			err = CRYPTO_DEVICE_ERROR;
2434 		}
2435 	}
2436 
2437 	if (reqp->dr_callback != NULL) {
2438 		reqp->dr_callback(reqp, err);
2439 	} else {
2440 		dca_freereq(reqp);
2441 	}
2442 }
2443 
2444 /*
2445  * Call this when a failure is detected.  It will reset the chip,
2446  * log a message, alert kCF, and mark jobs in the runq as failed.
2447  */
2448 /* ARGSUSED */
2449 void
2450 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2451     uint64_t ena, int errno, char *mess, ...)
2452 {
2453 	va_list	ap;
2454 	char	buf[256];
2455 	int	mcr;
2456 	char	*eclass;
2457 	int	have_mutex;
2458 
2459 	va_start(ap, mess);
2460 	(void) vsprintf(buf, mess, ap);
2461 	va_end(ap);
2462 
2463 	eclass = dca_fma_eclass_string(dca->dca_model, index);
2464 
2465 	if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2466 	    index != DCA_FM_ECLASS_NONE) {
2467 		ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2468 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2469 		    FM_EREPORT_VERS0, NULL);
2470 
2471 		/* Report the impact of the failure to the DDI. */
2472 		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2473 	} else {
2474 		/* Just log the error string to the message log */
2475 		dca_error(dca, buf);
2476 	}
2477 
2478 	/*
2479 	 * Indicate a failure (keeps schedule from running).
2480 	 */
2481 	dca->dca_flags |= DCA_FAILED;
2482 
2483 	/*
2484 	 * Reset the chip.  This should also have as a side effect, the
2485 	 * disabling of all interrupts from the device.
2486 	 */
2487 	(void) dca_reset(dca, 1);
2488 
2489 	/*
2490 	 * Report the failure to kCF.
2491 	 */
2492 	for (mcr = MCR1; mcr <= MCR2; mcr++) {
2493 		if (WORKLIST(dca, mcr)->dwl_prov) {
2494 			crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2495 			    CRYPTO_PROVIDER_FAILED);
2496 		}
2497 	}
2498 
2499 	/*
2500 	 * Return jobs not sent to hardware back to kCF.
2501 	 */
2502 	dca_rejectjobs(dca);
2503 
2504 	/*
2505 	 * From this point on, no new work should be arriving, and the
2506 	 * chip should not be doing any active DMA.
2507 	 */
2508 
2509 	/*
2510 	 * Now find all the work submitted to the device and fail
2511 	 * them.
2512 	 */
2513 	for (mcr = MCR1; mcr <= MCR2; mcr++) {
2514 		dca_worklist_t	*wlp;
2515 		int		i;
2516 
2517 		wlp = WORKLIST(dca, mcr);
2518 
2519 		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2520 			continue;
2521 		}
2522 		for (;;) {
2523 			dca_work_t	*workp;
2524 
2525 			have_mutex = mutex_tryenter(&wlp->dwl_lock);
2526 			workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2527 			if (workp == NULL) {
2528 				if (have_mutex)
2529 					mutex_exit(&wlp->dwl_lock);
2530 				break;
2531 			}
2532 			mutex_exit(&wlp->dwl_lock);
2533 
2534 			/*
2535 			 * Free up requests
2536 			 */
2537 			for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2538 				dca_request_t *reqp = workp->dw_reqs[i];
2539 				if (reqp) {
2540 					if (reqp->dr_flags & DR_INPLACE) {
2541 						dca_done(reqp, errno);
2542 					} else {
2543 						/*
2544 						 * cause it to get retried
2545 						 * elsewhere (software)
2546 						 */
2547 						dca_done(reqp, CRYPTO_FAILED);
2548 					}
2549 					workp->dw_reqs[i] = NULL;
2550 				}
2551 			}
2552 
2553 			mutex_enter(&wlp->dwl_lock);
2554 			/*
2555 			 * If waiting to drain, signal on the waiter.
2556 			 */
2557 			if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2558 				cv_signal(&wlp->dwl_cv);
2559 			}
2560 
2561 			/*
2562 			 * Return the work and request structures to
2563 			 * the free pool.
2564 			 */
2565 			dca_freework(workp);
2566 			if (have_mutex)
2567 				mutex_exit(&wlp->dwl_lock);
2568 		}
2569 	}
2570 
2571 }
2572 
2573 #ifdef	SCHEDDELAY
2574 /*
2575  * Reschedule worklist as needed.
2576  */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t	*wlp = (dca_worklist_t *)arg;
	mutex_enter(&wlp->dwl_lock);
	/* clear the timeout id first so dca_schedule() may rearm it */
	wlp->dwl_schedtid = 0;
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
2586 #endif
2587 
2588 /*
2589  * Check for stalled jobs.
2590  */
2591 void
2592 dca_jobtimeout(void *arg)
2593 {
2594 	int		mcr;
2595 	dca_t		*dca = (dca_t *)arg;
2596 	int		hung = 0;
2597 
2598 	for (mcr = MCR1; mcr <= MCR2; mcr++) {
2599 		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
2600 		dca_work_t	*workp;
2601 		clock_t		when;
2602 
2603 		mutex_enter(&wlp->dwl_lock);
2604 		when = ddi_get_lbolt();
2605 
2606 		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2607 		if (workp == NULL) {
2608 			/* nothing sitting in the queue */
2609 			mutex_exit(&wlp->dwl_lock);
2610 			continue;
2611 		}
2612 
2613 		if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2614 			/* request has been queued for less than STALETIME */
2615 			mutex_exit(&wlp->dwl_lock);
2616 			continue;
2617 		}
2618 
2619 		/* job has been sitting around for over 1 second, badness */
2620 		DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2621 		    mcr);
2622 
2623 		/* put it back in the queue, until we reset the chip */
2624 		hung++;
2625 		mutex_exit(&wlp->dwl_lock);
2626 	}
2627 
2628 	if (hung) {
2629 		dca_failure(dca, DDI_DEVICE_FAULT,
2630 		    DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2631 		    "timeout processing job.)");
2632 	}
2633 
2634 	/* reschedule ourself */
2635 	mutex_enter(&dca->dca_intrlock);
2636 	if (dca->dca_jobtid == 0) {
2637 		/* timeout has been canceled, prior to DR */
2638 		mutex_exit(&dca->dca_intrlock);
2639 		return;
2640 	}
2641 
2642 	/* check again in 1 second */
2643 	dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2644 	    drv_usectohz(SECOND));
2645 	mutex_exit(&dca->dca_intrlock);
2646 }
2647 
2648 /*
2649  * This returns all jobs back to kCF.  It assumes that processing
2650  * on the worklist has halted.
2651  */
void
dca_rejectjobs(dca_t *dca)
{
	int mcr;
	int have_mutex;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_request_t	*reqp;

		/* skip worklists that were never initialized */
		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		/*
		 * NOTE(review): the lock is only tryenter'd once, but the
		 * loop body exits/re-enters it unconditionally — presumably
		 * safe because this only runs after processing has halted;
		 * confirm against the locking design.
		 */
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			mutex_exit(&wlp->dwl_lock);

			/* EAGAIN tells kCF to retry the job elsewhere */
			(void) dca_unbindchains(reqp);
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}
2688 
2689 int
2690 dca_drain(dca_t *dca)
2691 {
2692 	int mcr;
2693 	for (mcr = MCR1; mcr <= MCR2; mcr++) {
2694 #ifdef	SCHEDDELAY
2695 		timeout_id_t	tid;
2696 #endif
2697 		dca_worklist_t *wlp = WORKLIST(dca, mcr);
2698 
2699 		mutex_enter(&wlp->dwl_lock);
2700 		wlp->dwl_drain = 1;
2701 
2702 		/* give it up to a second to drain from the chip */
2703 		if (!QEMPTY(&wlp->dwl_runq)) {
2704 			(void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock,
2705 			    ddi_get_time() + drv_usectohz(STALETIME));
2706 
2707 			if (!QEMPTY(&wlp->dwl_runq)) {
2708 				dca_error(dca, "unable to drain device");
2709 				mutex_exit(&wlp->dwl_lock);
2710 				dca_undrain(dca);
2711 				return (EBUSY);
2712 			}
2713 		}
2714 
2715 #ifdef	SCHEDDELAY
2716 		tid = wlp->dwl_schedtid;
2717 		mutex_exit(&wlp->dwl_lock);
2718 
2719 		/*
2720 		 * untimeout outside the lock -- this is safe because we
2721 		 * have set the drain flag, so dca_schedule() will not
2722 		 * reschedule another timeout
2723 		 */
2724 		if (tid) {
2725 			untimeout(tid);
2726 		}
2727 #else
2728 		mutex_exit(&wlp->dwl_lock);
2729 #endif
2730 	}
2731 	return (0);
2732 }
2733 
2734 void
2735 dca_undrain(dca_t *dca)
2736 {
2737 	int	mcr;
2738 
2739 	for (mcr = MCR1; mcr <= MCR2; mcr++) {
2740 		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
2741 		mutex_enter(&wlp->dwl_lock);
2742 		wlp->dwl_drain = 0;
2743 		dca_schedule(dca, mcr);
2744 		mutex_exit(&wlp->dwl_lock);
2745 	}
2746 }
2747 
2748 /*
2749  * Duplicate the crypto_data_t structure, but point to the original
2750  * buffers.
2751  */
2752 int
2753 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2754 {
2755 	ninput->cd_format = input->cd_format;
2756 	ninput->cd_offset = input->cd_offset;
2757 	ninput->cd_length = input->cd_length;
2758 	ninput->cd_miscdata = input->cd_miscdata;
2759 
2760 	switch (input->cd_format) {
2761 	case CRYPTO_DATA_RAW:
2762 		ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2763 		ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2764 		break;
2765 
2766 	case CRYPTO_DATA_UIO:
2767 		ninput->cd_uio = input->cd_uio;
2768 		break;
2769 
2770 	case CRYPTO_DATA_MBLK:
2771 		ninput->cd_mp = input->cd_mp;
2772 		break;
2773 
2774 	default:
2775 		DBG(NULL, DWARN,
2776 		    "dca_dupcrypto: unrecognised crypto data format");
2777 		return (CRYPTO_FAILED);
2778 	}
2779 
2780 	return (CRYPTO_SUCCESS);
2781 }
2782 
2783 /*
2784  * Performs validation checks on the input and output data structures.
2785  */
2786 int
2787 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2788 {
2789 	int	rv = CRYPTO_SUCCESS;
2790 
2791 	switch (input->cd_format) {
2792 	case CRYPTO_DATA_RAW:
2793 		break;
2794 
2795 	case CRYPTO_DATA_UIO:
2796 		/* we support only kernel buffer */
2797 		if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2798 			DBG(NULL, DWARN, "non kernel input uio buffer");
2799 			rv = CRYPTO_ARGUMENTS_BAD;
2800 		}
2801 		break;
2802 
2803 	case CRYPTO_DATA_MBLK:
2804 		break;
2805 
2806 	default:
2807 		DBG(NULL, DWARN, "unrecognised input crypto data format");
2808 		rv = CRYPTO_ARGUMENTS_BAD;
2809 	}
2810 
2811 	switch (output->cd_format) {
2812 	case CRYPTO_DATA_RAW:
2813 		break;
2814 
2815 	case CRYPTO_DATA_UIO:
2816 		/* we support only kernel buffer */
2817 		if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2818 			DBG(NULL, DWARN, "non kernel output uio buffer");
2819 			rv = CRYPTO_ARGUMENTS_BAD;
2820 		}
2821 		break;
2822 
2823 	case CRYPTO_DATA_MBLK:
2824 		break;
2825 
2826 	default:
2827 		DBG(NULL, DWARN, "unrecognised output crypto data format");
2828 		rv = CRYPTO_ARGUMENTS_BAD;
2829 	}
2830 
2831 	return (rv);
2832 }
2833 
2834 /*
2835  * data: source crypto_data_t struct
2836  * off:	offset into the source before commencing copy
2837  * count: the amount of data to copy
2838  * dest: destination buffer
2839  */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int rv = CRYPTO_SUCCESS;
	uio_t *uiop;
	uint_t vec_idx;
	size_t cur_len;
	mblk_t *mp;

	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset adjusted size of the buffer.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		/* single contiguous buffer: one copy does it */
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len);
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			/* 'off' only applies within the first iovec */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont);
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			/* 'off' only applies within the first mblk */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
2953 
2954 
2955 /*
2956  * Performs the input, output or hard scatter/gather checks on the specified
2957  * crypto_data_t struct. Returns true if the data is scatter/gather in nature
2958  * ie fails the test.
2959  */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t *uiop;
	mblk_t *mp;
	int rv = FALSE;

	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			/* more than one iovec means scatter/gather */
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			/* a chained mblk means scatter/gather */
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and is of word multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if (uiop->uio_iovcnt > 1) {
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL) {
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			/* only the first iovec is examined here */
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			/* only the first mblk is examined here */
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
3079 
3080 /*
3081  * Increments the cd_offset and decrements the cd_length as the data is
3082  * gathered from the crypto_data_t struct.
3083  * The data is reverse-copied into the dest buffer if the flag is true.
3084  */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int	rv = CRYPTO_SUCCESS;
	uint_t	vec_idx;
	uio_t	*uiop;
	off_t	off = in->cd_offset;
	size_t	cur_len;
	mblk_t	*mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* consume the gathered bytes from the descriptor */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len);
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			/* 'off' only applies within the first iovec */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont);
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			/* 'off' only applies within the first mblk */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3212 
3213 /*
3214  * Increments the cd_offset and decrements the cd_length as the data is
3215  * gathered from the crypto_data_t struct.
3216  */
3217 int
3218 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
3219     int count)
3220 {
3221 	int	rv = CRYPTO_SUCCESS;
3222 	caddr_t	baddr;
3223 	uint_t	vec_idx;
3224 	uio_t	*uiop;
3225 	off_t	off = in->cd_offset;
3226 	size_t	cur_len;
3227 	mblk_t	*mp;
3228 
3229 	/* Process the residual first */
3230 	if (*residlen > 0) {
3231 		uint_t	num = min(count, *residlen);
3232 		bcopy(resid, dest, num);
3233 		*residlen -= num;
3234 		if (*residlen > 0) {
3235 			/*
3236 			 * Requested amount 'count' is less than what's in
3237 			 * the residual, so shuffle any remaining resid to
3238 			 * the front.
3239 			 */
3240 			baddr = resid + num;
3241 			bcopy(baddr, resid, *residlen);
3242 		}
3243 		dest += num;
3244 		count -= num;
3245 	}
3246 
3247 	/* Now process what's in the crypto_data_t structs */
3248 	switch (in->cd_format) {
3249 	case CRYPTO_DATA_RAW:
3250 		if (count > in->cd_length) {
3251 			/*
3252 			 * The caller specified a length greater than the
3253 			 * size of the buffer.
3254 			 */
3255 			return (CRYPTO_DATA_LEN_RANGE);
3256 		}
3257 		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3258 		in->cd_offset += count;
3259 		in->cd_length -= count;
3260 		break;
3261 
3262 	case CRYPTO_DATA_UIO:
3263 		/*
3264 		 * Jump to the first iovec containing data to be processed.
3265 		 */
3266 		uiop = in->cd_uio;
3267 		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3268 		    off >= uiop->uio_iov[vec_idx].iov_len;
3269 		    off -= uiop->uio_iov[vec_idx++].iov_len);
3270 		if (vec_idx == uiop->uio_iovcnt) {
3271 			/*
3272 			 * The caller specified an offset that is larger than
3273 			 * the total size of the buffers it provided.
3274 			 */
3275 			return (CRYPTO_DATA_LEN_RANGE);
3276 		}
3277 
3278 		/*
3279 		 * Now process the iovecs.
3280 		 */
3281 		while (vec_idx < uiop->uio_iovcnt && count > 0) {
3282 			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3283 			    off, count);
3284 			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
3285 			    cur_len);
3286 			count -= cur_len;
3287 			dest += cur_len;
3288 			in->cd_offset += cur_len;
3289 			in->cd_length -= cur_len;
3290 			vec_idx++;
3291 			off = 0;
3292 		}
3293 
3294 		if (vec_idx == uiop->uio_iovcnt && count > 0) {
3295 			/*
3296 			 * The end of the specified iovec's was reached but
3297 			 * the length requested could not be processed
3298 			 * (requested to digest more data than it provided).
3299 			 */
3300 			return (CRYPTO_DATA_LEN_RANGE);
3301 		}
3302 		break;
3303 
3304 	case CRYPTO_DATA_MBLK:
3305 		/*
3306 		 * Jump to the first mblk_t containing data to be processed.
3307 		 */
3308 		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3309 		    off -= MBLKL(mp), mp = mp->b_cont);
3310 		if (mp == NULL) {
3311 			/*
3312 			 * The caller specified an offset that is larger than
3313 			 * the total size of the buffers it provided.
3314 			 */
3315 			return (CRYPTO_DATA_LEN_RANGE);
3316 		}
3317 
3318 		/*
3319 		 * Now do the processing on the mblk chain.
3320 		 */
3321 		while (mp != NULL && count > 0) {
3322 			cur_len = min(MBLKL(mp) - off, count);
3323 			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
3324 			count -= cur_len;
3325 			dest += cur_len;
3326 			in->cd_offset += cur_len;
3327 			in->cd_length -= cur_len;
3328 			mp = mp->b_cont;
3329 			off = 0;
3330 		}
3331 
3332 		if (mp == NULL && count > 0) {
3333 			/*
3334 			 * The end of the mblk was reached but the length
3335 			 * requested could not be processed, (requested to
3336 			 * digest more data than it provided).
3337 			 */
3338 			return (CRYPTO_DATA_LEN_RANGE);
3339 		}
3340 		break;
3341 
3342 	default:
3343 		DBG(NULL, DWARN,
3344 		    "dca_resid_gather: unrecognised crypto data format");
3345 		rv = CRYPTO_ARGUMENTS_BAD;
3346 	}
3347 	return (rv);
3348 }
3349 
3350 /*
3351  * Appends the data to the crypto_data_t struct increasing cd_length.
3352  * cd_offset is left unchanged.
3353  * Data is reverse-copied if the flag is TRUE.
3354  */
3355 int
3356 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3357 {
3358 	int	rv = CRYPTO_SUCCESS;
3359 	off_t	offset = out->cd_offset + out->cd_length;
3360 	uint_t	vec_idx;
3361 	uio_t	*uiop;
3362 	size_t	cur_len;
3363 	mblk_t	*mp;
3364 
3365 	switch (out->cd_format) {
3366 	case CRYPTO_DATA_RAW:
3367 		if (out->cd_raw.iov_len - offset < count) {
3368 			/* Trying to write out more than space available. */
3369 			return (CRYPTO_DATA_LEN_RANGE);
3370 		}
3371 		if (reverse)
3372 			dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3373 			    count, count);
3374 		else
3375 			bcopy(src, out->cd_raw.iov_base + offset, count);
3376 		out->cd_length += count;
3377 		break;
3378 
3379 	case CRYPTO_DATA_UIO:
3380 		/*
3381 		 * Jump to the first iovec that can be written to.
3382 		 */
3383 		uiop = out->cd_uio;
3384 		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3385 		    offset >= uiop->uio_iov[vec_idx].iov_len;
3386 		    offset -= uiop->uio_iov[vec_idx++].iov_len);
3387 		if (vec_idx == uiop->uio_iovcnt) {
3388 			/*
3389 			 * The caller specified an offset that is larger than
3390 			 * the total size of the buffers it provided.
3391 			 */
3392 			return (CRYPTO_DATA_LEN_RANGE);
3393 		}
3394 
3395 		/*
3396 		 * Now process the iovecs.
3397 		 */
3398 		while (vec_idx < uiop->uio_iovcnt && count > 0) {
3399 			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3400 			    offset, count);
3401 			count -= cur_len;
3402 			if (reverse) {
3403 				dca_reverse((void*) (src+count),
3404 				    uiop->uio_iov[vec_idx].iov_base +
3405 				    offset, cur_len, cur_len);
3406 			} else {
3407 				bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3408 				    offset, cur_len);
3409 				src += cur_len;
3410 			}
3411 			out->cd_length += cur_len;
3412 			vec_idx++;
3413 			offset = 0;
3414 		}
3415 
3416 		if (vec_idx == uiop->uio_iovcnt && count > 0) {
3417 			/*
3418 			 * The end of the specified iovec's was reached but
3419 			 * the length requested could not be processed
3420 			 * (requested to write more data than space provided).
3421 			 */
3422 			return (CRYPTO_DATA_LEN_RANGE);
3423 		}
3424 		break;
3425 
3426 	case CRYPTO_DATA_MBLK:
3427 		/*
3428 		 * Jump to the first mblk_t that can be written to.
3429 		 */
3430 		for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3431 		    offset -= MBLKL(mp), mp = mp->b_cont);
3432 		if (mp == NULL) {
3433 			/*
3434 			 * The caller specified an offset that is larger than
3435 			 * the total size of the buffers it provided.
3436 			 */
3437 			return (CRYPTO_DATA_LEN_RANGE);
3438 		}
3439 
3440 		/*
3441 		 * Now do the processing on the mblk chain.
3442 		 */
3443 		while (mp != NULL && count > 0) {
3444 			cur_len = min(MBLKL(mp) - offset, count);
3445 			count -= cur_len;
3446 			if (reverse) {
3447 				dca_reverse((void*) (src+count),
3448 				    (char *)(mp->b_rptr + offset), cur_len,
3449 				    cur_len);
3450 			} else {
3451 				bcopy(src, (char *)(mp->b_rptr + offset),
3452 				    cur_len);
3453 				src += cur_len;
3454 			}
3455 			out->cd_length += cur_len;
3456 			mp = mp->b_cont;
3457 			offset = 0;
3458 		}
3459 
3460 		if (mp == NULL && count > 0) {
3461 			/*
3462 			 * The end of the mblk was reached but the length
3463 			 * requested could not be processed, (requested to
3464 			 * digest more data than it provided).
3465 			 */
3466 			return (CRYPTO_DATA_LEN_RANGE);
3467 		}
3468 		break;
3469 
3470 	default:
3471 		DBG(NULL, DWARN, "unrecognised crypto data format");
3472 		rv = CRYPTO_ARGUMENTS_BAD;
3473 	}
3474 	return (rv);
3475 }
3476 
3477 /*
3478  * Compare two byte arrays in reverse order.
3479  * Return 0 if they are identical, 1 otherwise.
3480  */
3481 int
3482 dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
3483 {
3484 	int i;
3485 	caddr_t src, dst;
3486 
3487 	if (!n)
3488 		return (0);
3489 
3490 	src = ((caddr_t)s1) + n - 1;
3491 	dst = (caddr_t)s2;
3492 	for (i = 0; i < n; i++) {
3493 		if (*src != *dst)
3494 			return (1);
3495 		src--;
3496 		dst++;
3497 	}
3498 
3499 	return (0);
3500 }
3501 
3502 
3503 /*
3504  * This calculates the size of a bignum in bits, specifically not counting
3505  * leading zero bits.  This size calculation must be done *before* any
3506  * endian reversal takes place (i.e. the numbers are in absolute big-endian
3507  * order.)
3508  */
3509 int
3510 dca_bitlen(unsigned char *bignum, int bytelen)
3511 {
3512 	unsigned char	msbyte;
3513 	int		i, j;
3514 
3515 	for (i = 0; i < bytelen - 1; i++) {
3516 		if (bignum[i] != 0) {
3517 			break;
3518 		}
3519 	}
3520 	msbyte = bignum[i];
3521 	for (j = 8; j > 1; j--) {
3522 		if (msbyte & 0x80) {
3523 			break;
3524 		}
3525 		msbyte <<= 1;
3526 	}
3527 	return ((8 * (bytelen - i - 1)) + j);
3528 }
3529 
3530 /*
3531  * This compares to bignums (in big-endian order).  It ignores leading
3532  * null bytes.  The result semantics follow bcmp, mempcmp, strcmp, etc.
3533  */
3534 int
3535 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3536 {
3537 	while ((n1len > 1) && (*n1 == 0)) {
3538 		n1len--;
3539 		n1++;
3540 	}
3541 	while ((n2len > 1) && (*n2 == 0)) {
3542 		n2len--;
3543 		n2++;
3544 	}
3545 	if (n1len != n2len) {
3546 		return (n1len - n2len);
3547 	}
3548 	while ((n1len > 1) && (*n1 == *n2)) {
3549 		n1++;
3550 		n2++;
3551 		n1len--;
3552 	}
3553 	return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3554 }
3555 
3556 /*
3557  * Return array of key attributes.
3558  */
3559 crypto_object_attribute_t *
3560 dca_get_key_attr(crypto_key_t *key)
3561 {
3562 	if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3563 	    (key->ck_count == 0)) {
3564 		return (NULL);
3565 	}
3566 
3567 	return (key->ck_attrs);
3568 }
3569 
3570 /*
3571  * If attribute type exists valp points to it's 32-bit value.
3572  */
3573 int
3574 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3575     uint64_t atype, uint32_t *valp)
3576 {
3577 	crypto_object_attribute_t	*bap;
3578 
3579 	bap = dca_find_attribute(attrp, atnum, atype);
3580 	if (bap == NULL) {
3581 		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3582 	}
3583 
3584 	*valp = *bap->oa_value;
3585 
3586 	return (CRYPTO_SUCCESS);
3587 }
3588 
3589 /*
3590  * If attribute type exists data contains the start address of the value,
3591  * and numelems contains it's length.
3592  */
3593 int
3594 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3595     uint64_t atype, void **data, unsigned int *numelems)
3596 {
3597 	crypto_object_attribute_t	*bap;
3598 
3599 	bap = dca_find_attribute(attrp, atnum, atype);
3600 	if (bap == NULL) {
3601 		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3602 	}
3603 
3604 	*data = bap->oa_value;
3605 	*numelems = bap->oa_value_len;
3606 
3607 	return (CRYPTO_SUCCESS);
3608 }
3609 
3610 /*
3611  * Finds entry of specified name. If it is not found dca_find_attribute returns
3612  * NULL.
3613  */
3614 crypto_object_attribute_t *
3615 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3616     uint64_t atype)
3617 {
3618 	while (atnum) {
3619 		if (attrp->oa_type == atype)
3620 			return (attrp);
3621 		atnum--;
3622 		attrp++;
3623 	}
3624 	return (NULL);
3625 }
3626 
3627 /*
3628  * Return the address of the first data buffer. If the data format is
3629  * unrecognised return NULL.
3630  */
3631 caddr_t
3632 dca_bufdaddr(crypto_data_t *data)
3633 {
3634 	switch (data->cd_format) {
3635 	case CRYPTO_DATA_RAW:
3636 		return (data->cd_raw.iov_base + data->cd_offset);
3637 	case CRYPTO_DATA_UIO:
3638 		return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3639 	case CRYPTO_DATA_MBLK:
3640 		return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3641 	default:
3642 		DBG(NULL, DWARN,
3643 		    "dca_bufdaddr: unrecognised crypto data format");
3644 		return (NULL);
3645 	}
3646 }
3647 
3648 static caddr_t
3649 dca_bufdaddr_out(crypto_data_t *data)
3650 {
3651 	size_t offset = data->cd_offset + data->cd_length;
3652 
3653 	switch (data->cd_format) {
3654 	case CRYPTO_DATA_RAW:
3655 		return (data->cd_raw.iov_base + offset);
3656 	case CRYPTO_DATA_UIO:
3657 		return (data->cd_uio->uio_iov[0].iov_base + offset);
3658 	case CRYPTO_DATA_MBLK:
3659 		return ((char *)data->cd_mp->b_rptr + offset);
3660 	default:
3661 		DBG(NULL, DWARN,
3662 		    "dca_bufdaddr_out: unrecognised crypto data format");
3663 		return (NULL);
3664 	}
3665 }
3666 
3667 /*
3668  * Control entry points.
3669  */
3670 
/*
 * KCF control entry point: report provider status.  This provider is
 * always reported ready; no transient busy/failed state is tracked.
 */
/* ARGSUSED */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
3677 
3678 /*
3679  * Cipher (encrypt/decrypt) entry points.
3680  */
3681 
/*
 * KCF SPI entry point: initialize an encryption context for the DES,
 * 3DES, or RSA mechanisms.  On success the context's private data is
 * linked onto the per-instance context list.
 */
/* ARGSUSED */
static int
dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);

	/* Track successfully initialized contexts on the softc's list. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
3725 
3726 /* ARGSUSED */
3727 static int
3728 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3729     crypto_data_t *ciphertext, crypto_req_handle_t req)
3730 {
3731 	int error = CRYPTO_FAILED;
3732 	dca_t *softc;
3733 	/* LINTED E_FUNC_SET_NOT_USED */
3734 	int instance;
3735 
3736 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3737 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
3738 
3739 	/* extract softc and instance number from context */
3740 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3741 	DBG(softc, DENTRY, "dca_encrypt: started");
3742 
3743 	/* check mechanism */
3744 	switch (DCA_MECH_FROM_CTX(ctx)) {
3745 	case DES_CBC_MECH_INFO_TYPE:
3746 		error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3747 		break;
3748 	case DES3_CBC_MECH_INFO_TYPE:
3749 		error = dca_3des(ctx, plaintext, ciphertext, req,
3750 		    DR_ENCRYPT | DR_TRIPLE);
3751 		break;
3752 	case RSA_PKCS_MECH_INFO_TYPE:
3753 	case RSA_X_509_MECH_INFO_TYPE:
3754 		error = dca_rsastart(ctx, plaintext, ciphertext, req,
3755 		    DCA_RSA_ENC);
3756 		break;
3757 	default:
3758 		/* Should never reach here */
3759 		cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3760 		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3761 		error = CRYPTO_MECHANISM_INVALID;
3762 	}
3763 
3764 	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3765 	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
3766 		ciphertext->cd_length = 0;
3767 	}
3768 
3769 	DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3770 
3771 	return (error);
3772 }
3773 
/*
 * KCF SPI entry point: multi-part encrypt continuation.  Only the DES
 * and 3DES mechanisms are dispatched here (RSA has no update path in
 * this driver).
 */
/* ARGSUSED */
static int
dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_update: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);

	return (error);
}
3812 
/*
 * KCF SPI entry point: finish a multi-part encrypt.  Only the DES and
 * 3DES mechanisms are dispatched here.
 */
/* ARGSUSED */
static int
dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);

	return (error);
}
3849 
/*
 * KCF SPI entry point: atomic (single-call, no context) encrypt for
 * DES, 3DES, or RSA.  A non-NULL ctx_template is rejected.  On any
 * result other than CRYPTO_QUEUED or CRYPTO_SUCCESS the output length
 * is reset to zero.
 */
/* ARGSUSED */
static int
dca_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_encrypt_atomic: started");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);

	return (error);
}
3896 
/*
 * KCF SPI entry point: initialize a decryption context for the DES,
 * 3DES, or RSA mechanisms.  On success the context's private data is
 * linked onto the per-instance context list.
 */
/* ARGSUSED */
static int
dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);

	/* Track successfully initialized contexts on the softc's list. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
3940 
/*
 * KCF SPI entry point: single-part decrypt on an initialized context.
 * Dispatches on the mechanism recorded in the context.  On any result
 * other than CRYPTO_QUEUED, CRYPTO_SUCCESS, or CRYPTO_BUFFER_TOO_SMALL
 * the output length is reset to zero (when an output was supplied).
 */
/* ARGSUSED */
static int
dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, ciphertext, plaintext, req,
		    DCA_RSA_DEC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		if (plaintext)
			plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);

	return (error);
}
3989 
/*
 * KCF SPI entry point: multi-part decrypt continuation.  Only the DES
 * and 3DES mechanisms are dispatched here.
 */
/* ARGSUSED */
static int
dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_update: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);

	return (error);
}
4028 
/*
 * KCF SPI entry point: finish a multi-part decrypt.  Only the DES and
 * 3DES mechanisms are dispatched here.
 */
/* ARGSUSED */
static int
dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);

	return (error);
}
4065 
4066 /* ARGSUSED */
4067 static int
4068 dca_decrypt_atomic(crypto_provider_handle_t provider,
4069     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4070     crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4071     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4072 {
4073 	int error = CRYPTO_FAILED;
4074 	dca_t *softc = (dca_t *)provider;
4075 
4076 	DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4077 
4078 	if (ctx_template != NULL)
4079 		return (CRYPTO_ARGUMENTS_BAD);
4080 
4081 	/* check mechanism */
4082 	switch (mechanism->cm_type) {
4083 	case DES_CBC_MECH_INFO_TYPE:
4084 		error = dca_3desatomic(provider, session_id, mechanism, key,
4085 		    ciphertext, plaintext, KM_SLEEP, req,
4086 		    DR_DECRYPT | DR_ATOMIC);
4087 		break;
4088 	case DES3_CBC_MECH_INFO_TYPE:
4089 		error = dca_3desatomic(provider, session_id, mechanism, key,
4090 		    ciphertext, plaintext, KM_SLEEP, req,
4091 		    DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4092 		break;
4093 	case RSA_PKCS_MECH_INFO_TYPE:
4094 	case RSA_X_509_MECH_INFO_TYPE:
4095 		error = dca_rsaatomic(provider, session_id, mechanism, key,
4096 		    ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4097 		break;
4098 	default:
4099 		cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4100 		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
4101 		error = CRYPTO_MECHANISM_INVALID;
4102 	}
4103 
4104 	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4105 		plaintext->cd_length = 0;
4106 	}
4107 
4108 	DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4109 
4110 	return (error);
4111 }
4112 
4113 /*
4114  * Sign entry points.
4115  */
4116 
/*
 * KCF SPI entry point: initialize a sign context for the RSA or DSA
 * mechanisms.  A non-NULL ctx_template is rejected.  On success the
 * context's private data is linked onto the per-instance context list.
 */
/* ARGSUSED */
static int
dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);

	/* Track successfully initialized contexts on the softc's list. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
4159 
/*
 * KCF SPI entry point: single-part sign on an initialized context.
 * Dispatches to RSA or DSA based on the mechanism recorded in the
 * context.
 */
static int
dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_sign(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);

	return (error);
}
4195 
/*
 * KCF SPI entry point: multi-part sign update.  No mechanism offered
 * by this provider supports multi-part signing, so this always warns
 * and fails with CRYPTO_MECHANISM_INVALID.
 */
/* ARGSUSED */
static int
dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_update: started\n");

	cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);

	return (error);
}
4220 
/*
 * KCF SPI entry point: multi-part sign final.  No mechanism offered
 * by this provider supports multi-part signing, so this always warns
 * and fails with CRYPTO_MECHANISM_INVALID.
 */
/* ARGSUSED */
static int
dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_final: started\n");

	cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);

	return (error);
}
4245 
4246 static int
4247 dca_sign_atomic(crypto_provider_handle_t provider,
4248     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4249     crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4250     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4251 {
4252 	int error = CRYPTO_FAILED;
4253 	dca_t *softc = (dca_t *)provider;
4254 
4255 	DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4256 
4257 	if (ctx_template != NULL)
4258 		return (CRYPTO_ARGUMENTS_BAD);
4259 
4260 	/* check mechanism */
4261 	switch (mechanism->cm_type) {
4262 	case RSA_PKCS_MECH_INFO_TYPE:
4263 	case RSA_X_509_MECH_INFO_TYPE:
4264 		error = dca_rsaatomic(provider, session_id, mechanism, key,
4265 		    data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4266 		break;
4267 	case DSA_MECH_INFO_TYPE:
4268 		error = dca_dsaatomic(provider, session_id, mechanism, key,
4269 		    data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4270 		break;
4271 	default:
4272 		cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4273 		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
4274 		error = CRYPTO_MECHANISM_INVALID;
4275 	}
4276 
4277 	DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4278 
4279 	return (error);
4280 }
4281 
4282 /* ARGSUSED */
4283 static int
4284 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4285     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4286     crypto_req_handle_t req)
4287 {
4288 	int error = CRYPTO_FAILED;
4289 	dca_t *softc;
4290 	/* LINTED E_FUNC_SET_NOT_USED */
4291 	int instance;
4292 
4293 	/* extract softc and instance number from context */
4294 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4295 	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4296 
4297 	if (ctx_template != NULL)
4298 		return (CRYPTO_ARGUMENTS_BAD);
4299 
4300 	/* check mechanism */
4301 	switch (mechanism->cm_type) {
4302 	case RSA_PKCS_MECH_INFO_TYPE:
4303 	case RSA_X_509_MECH_INFO_TYPE:
4304 		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4305 		break;
4306 	default:
4307 		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4308 		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
4309 		error = CRYPTO_MECHANISM_INVALID;
4310 	}
4311 
4312 	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4313 
4314 	if (error == CRYPTO_SUCCESS)
4315 		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4316 		    &softc->dca_ctx_list_lock);
4317 
4318 	return (error);
4319 }
4320 
4321 static int
4322 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4323     crypto_data_t *signature, crypto_req_handle_t req)
4324 {
4325 	int error = CRYPTO_FAILED;
4326 	dca_t *softc;
4327 	/* LINTED E_FUNC_SET_NOT_USED */
4328 	int instance;
4329 
4330 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4331 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
4332 
4333 	/* extract softc and instance number from context */
4334 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4335 	DBG(softc, DENTRY, "dca_sign_recover: started\n");
4336 
4337 	/* check mechanism */
4338 	switch (DCA_MECH_FROM_CTX(ctx)) {
4339 	case RSA_PKCS_MECH_INFO_TYPE:
4340 	case RSA_X_509_MECH_INFO_TYPE:
4341 		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4342 		break;
4343 	default:
4344 		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4345 		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4346 		error = CRYPTO_MECHANISM_INVALID;
4347 	}
4348 
4349 	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4350 
4351 	return (error);
4352 }
4353 
4354 static int
4355 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4356     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4357     crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4358     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4359 {
4360 	int error = CRYPTO_FAILED;
4361 	dca_t *softc = (dca_t *)provider;
4362 	/* LINTED E_FUNC_SET_NOT_USED */
4363 	int instance;
4364 
4365 	instance = ddi_get_instance(softc->dca_dip);
4366 	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4367 
4368 	if (ctx_template != NULL)
4369 		return (CRYPTO_ARGUMENTS_BAD);
4370 
4371 	/* check mechanism */
4372 	switch (mechanism->cm_type) {
4373 	case RSA_PKCS_MECH_INFO_TYPE:
4374 	case RSA_X_509_MECH_INFO_TYPE:
4375 		error = dca_rsaatomic(provider, session_id, mechanism, key,
4376 		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4377 		break;
4378 	default:
4379 		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4380 		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4381 		error = CRYPTO_MECHANISM_INVALID;
4382 	}
4383 
4384 	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4385 
4386 	return (error);
4387 }
4388 
4389 /*
4390  * Verify entry points.
4391  */
4392 
4393 /* ARGSUSED */
4394 static int
4395 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4396     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4397     crypto_req_handle_t req)
4398 {
4399 	int error = CRYPTO_FAILED;
4400 	dca_t *softc;
4401 	/* LINTED E_FUNC_SET_NOT_USED */
4402 	int instance;
4403 
4404 	/* extract softc and instance number from context */
4405 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4406 	DBG(softc, DENTRY, "dca_verify_init: started\n");
4407 
4408 	if (ctx_template != NULL)
4409 		return (CRYPTO_ARGUMENTS_BAD);
4410 
4411 	/* check mechanism */
4412 	switch (mechanism->cm_type) {
4413 	case RSA_PKCS_MECH_INFO_TYPE:
4414 	case RSA_X_509_MECH_INFO_TYPE:
4415 		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4416 		break;
4417 	case DSA_MECH_INFO_TYPE:
4418 		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4419 		    DCA_DSA_VRFY);
4420 		break;
4421 	default:
4422 		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4423 		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
4424 		error = CRYPTO_MECHANISM_INVALID;
4425 	}
4426 
4427 	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4428 
4429 	if (error == CRYPTO_SUCCESS)
4430 		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4431 		    &softc->dca_ctx_list_lock);
4432 
4433 	return (error);
4434 }
4435 
4436 static int
4437 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4438     crypto_req_handle_t req)
4439 {
4440 	int error = CRYPTO_FAILED;
4441 	dca_t *softc;
4442 	/* LINTED E_FUNC_SET_NOT_USED */
4443 	int instance;
4444 
4445 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4446 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
4447 
4448 	/* extract softc and instance number from context */
4449 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4450 	DBG(softc, DENTRY, "dca_verify: started\n");
4451 
4452 	/* check mechanism */
4453 	switch (DCA_MECH_FROM_CTX(ctx)) {
4454 	case RSA_PKCS_MECH_INFO_TYPE:
4455 	case RSA_X_509_MECH_INFO_TYPE:
4456 		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4457 		break;
4458 	case DSA_MECH_INFO_TYPE:
4459 		error = dca_dsa_verify(ctx, data, signature, req);
4460 		break;
4461 	default:
4462 		cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4463 		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4464 		error = CRYPTO_MECHANISM_INVALID;
4465 	}
4466 
4467 	DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4468 
4469 	return (error);
4470 }
4471 
4472 /* ARGSUSED */
4473 static int
4474 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4475     crypto_req_handle_t req)
4476 {
4477 	int error = CRYPTO_MECHANISM_INVALID;
4478 	dca_t *softc;
4479 	/* LINTED E_FUNC_SET_NOT_USED */
4480 	int instance;
4481 
4482 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4483 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
4484 
4485 	/* extract softc and instance number from context */
4486 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4487 	DBG(softc, DENTRY, "dca_verify_update: started\n");
4488 
4489 	cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4490 	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4491 
4492 	DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4493 
4494 	return (error);
4495 }
4496 
4497 /* ARGSUSED */
4498 static int
4499 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4500     crypto_req_handle_t req)
4501 {
4502 	int error = CRYPTO_MECHANISM_INVALID;
4503 	dca_t *softc;
4504 	/* LINTED E_FUNC_SET_NOT_USED */
4505 	int instance;
4506 
4507 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4508 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
4509 
4510 	/* extract softc and instance number from context */
4511 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4512 	DBG(softc, DENTRY, "dca_verify_final: started\n");
4513 
4514 	cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4515 	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4516 
4517 	DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4518 
4519 	return (error);
4520 }
4521 
4522 static int
4523 dca_verify_atomic(crypto_provider_handle_t provider,
4524     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4525     crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4526     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4527 {
4528 	int error = CRYPTO_FAILED;
4529 	dca_t *softc = (dca_t *)provider;
4530 
4531 	DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4532 
4533 	if (ctx_template != NULL)
4534 		return (CRYPTO_ARGUMENTS_BAD);
4535 
4536 	/* check mechanism */
4537 	switch (mechanism->cm_type) {
4538 	case RSA_PKCS_MECH_INFO_TYPE:
4539 	case RSA_X_509_MECH_INFO_TYPE:
4540 		error = dca_rsaatomic(provider, session_id, mechanism, key,
4541 		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4542 		break;
4543 	case DSA_MECH_INFO_TYPE:
4544 		error = dca_dsaatomic(provider, session_id, mechanism, key,
4545 		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4546 		break;
4547 	default:
4548 		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4549 		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
4550 		error = CRYPTO_MECHANISM_INVALID;
4551 	}
4552 
4553 	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4554 
4555 	return (error);
4556 }
4557 
4558 /* ARGSUSED */
4559 static int
4560 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4561     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4562     crypto_req_handle_t req)
4563 {
4564 	int error = CRYPTO_MECHANISM_INVALID;
4565 	dca_t *softc;
4566 	/* LINTED E_FUNC_SET_NOT_USED */
4567 	int instance;
4568 
4569 	/* extract softc and instance number from context */
4570 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4571 	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4572 
4573 	if (ctx_template != NULL)
4574 		return (CRYPTO_ARGUMENTS_BAD);
4575 
4576 	/* check mechanism */
4577 	switch (mechanism->cm_type) {
4578 	case RSA_PKCS_MECH_INFO_TYPE:
4579 	case RSA_X_509_MECH_INFO_TYPE:
4580 		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4581 		break;
4582 	default:
4583 		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4584 		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4585 	}
4586 
4587 	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4588 
4589 	if (error == CRYPTO_SUCCESS)
4590 		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4591 		    &softc->dca_ctx_list_lock);
4592 
4593 	return (error);
4594 }
4595 
4596 static int
4597 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4598     crypto_data_t *data, crypto_req_handle_t req)
4599 {
4600 	int error = CRYPTO_MECHANISM_INVALID;
4601 	dca_t *softc;
4602 	/* LINTED E_FUNC_SET_NOT_USED */
4603 	int instance;
4604 
4605 	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4606 		return (CRYPTO_OPERATION_NOT_INITIALIZED);
4607 
4608 	/* extract softc and instance number from context */
4609 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4610 	DBG(softc, DENTRY, "dca_verify_recover: started\n");
4611 
4612 	/* check mechanism */
4613 	switch (DCA_MECH_FROM_CTX(ctx)) {
4614 	case RSA_PKCS_MECH_INFO_TYPE:
4615 	case RSA_X_509_MECH_INFO_TYPE:
4616 		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4617 		break;
4618 	default:
4619 		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4620 		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4621 	}
4622 
4623 	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4624 
4625 	return (error);
4626 }
4627 
4628 static int
4629 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4630     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4631     crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4632     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4633 {
4634 	int error = CRYPTO_MECHANISM_INVALID;
4635 	dca_t *softc = (dca_t *)provider;
4636 
4637 	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4638 
4639 	if (ctx_template != NULL)
4640 		return (CRYPTO_ARGUMENTS_BAD);
4641 
4642 	/* check mechanism */
4643 	switch (mechanism->cm_type) {
4644 	case RSA_PKCS_MECH_INFO_TYPE:
4645 	case RSA_X_509_MECH_INFO_TYPE:
4646 		error = dca_rsaatomic(provider, session_id, mechanism, key,
4647 		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4648 		break;
4649 	default:
4650 		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4651 		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4652 		error = CRYPTO_MECHANISM_INVALID;
4653 	}
4654 
4655 	DBG(softc, DENTRY,
4656 	    "dca_verify_recover_atomic: done, err = 0x%x", error);
4657 
4658 	return (error);
4659 }
4660 
4661 /*
4662  * Random number entry points.
4663  */
4664 
4665 /* ARGSUSED */
4666 static int
4667 dca_generate_random(crypto_provider_handle_t provider,
4668     crypto_session_id_t session_id,
4669     uchar_t *buf, size_t len, crypto_req_handle_t req)
4670 {
4671 	int error = CRYPTO_FAILED;
4672 	dca_t *softc = (dca_t *)provider;
4673 	/* LINTED E_FUNC_SET_NOT_USED */
4674 	int instance;
4675 
4676 	instance = ddi_get_instance(softc->dca_dip);
4677 	DBG(softc, DENTRY, "dca_generate_random: started");
4678 
4679 	error = dca_rng(softc, buf, len, req);
4680 
4681 	DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4682 
4683 	return (error);
4684 }
4685 
4686 /*
4687  * Context management entry points.
4688  */
4689 
4690 int
4691 dca_free_context(crypto_ctx_t *ctx)
4692 {
4693 	int error = CRYPTO_SUCCESS;
4694 	dca_t *softc;
4695 	/* LINTED E_FUNC_SET_NOT_USED */
4696 	int instance;
4697 
4698 	/* extract softc and instance number from context */
4699 	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4700 	DBG(softc, DENTRY, "dca_free_context: entered");
4701 
4702 	if (ctx->cc_provider_private == NULL)
4703 		return (error);
4704 
4705 	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4706 
4707 	error = dca_free_context_low(ctx);
4708 
4709 	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4710 
4711 	return (error);
4712 }
4713 
4714 static int
4715 dca_free_context_low(crypto_ctx_t *ctx)
4716 {
4717 	int error = CRYPTO_SUCCESS;
4718 
4719 	/* check mechanism */
4720 	switch (DCA_MECH_FROM_CTX(ctx)) {
4721 	case DES_CBC_MECH_INFO_TYPE:
4722 	case DES3_CBC_MECH_INFO_TYPE:
4723 		dca_3desctxfree(ctx);
4724 		break;
4725 	case RSA_PKCS_MECH_INFO_TYPE:
4726 	case RSA_X_509_MECH_INFO_TYPE:
4727 		dca_rsactxfree(ctx);
4728 		break;
4729 	case DSA_MECH_INFO_TYPE:
4730 		dca_dsactxfree(ctx);
4731 		break;
4732 	default:
4733 		/* Should never reach here */
4734 		cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4735 		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4736 		error = CRYPTO_MECHANISM_INVALID;
4737 	}
4738 
4739 	return (error);
4740 }
4741 
4742 
4743 /* Free any unfreed private context. It is called in detach. */
4744 static void
4745 dca_free_context_list(dca_t *dca)
4746 {
4747 	dca_listnode_t	*node;
4748 	crypto_ctx_t	ctx;
4749 
4750 	(void) memset(&ctx, 0, sizeof (ctx));
4751 	ctx.cc_provider = dca;
4752 
4753 	while ((node = dca_delist2(&dca->dca_ctx_list,
4754 	    &dca->dca_ctx_list_lock)) != NULL) {
4755 		ctx.cc_provider_private = node;
4756 		(void) dca_free_context_low(&ctx);
4757 	}
4758 }
4759 
4760 static int
4761 ext_info_sym(crypto_provider_handle_t prov,
4762     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4763 {
4764 	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4765 }
4766 
4767 static int
4768 ext_info_asym(crypto_provider_handle_t prov,
4769     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4770 {
4771 	int rv;
4772 
4773 	rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4774 	/* The asymmetric cipher slot supports random */
4775 	ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4776 
4777 	return (rv);
4778 }
4779 
4780 /* ARGSUSED */
4781 static int
4782 ext_info_base(crypto_provider_handle_t prov,
4783     crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4784 {
4785 	dca_t   *dca = (dca_t *)prov;
4786 	int len;
4787 
4788 	/* Label */
4789 	(void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4790 	    ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4791 	len = strlen((char *)ext_info->ei_label);
4792 	(void) memset(ext_info->ei_label + len, ' ',
4793 	    CRYPTO_EXT_SIZE_LABEL - len);
4794 
4795 	/* Manufacturer ID */
4796 	(void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4797 		DCA_MANUFACTURER_ID);
4798 	len = strlen((char *)ext_info->ei_manufacturerID);
4799 	(void) memset(ext_info->ei_manufacturerID + len, ' ',
4800 	    CRYPTO_EXT_SIZE_MANUF - len);
4801 
4802 	/* Model */
4803 	(void) sprintf((char *)ext_info->ei_model, dca->dca_model);
4804 
4805 	DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4806 
4807 	len = strlen((char *)ext_info->ei_model);
4808 	(void) memset(ext_info->ei_model + len, ' ',
4809 		CRYPTO_EXT_SIZE_MODEL - len);
4810 
4811 	/* Serial Number. Blank for Deimos */
4812 	(void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4813 
4814 	ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4815 
4816 	ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4817 	ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4818 	ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4819 	ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4820 	ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4821 	ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4822 	ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4823 	ext_info->ei_hardware_version.cv_major = 0;
4824 	ext_info->ei_hardware_version.cv_minor = 0;
4825 	ext_info->ei_firmware_version.cv_major = 0;
4826 	ext_info->ei_firmware_version.cv_minor = 0;
4827 
4828 	/* Time. No need to be supplied for token without a clock */
4829 	ext_info->ei_time[0] = '\000';
4830 
4831 	return (CRYPTO_SUCCESS);
4832 }
4833 
4834 static void
4835 dca_fma_init(dca_t *dca)
4836 {
4837 	ddi_iblock_cookie_t fm_ibc;
4838 	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
4839 		DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
4840 		DDI_FM_ERRCB_CAPABLE;
4841 
4842 	/* Read FMA capabilities from dca.conf file (if present) */
4843 	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
4844 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4845 	    fm_capabilities);
4846 
4847 	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);
4848 
4849 	/* Only register with IO Fault Services if we have some capability */
4850 	if (dca->fm_capabilities) {
4851 		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
4852 		dca_devattr.devacc_attr_access = DDI_FLAGERR_ACC;
4853 		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;
4854 
4855 		/* Register capabilities with IO Fault Services */
4856 		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
4857 		DBG(dca, DWARN, "fm_capable() =  0x%x",
4858 		    ddi_fm_capable(dca->dca_dip));
4859 
4860 		/*
4861 		 * Initialize pci ereport capabilities if ereport capable
4862 		 */
4863 		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4864 		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
4865 			pci_ereport_setup(dca->dca_dip);
4866 
4867 		/*
4868 		 * Initialize callback mutex and register error callback if
4869 		 * error callback capable.
4870 		 */
4871 		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4872 			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
4873 			    (void *)dca);
4874 		}
4875 	} else {
4876 		/*
4877 		 * These fields have to be cleared of FMA if there are no
4878 		 * FMA capabilities at runtime.
4879 		 */
4880 		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
4881 		dca_devattr.devacc_attr_access = DDI_DEFAULT_ACC;
4882 		dca_dmaattr.dma_attr_flags = 0;
4883 	}
4884 }
4885 
4886 
4887 static void
4888 dca_fma_fini(dca_t *dca)
4889 {
4890 	/* Only unregister FMA capabilities if we registered some */
4891 	if (dca->fm_capabilities) {
4892 
4893 		/*
4894 		 * Release any resources allocated by pci_ereport_setup()
4895 		 */
4896 		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4897 		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4898 			pci_ereport_teardown(dca->dca_dip);
4899 		}
4900 
4901 		/*
4902 		 * Free callback mutex and un-register error callback if
4903 		 * error callback capable.
4904 		 */
4905 		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4906 			ddi_fm_handler_unregister(dca->dca_dip);
4907 		}
4908 
4909 		/* Unregister from IO Fault Services */
4910 		ddi_fm_fini(dca->dca_dip);
4911 		DBG(dca, DWARN, "fm_capable() = 0x%x",
4912 		    ddi_fm_capable(dca->dca_dip));
4913 	}
4914 }
4915 
4916 
4917 /*
4918  * The IO fault service error handling callback function
4919  */
4920 /*ARGSUSED*/
4921 static int
4922 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4923 {
4924 	dca_t		*dca = (dca_t *)impl_data;
4925 
4926 	pci_ereport_post(dip, err, NULL);
4927 	if (err->fme_status == DDI_FM_FATAL) {
4928 		dca_failure(dca, DDI_DATAPATH_FAULT,
4929 		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4930 		    "fault PCI in FMA callback.");
4931 	}
4932 	return (err->fme_status);
4933 }
4934 
4935 
4936 static int
4937 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4938     dca_fma_eclass_t eclass_index)
4939 {
4940 	ddi_fm_error_t	de;
4941 	int		version = 0;
4942 
4943 	ddi_fm_acc_err_get(handle, &de, version);
4944 	if (de.fme_status != DDI_FM_OK) {
4945 		dca_failure(dca, DDI_DATAPATH_FAULT,
4946 		    eclass_index, fm_ena_increment(de.fme_ena),
4947 		    CRYPTO_DEVICE_ERROR, "");
4948 		return (DDI_FAILURE);
4949 	}
4950 
4951 	return (DDI_SUCCESS);
4952 }
4953 
4954 int
4955 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4956     dca_fma_eclass_t eclass_index)
4957 {
4958 	ddi_fm_error_t	de;
4959 	int		version = 0;
4960 
4961 	ddi_fm_dma_err_get(handle, &de, version);
4962 	if (de.fme_status != DDI_FM_OK) {
4963 		dca_failure(dca, DDI_DATAPATH_FAULT,
4964 		    eclass_index, fm_ena_increment(de.fme_ena),
4965 		    CRYPTO_DEVICE_ERROR, "");
4966 		return (DDI_FAILURE);
4967 	}
4968 	return (DDI_SUCCESS);
4969 }
4970 
4971 static uint64_t
4972 dca_ena(uint64_t ena)
4973 {
4974 	if (ena == 0)
4975 		ena = fm_ena_generate(0, FM_ENA_FMT1);
4976 	else
4977 		ena = fm_ena_increment(ena);
4978 	return (ena);
4979 }
4980 
4981 static char *
4982 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
4983 {
4984 	if (strstr(model, "500"))
4985 		return (dca_fma_eclass_sca500[index]);
4986 	else
4987 		return (dca_fma_eclass_sca1000[index]);
4988 }
4989