/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/ioccom.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/crypto/dca.h>

/*
 * Core Deimos driver.
 */

static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
static void dca_free_context_list(dca_t *dca);
static int dca_free_context_low(crypto_ctx_t *ctx);
static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int dca_suspend(dca_t *);
static int dca_resume(dca_t *);
static int dca_init(dca_t *);
static int dca_reset(dca_t *, int);
static int dca_initworklist(dca_t *, dca_worklist_t *);
static void dca_uninit(dca_t *);
static void dca_initq(dca_listnode_t *);
static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t *dca_dequeue(dca_listnode_t *);
static dca_listnode_t *dca_unqueue(dca_listnode_t *);
static dca_request_t *dca_newreq(dca_t *);
static dca_work_t *dca_getwork(dca_t *, int);
static void dca_freework(dca_work_t *);
static dca_work_t *dca_newwork(dca_t *);
static void dca_destroywork(dca_work_t *);
static void dca_schedule(dca_t *, int);
static void dca_reclaim(dca_t *, int);
static uint_t dca_intr(char *);
static void dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void dca_jobtimeout(void *);
static int dca_drain(dca_t *);
static void dca_undrain(dca_t *);
static void dca_rejectjobs(dca_t *);

#ifdef SCHEDDELAY
static void dca_schedtimeout(void *);
#endif

/*
 * We want these inlined for performance.
 */
#ifndef DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif

/*
 * Device operations.
 */
static struct dev_ops devops = {
    DEVO_REV, /* devo_rev */
    0, /* devo_refcnt */
    nodev, /* devo_getinfo */
    nulldev, /* devo_identify */
    nulldev, /* devo_probe */
    dca_attach, /* devo_attach */
    dca_detach, /* devo_detach */
    nodev, /* devo_reset */
    NULL, /* devo_cb_ops */
    NULL, /* devo_bus_ops */
    ddi_power, /* devo_power */
    ddi_quiesce_not_supported, /* devo_quiesce */
};

#define IDENT "PCI Crypto Accelerator"
#define IDENT_SYM "Crypto Accel Sym 2.0"
#define IDENT_ASYM "Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define IDENT3 "PCI Crypto Accelerator Mod 2.0"

#define VENDOR "Sun Microsystems, Inc."

#define STALETIME (30 * SECOND)

#define crypto_prov_notify crypto_provider_notification
/* A 28 char function name doesn't leave much line space */

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
    &mod_driverops, /* drv_modops */
    IDENT, /* drv_linkinfo */
    &devops, /* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
    &mod_cryptoops,
    IDENT3
};

static struct modlinkage modlinkage = {
    MODREV_1, /* ml_rev */
    &modldrv, /* ml_linkage */
    &modlcrypto,
    NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
    /* DES-CBC */
    {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
    /* 3DES-CBC */
    {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
    /* DSA */
    {SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
        CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
        CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
        CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
        CRYPTO_KEYSIZE_UNIT_IN_BITS},

    /* RSA */
    {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
        CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
        CRYPTO_KEYSIZE_UNIT_IN_BITS},
    {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
        CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
        CRYPTO_KEYSIZE_UNIT_IN_BITS}
};

static void dca_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t dca_control_ops = {
    dca_provider_status
};

static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t dca_cipher_ops = {
    dca_encrypt_init,
    dca_encrypt,
    dca_encrypt_update,
    dca_encrypt_final,
    dca_encrypt_atomic,
    dca_decrypt_init,
    dca_decrypt,
    dca_decrypt_update,
    dca_decrypt_final,
    dca_decrypt_atomic
};

static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_sign_ops_t dca_sign_ops = {
    dca_sign_init,
    dca_sign,
    dca_sign_update,
    dca_sign_final,
    dca_sign_atomic,
    dca_sign_recover_init,
    dca_sign_recover,
    dca_sign_recover_atomic
};

static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_verify_ops_t dca_verify_ops = {
    dca_verify_init,
    dca_verify,
    dca_verify_update,
    dca_verify_final,
    dca_verify_atomic,
    dca_verify_recover_init,
    dca_verify_recover,
    dca_verify_recover_atomic
};

static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t dca_random_number_ops = {
    NULL,
    dca_generate_random
};

static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
    ext_info_sym, /* ext_info */
    NULL, /* init_token */
    NULL, /* init_pin */
    NULL /* set_pin */
};

static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
    ext_info_asym, /* ext_info */
    NULL, /* init_token */
    NULL, /* init_pin */
    NULL /* set_pin */
};

int dca_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t dca_ctx_ops = {
    NULL,
    dca_free_context
};

/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
    &dca_control_ops,
    NULL, /* digest_ops */
    &dca_cipher_ops,
    NULL, /* mac_ops */
    NULL, /* sign_ops */
    NULL, /* verify_ops */
    NULL, /* dual_ops */
    NULL, /* cipher_mac_ops */
    NULL, /* random_number_ops */
    NULL, /* session_ops */
    NULL, /* object_ops */
    NULL, /* key_ops */
    &dca_provmanage_ops_1, /* management_ops */
    &dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
    &dca_control_ops,
    NULL, /* digest_ops */
    &dca_cipher_ops,
    NULL, /* mac_ops */
    &dca_sign_ops,
    &dca_verify_ops,
    NULL, /* dual_ops */
    NULL, /* cipher_mac_ops */
    &dca_random_number_ops,
    NULL, /* session_ops */
    NULL, /* object_ops */
    NULL, /* key_ops */
    &dca_provmanage_ops_2, /* management_ops */
    &dca_ctx_ops
};

/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
    CRYPTO_SPI_VERSION_1,
    NULL, /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL, /* pi_provider_dev */
    NULL, /* pi_provider_handle */
    &dca_crypto_ops1,
    sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
    dca_mech_info_tab1,
    0, /* pi_logical_provider_count */
    NULL /* pi_logical_providers */
};

/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
    CRYPTO_SPI_VERSION_1,
    NULL, /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL, /* pi_provider_dev */
    NULL, /* pi_provider_handle */
    &dca_crypto_ops2,
    sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
    dca_mech_info_tab2,
    0, /* pi_logical_provider_count */
    NULL /* pi_logical_providers */
};

/* Convenience macros */
#define DCA_SOFTC_FROM_CTX(ctx) ((dca_t *)(ctx)->cc_provider)
#define DCA_MECH_FROM_CTX(ctx) \
    (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);


static dca_device_t dca_devices[] = {
    /* Broadcom vanilla variants */
    { 0x14e4, 0x5820, "Broadcom 5820" },
    { 0x14e4, 0x5821, "Broadcom 5821" },
    { 0x14e4, 0x5822, "Broadcom 5822" },
    { 0x14e4, 0x5825, "Broadcom 5825" },
    /* Sun-specific OEM variants */
    { 0x108e, 0x5454, "SCA" },
    { 0x108e, 0x5455, "SCA 1000" },
    { 0x108e, 0x5457, "SCA 500" },
    /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
    { 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

static struct ddi_device_acc_attr dca_bufattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

static struct ddi_dma_attr dca_dmaattr = {
    DMA_ATTR_V0, /* dma_attr_version */
    0x0, /* dma_attr_addr_lo */
    0xffffffffUL, /* dma_attr_addr_hi */
    0x00ffffffUL, /* dma_attr_count_max */
    0x40, /* dma_attr_align */
    0x40, /* dma_attr_burstsizes */
    0x1, /* dma_attr_minxfer */
    0x00ffffffUL, /* dma_attr_maxxfer */
    0xffffffffUL, /* dma_attr_seg */
#if defined(__x86)
    512, /* dma_attr_sgllen */
#else
    1, /* dma_attr_sgllen */
#endif
    1, /* dma_attr_granular */
    DDI_DMA_FLAGERR /* dma_attr_flags */
};

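/*
 * Soft state anchor, plus a tunable: dca_mindma is the transfer size
 * (in bytes) below which copying data through the preallocated scratch
 * buffers is presumed cheaper than binding the caller's buffers for
 * DMA directly.
 */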
static void *dca_state = NULL;
int dca_mindma = 2500;

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
    "sca1000.hw.device",
    "sca1000.hw.timeout",
    "sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
    "sca500.hw.device",
    "sca500.hw.timeout",
    "sca500.none"
};

/*
 * DDI entry points.
 */
int
_init(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _init");

    if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
        /* this should *never* happen! */
        return (rv);
    }

    if ((rv = mod_install(&modlinkage)) != 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
        return (rv);
    }

    return (0);
}

int
_fini(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _fini");

    if ((rv = mod_remove(&modlinkage)) == 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
    }
    return (rv);
}

int
_info(struct modinfo *modinfop)
{
    DBG(NULL, DMOD, "dca: in _info");

    return (mod_info(&modlinkage, modinfop));
}

int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    ddi_acc_handle_t pci;
    int instance;
    ddi_iblock_cookie_t ibc;
    int intr_added = 0;
    dca_t *dca;
    ushort_t venid;
    ushort_t devid;
    ushort_t revid;
    ushort_t subsysid;
    ushort_t subvenid;
    int i;
    int ret;
    char ID[64];
    static char *unknowndev = "Unknown device";

#if DEBUG
    /* these are only used for debugging */
    ushort_t pcicomm;
    ushort_t pcistat;
    uchar_t cachelinesz;
    uchar_t mingnt;
    uchar_t maxlat;
    uchar_t lattmr;
#endif

    instance = ddi_get_instance(dip);

    DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

    switch (cmd) {
    case DDI_RESUME:
        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
            dca_diperror(dip, "no soft state in attach");
            return (DDI_FAILURE);
        }
        /* assumption: we won't be DDI_DETACHed until we return */
        return (dca_resume(dca));
    case DDI_ATTACH:
        break;
    default:
        return (DDI_FAILURE);
    }

    if (ddi_slaveonly(dip) == DDI_SUCCESS) {
        dca_diperror(dip, "slot does not support PCI bus-master");
        return (DDI_FAILURE);
    }

    if (ddi_intr_hilevel(dip, 0) != 0) {
        dca_diperror(dip, "hilevel interrupts not supported");
        return (DDI_FAILURE);
    }

    if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /* common PCI attributes */
    venid = pci_config_get16(pci, PCI_VENID);
    devid = pci_config_get16(pci, PCI_DEVID);
    revid = pci_config_get8(pci, PCI_REVID);
    subvenid = pci_config_get16(pci, PCI_SUBVENID);
    subsysid = pci_config_get16(pci, PCI_SUBSYSID);

    /*
     * Broadcom-specific timings.
     * We disable these timers/counters since they can cause
     * incorrect false failures when the bus is just a little
     * bit slow, or busy.
     */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
    /* read registers (for debugging) */
    pcicomm = pci_config_get16(pci, PCI_COMM);
    pcistat = pci_config_get16(pci, PCI_STATUS);
    cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
    mingnt = pci_config_get8(pci, PCI_MINGNT);
    maxlat = pci_config_get8(pci, PCI_MAXLAT);
    lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

    pci_config_teardown(&pci);

    if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to get iblock cookie");
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to allocate soft state");
        return (DDI_FAILURE);
    }

    dca = ddi_get_soft_state(dca_state, instance);
    ASSERT(dca != NULL);
    dca->dca_dip = dip;
    WORKLIST(dca, MCR1)->dwl_prov = 0;
    WORKLIST(dca, MCR2)->dwl_prov = 0;
    /* figure pagesize */
    dca->dca_pagesize = ddi_ptob(dip, 1);

    /*
     * Search for the device in our supported devices table. This
     * is here for two reasons. First, we want to ensure that
     * only Sun-qualified (and presumably Sun-labeled) devices can
     * be used with this driver. Second, some devices have
     * specific differences. E.g. the 5821 has support for a
     * special mode of RC4, deeper queues, power management, and
     * other changes. Also, the export versions of some of these
     * chips don't support RC4 or 3DES, so we catch that here.
     *
     * Note that we only look at the upper nibble of the device
     * id, which is used to distinguish export vs. domestic
     * versions of the chip. (The lower nibble is used for
     * stepping information.)
     */
    for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
        /*
         * Try to match the subsystem information first.
         */
        if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
            subsysid && (subsysid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
            break;
        }
        /*
         * Failing that, try the generic vendor and device id.
         * Even if we find a match, we keep searching anyway,
         * since we would prefer to find a match based on the
         * subsystem ids.
         */
        if ((venid == dca_devices[i].dd_vendor_id) &&
            (devid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
        }
    }
    /* try to handle an unrecognized device */
    if (dca->dca_model == NULL) {
        dca->dca_model = unknowndev;
        dca_error(dca, "device not recognized, not supported");
        DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
            i, venid, devid, revid);
    }

    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
        dca->dca_model) != DDI_SUCCESS) {
        dca_error(dca, "unable to create description property");
        return (DDI_FAILURE);
    }

    DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
        pcicomm, pcistat, cachelinesz);
    DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
        mingnt, maxlat, lattmr);

    /*
     * initialize locks, etc.
     */
    (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

    /* use RNGSHA1 by default */
    if (ddi_getprop(DDI_DEV_T_ANY, dip,
        DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
        dca->dca_flags |= DCA_RNGSHA1;
    }

    /* initialize FMA */
    dca_fma_init(dca);

    /* initialize some key data structures */
    if (dca_init(dca) != DDI_SUCCESS) {
        goto failed;
    }

    /* initialize kstats */
    dca_ksinit(dca);

    /* setup access to registers */
    if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
        0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
        dca_error(dca, "unable to map registers");
        goto failed;
    }

    DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
    DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
    DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
    DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
    DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

    /* reset the chip */
    if (dca_reset(dca, 0) < 0) {
        goto failed;
    }

    /* initialize the chip */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* add the interrupt */
    if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
        (void *)dca) != DDI_SUCCESS) {
        DBG(dca, DWARN, "ddi_add_intr failed");
        goto failed;
    } else {
        intr_added = 1;
    }

    /* enable interrupts on the device */
    /*
     * XXX: Note, 5820A1 errata indicates that this may clobber
     * bits 24 and 23, which affect the speed of the RNG. Since
     * we always want to run in full-speed mode, this should be
     * harmless.
     */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 - increase the DMA read size */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* register MCR1 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
    dca_prov_info1.pi_provider_description = ID;
    dca_prov_info1.pi_provider_dev.pd_hw = dip;
    dca_prov_info1.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info1,
        &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR1", ret);
        goto failed;
    }

    /* register MCR2 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
    dca_prov_info2.pi_provider_description = ID;
    dca_prov_info2.pi_provider_dev.pd_hw = dip;
    dca_prov_info2.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info2,
        &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR2", ret);
        goto failed;
    }

    crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
        CRYPTO_PROVIDER_READY);
    crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
        CRYPTO_PROVIDER_READY);

    /* Initialize the local random number pool for this instance */
    if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
        goto failed;
    }

    mutex_enter(&dca->dca_intrlock);
    dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
        drv_usectohz(SECOND));
    mutex_exit(&dca->dca_intrlock);

    ddi_set_driver_private(dip, (caddr_t)dca);

    ddi_report_dev(dip);

    if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
        ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
    }

    return (DDI_SUCCESS);

failed:
    /* unregister from the crypto framework */
    if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR1)->dwl_prov);
    }
    if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR2)->dwl_prov);
    }
    if (intr_added) {
        CLRBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
        /* unregister intr handler */
        ddi_remove_intr(dip, 0, dca->dca_icookie);
    }
    if (dca->dca_regs_handle) {
        ddi_regs_map_free(&dca->dca_regs_handle);
    }
    if (dca->dca_intrstats) {
        kstat_delete(dca->dca_intrstats);
    }
    if (dca->dca_ksp) {
        kstat_delete(dca->dca_ksp);
    }
    dca_uninit(dca);

    /* finalize FMA */
    dca_fma_fini(dca);

    mutex_destroy(&dca->dca_intrlock);
    ddi_soft_state_free(dca_state, instance);
    return (DDI_FAILURE);

}

int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int instance;
    dca_t *dca;
    timeout_id_t tid;

    instance = ddi_get_instance(dip);

    DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

    switch (cmd) {
    case DDI_SUSPEND:
        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
            dca_diperror(dip, "no soft state in detach");
            return (DDI_FAILURE);
        }
        /* assumption: we won't be DDI_DETACHed until we return */
        return (dca_suspend(dca));

    case DDI_DETACH:
        break;
    default:
        return (DDI_FAILURE);
    }

    if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
        dca_diperror(dip, "no soft state in detach");
        return (DDI_FAILURE);
    }

    /*
     * Unregister from kCF.
     * This needs to be done at the beginning of detach.
     */
    if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
        if (crypto_unregister_provider(
            WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
            dca_error(dca, "unable to unregister MCR1 from kcf");
            return (DDI_FAILURE);
        }
    }

    if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
        if (crypto_unregister_provider(
            WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
            dca_error(dca, "unable to unregister MCR2 from kcf");
            return (DDI_FAILURE);
        }
    }

    /*
     * Cleanup the private context list. Once the
     * crypto_unregister_provider returns, it is safe to do so.
     */
    dca_free_context_list(dca);

    /* Cleanup the local random number pool */
    dca_random_fini(dca);

    /* send any jobs in the waitq back to kCF */
    dca_rejectjobs(dca);

    /* untimeout the timeouts */
    mutex_enter(&dca->dca_intrlock);
    tid = dca->dca_jobtid;
    dca->dca_jobtid = 0;
    mutex_exit(&dca->dca_intrlock);
    if (tid) {
        (void) untimeout(tid);
    }

    /* disable device interrupts */
    CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

    /* unregister interrupt handlers */
    ddi_remove_intr(dip, 0, dca->dca_icookie);

    /* release our regs handle */
    ddi_regs_map_free(&dca->dca_regs_handle);

    /* toss out kstats */
    if (dca->dca_intrstats) {
        kstat_delete(dca->dca_intrstats);
    }
    if (dca->dca_ksp) {
        kstat_delete(dca->dca_ksp);
    }

    mutex_destroy(&dca->dca_intrlock);
    dca_uninit(dca);

    /* finalize FMA */
    dca_fma_fini(dca);

    ddi_soft_state_free(dca_state, instance);

    return (DDI_SUCCESS);
}

int
dca_resume(dca_t *dca)
{
    ddi_acc_handle_t pci;

    if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
        dca_error(dca, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /*
     * Reprogram registers in PCI configuration space.
     */

    /* Broadcom-specific timers -- we disable them. */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

    pci_config_teardown(&pci);

    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during resume");
        return (DDI_FAILURE);
    }

    /*
     * Now restore the card-specific CSRs.
     */

    /* restore endianness settings */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* restore interrupt enables */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 set 256 byte read size to improve performance */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* resume scheduling jobs on the device */
    dca_undrain(dca);

    return (DDI_SUCCESS);
}

int
dca_suspend(dca_t *dca)
{
    if ((dca_drain(dca)) != 0) {
        return (DDI_FAILURE);
    }
    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during suspend");
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
int
dca_reset(dca_t *dca, int failreset)
{
    int i;

    if (dca->dca_regs_handle == NULL) {
        return (-1);
    }

    PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
    if (!failreset) {
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
            return (-1);
    }

    /* now wait for a reset */
    for (i = 1; i < 100; i++) {
        uint32_t dmactl;
        drv_usecwait(100);
        dmactl = GETCSR(dca, CSR_DMACTL);
        if (!failreset) {
            if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (-1);
        }
        if ((dmactl & DMACTL_RESET) == 0) {
            DBG(dca, DCHATTY, "reset in %d usec", i * 100);
            return (0);
        }
    }
    if (!failreset) {
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "timeout waiting for reset after %d usec", i * 100);
    }
    return (-1);
}

int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
    int i;
    int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

    /*
     * Set up work queue.
     */
    mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
        dca->dca_icookie);
    mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

    mutex_enter(&wlp->dwl_lock);

    dca_initq(&wlp->dwl_freereqs);
    dca_initq(&wlp->dwl_waitq);
    dca_initq(&wlp->dwl_freework);
    dca_initq(&wlp->dwl_runq);

    for (i = 0; i < MAXWORK; i++) {
        dca_work_t *workp;

        if ((workp = dca_newwork(dca)) == NULL) {
            dca_error(dca, "unable to allocate work");
            mutex_exit(&wlp->dwl_lock);
            return (DDI_FAILURE);
        }
        workp->dw_wlp = wlp;
        dca_freework(workp);
    }
    mutex_exit(&wlp->dwl_lock);

    for (i = 0; i < reqprealloc; i++) {
        dca_request_t *reqp;

        if ((reqp = dca_newreq(dca)) == NULL) {
            dca_error(dca, "unable to allocate request");
            return (DDI_FAILURE);
        }
        reqp->dr_dca = dca;
        reqp->dr_wlp = wlp;
        dca_freereq(reqp);
    }
    return (DDI_SUCCESS);
}

int
dca_init(dca_t *dca)
{
    dca_worklist_t *wlp;

    /* Initialize the private context list and the corresponding lock. */
    mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
    dca_initq(&dca->dca_ctx_list);

    /*
     * MCR1 algorithms.
     */
    wlp = WORKLIST(dca, MCR1);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_lowater", MCR1LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_hiwater", MCR1HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR1;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    /*
     * MCR2 algorithms.
     */
    wlp = WORKLIST(dca, MCR2);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_lowater", MCR2LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_hiwater", MCR2HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR2;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}

/*
 * Uninitialize worklists. This routine should only be called when no
 * active jobs (hence DMA mappings) exist. One way to ensure this is
 * to unregister from kCF before calling this routine. (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
    int mcr;

    mutex_destroy(&dca->dca_ctx_list_lock);

    for (mcr = MCR1; mcr <= MCR2; mcr++) {
        dca_worklist_t *wlp = WORKLIST(dca, mcr);
        dca_work_t *workp;
        dca_request_t *reqp;

        if (dca->dca_regs_handle == NULL) {
            continue;
        }

        mutex_enter(&wlp->dwl_lock);
        while ((workp = dca_getwork(dca, mcr)) != NULL) {
            dca_destroywork(workp);
        }
        mutex_exit(&wlp->dwl_lock);
        while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
            dca_destroyreq(reqp);
        }

        mutex_destroy(&wlp->dwl_lock);
        mutex_destroy(&wlp->dwl_freereqslock);
        mutex_destroy(&wlp->dwl_freelock);
        cv_destroy(&wlp->dwl_cv);
        wlp->dwl_prov = 0;
    }
}

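/*
 * The "list2" routines below maintain a second, independent linkage
 * (dl_next2/dl_prev2) through the same nodes, protected by the
 * caller-supplied lock; the driver uses it for the per-instance
 * private context list.  Each list is circular, with q acting as the
 * sentinel: dca_enlist2() inserts at the tail and dca_delist2()
 * removes from the head.
 */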
static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
    if (!q || !node)
        return;

    mutex_enter(lock);
    node->dl_next2 = q;
    node->dl_prev2 = q->dl_prev2;
    node->dl_next2->dl_prev2 = node;
    node->dl_prev2->dl_next2 = node;
    mutex_exit(lock);
}

static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
    if (!node)
        return;

    mutex_enter(lock);
    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);
}

static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
    dca_listnode_t *node;

    mutex_enter(lock);
    if ((node = q->dl_next2) == q) {
        mutex_exit(lock);
        return (NULL);
    }

    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);

    return (node);
}

void
dca_initq(dca_listnode_t *q)
{
    q->dl_next = q;
    q->dl_prev = q;
    q->dl_next2 = q;
    q->dl_prev2 = q;
}

void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
    /*
     * Enqueue submits at the "tail" of the list, i.e. just
     * behind the sentinel.
     */
    node->dl_next = q;
    node->dl_prev = q->dl_prev;
    node->dl_next->dl_prev = node;
    node->dl_prev->dl_next = node;
}

void
dca_rmqueue(dca_listnode_t *node)
{
    node->dl_next->dl_prev = node->dl_prev;
    node->dl_prev->dl_next = node->dl_next;
    node->dl_next = NULL;
    node->dl_prev = NULL;
}

dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
    dca_listnode_t *node;
    /*
     * Dequeue takes from the "head" of the list, i.e. just after
     * the sentinel.
     */
    if ((node = q->dl_next) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;
    /*
     * unqueue takes from the "tail" of the list, i.e. just before
     * the sentinel.
     */
    if ((node = q->dl_prev) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}

dca_listnode_t *
dca_peekqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;

    if ((node = q->dl_next) == q) {
        return (NULL);
    } else {
        return (node);
    }
}

/*
 * Interrupt service routine.
 */
uint_t
dca_intr(char *arg)
{
    dca_t *dca = (dca_t *)arg;
    uint32_t status;

    mutex_enter(&dca->dca_intrlock);
    status = GETCSR(dca, CSR_DMASTAT);
    PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        mutex_exit(&dca->dca_intrlock);
        return ((uint_t)DDI_FAILURE);
    }

    DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

    if ((status & DMASTAT_INTERRUPTS) == 0) {
        /* increment spurious interrupt kstat */
        if (dca->dca_intrstats) {
            KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
        }
        mutex_exit(&dca->dca_intrlock);
        return (DDI_INTR_UNCLAIMED);
    }

    if (dca->dca_intrstats) {
        KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
    }
    if (status & DMASTAT_MCR1INT) {
        DBG(dca, DINTR, "MCR1 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
        dca_schedule(dca, MCR1);
        dca_reclaim(dca, MCR1);
        mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
    }

    if (status & DMASTAT_MCR2INT) {
        DBG(dca, DINTR, "MCR2 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
        dca_schedule(dca, MCR2);
        dca_reclaim(dca, MCR2);
        mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
    }

    if (status & DMASTAT_ERRINT) {
        uint32_t erraddr;
        erraddr = GETCSR(dca, CSR_DMAEA);
        mutex_exit(&dca->dca_intrlock);

        /*
         * bit 1 of the error address indicates failure during
         * read if set, during write otherwise.
         */
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "DMA master access error %s address 0x%x",
            erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
        return (DDI_INTR_CLAIMED);
    }

    mutex_exit(&dca->dca_intrlock);

    return (DDI_INTR_CLAIMED);
}

/*
 * Reverse a string of bytes from s1 into s2. The reversal happens
 * from the tail of s1. If len1 < len2, then null bytes will be
 * padded to the end of s2. If len2 < len1, then (presumably null)
 * bytes will be dropped from the start of s1.
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
    caddr_t src, dst;

    if (len1 == 0) {
        if (len2) {
            bzero(s2, len2);
        }
        return;
    }
    src = (caddr_t)s1 + len1 - 1;
    dst = s2;
    while ((src >= (caddr_t)s1) && (len2)) {
        *dst++ = *src--;
        len2--;
    }
    while (len2 > 0) {
        *dst++ = 0;
        len2--;
    }
}

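/*
 * Round an operand size (in bits) up to the next full-length size
 * handled by the hardware, returning the padded size in bytes; zero
 * means the operand is too large (over 2048 bits).
 */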
uint16_t
dca_padfull(int num)
{
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    if (num <= 1536) {
        return (BITS2BYTES(1536));
    }
    if (num <= 2048) {
        return (BITS2BYTES(2048));
    }
    return (0);
}

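/*
 * As above, but for half-length operands; zero means over 1024 bits.
 */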
uint16_t
dca_padhalf(int num)
{
    if (num <= 256) {
        return (BITS2BYTES(256));
    }
    if (num <= 384) {
        return (BITS2BYTES(384));
    }
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    return (0);
}

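/*
 * Allocate a new work structure, along with the DMA-able memory that
 * backs its MCR.  Any partially constructed work is torn down and NULL
 * is returned on failure.
 */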
dca_work_t *
dca_newwork(dca_t *dca)
{
    dca_work_t *workp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;

    workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA handle");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
        ROUNDUP(MCR_SIZE, dca->dca_pagesize),
        &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
        &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
        workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
        DDI_DMA_SLEEP, NULL, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "unable to map MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    workp->dw_mcr_paddr = c.dmac_address;
    return (workp);
}

void
dca_destroywork(dca_work_t *workp)
{
    if (workp->dw_mcr_paddr) {
        (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
    }
    if (workp->dw_mcr_acch) {
        ddi_dma_mem_free(&workp->dw_mcr_acch);
    }
    if (workp->dw_mcr_dmah) {
        ddi_dma_free_handle(&workp->dw_mcr_dmah);
    }
    kmem_free(workp, sizeof (dca_work_t));
}

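/*
 * Allocate a new request, including the DMA resources for its context
 * page, its scratch (shared) buffers, and its chaining descriptors.
 */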
dca_request_t *
dca_newreq(dca_t *dca)
{
    dca_request_t *reqp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;
    int n_chain = 0;

    size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

    reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

    reqp->dr_dca = dca;

    /*
     * Setup the DMA region for the context and descriptors.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
        NULL, &reqp->dr_ctx_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* for driver hardening, allocate in whole pages */
    rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
        ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
        &reqp->dr_ctx_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
        reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
        DDI_DMA_SLEEP, 0, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "failed binding request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ctx_paddr = c.dmac_address;

    reqp->dr_dma_size = size;

    /*
     * Set up the dma for our scratch/shared buffers.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating ibuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating obuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_in DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_out DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /*
     * for driver hardening, allocate in whole pages.
     */
    size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
    /*
     * We could kmem_alloc for Sparc too. However, it gives worse
     * performance when transferring more than one page data. For example,
     * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
     * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
     * the same throughput.
     */
    rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
        &size, &reqp->dr_ibuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
        &size, &reqp->dr_obuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* Skip the used portion in the context page */
    reqp->dr_offset = CTX_MAXLENGTH;
    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
        DDI_DMA_WRITE | DDI_DMA_STREAMING,
        &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
    /* Skip the space used by the input buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
        DDI_DMA_READ | DDI_DMA_STREAMING,
        &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
    /* Skip the space used by the output buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
        reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
    return (reqp);
}

void
dca_destroyreq(dca_request_t *reqp)
{

    /*
     * Clean up DMA for the context structure.
     */
    if (reqp->dr_ctx_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
    }

    if (reqp->dr_ctx_acch) {
        ddi_dma_mem_free(&reqp->dr_ctx_acch);
    }

    if (reqp->dr_ctx_dmah) {
        ddi_dma_free_handle(&reqp->dr_ctx_dmah);
    }

    /*
     * Clean up DMA for the scratch buffer.
     */
    if (reqp->dr_ibuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
    }

    if (reqp->dr_ibuf_acch) {
        ddi_dma_mem_free(&reqp->dr_ibuf_acch);
    }
    if (reqp->dr_obuf_acch) {
        ddi_dma_mem_free(&reqp->dr_obuf_acch);
    }

    if (reqp->dr_ibuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_obuf_dmah);
    }
    /*
     * These two DMA handles should have been unbound in
     * dca_unbindchains().
     */
    if (reqp->dr_chain_in_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
    }
    if (reqp->dr_chain_out_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
    }

    kmem_free(reqp, sizeof (dca_request_t));
}

dca_work_t *
dca_getwork(dca_t *dca, int mcr)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);
    dca_work_t *workp;

    mutex_enter(&wlp->dwl_freelock);
    workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
    mutex_exit(&wlp->dwl_freelock);
    if (workp) {
        int nreqs;
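        /* zero the first eight bytes of the MCR before it is reused */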
        bzero(workp->dw_mcr_kaddr, 8);

        /* clear out old requests */
        for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
            workp->dw_reqs[nreqs] = NULL;
        }
    }
    return (workp);
}

void
dca_freework(dca_work_t *workp)
{
    mutex_enter(&workp->dw_wlp->dwl_freelock);
    dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
    mutex_exit(&workp->dw_wlp->dwl_freelock);
}

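/*
 * Pull a request off the free list; if none is available and tryhard
 * is set, fall back to allocating a fresh one.
 */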
1809 dca_request_t *
dca_getreq(dca_t * dca,int mcr,int tryhard)1810 dca_getreq(dca_t *dca, int mcr, int tryhard)
1811 {
1812 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1813 dca_request_t *reqp;
1814
1815 mutex_enter(&wlp->dwl_freereqslock);
1816 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1817 mutex_exit(&wlp->dwl_freereqslock);
1818 if (reqp) {
1819 reqp->dr_flags = 0;
1820 reqp->dr_callback = NULL;
1821 } else if (tryhard) {
1822 /*
1823 * failed to get a free one, try an allocation, the hard way.
1824 * XXX: Kstat desired here.
1825 */
1826 if ((reqp = dca_newreq(dca)) != NULL) {
1827 reqp->dr_wlp = wlp;
1828 reqp->dr_dca = dca;
1829 reqp->dr_flags = 0;
1830 reqp->dr_callback = NULL;
1831 }
1832 }
1833 return (reqp);
1834 }
1835
1836 void
dca_freereq(dca_request_t * reqp)1837 dca_freereq(dca_request_t *reqp)
1838 {
1839 reqp->dr_kcf_req = NULL;
1840 if (!(reqp->dr_flags & DR_NOCACHE)) {
1841 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1842 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1843 (dca_listnode_t *)reqp);
1844 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1845 }
1846 }
1847
1848 /*
1849 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
1850 * is mapped to a single physical address. On x86, a user buffer is mapped
1851 * to multiple physical addresses. These physical addresses are chained
1852 * using the method specified in Broadcom BCM5820 specification.
1853 */
1854 int
dca_bindchains(dca_request_t * reqp,size_t incnt,size_t outcnt)1855 dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
1856 {
1857 int rv;
1858 caddr_t kaddr;
1859 uint_t flags;
1860 int n_chain = 0;
1861
1862 if (reqp->dr_flags & DR_INPLACE) {
1863 flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
1864 } else {
1865 flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
1866 }
1867
1868 /* first the input */
1869 if (incnt) {
1870 if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
1871 DBG(NULL, DWARN, "unrecognised crypto data format");
1872 return (DDI_FAILURE);
1873 }
1874 if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
1875 kaddr, reqp->dr_chain_in_dmah, flags,
1876 &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
1877 (void) dca_unbindchains(reqp);
1878 return (rv);
1879 }
1880
1881 /*
1882 * The offset and length are altered by the calling routine
1883 * reqp->dr_in->cd_offset += incnt;
1884 * reqp->dr_in->cd_length -= incnt;
1885 */
1886 /* Save the first one in the chain for MCR */
1887 reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
1888 reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
1889 reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
1890 } else {
1891 reqp->dr_in_paddr = 0;
1892 reqp->dr_in_next = 0;
1893 reqp->dr_in_len = 0;
1894 }
1895
1896 if (reqp->dr_flags & DR_INPLACE) {
1897 reqp->dr_out_paddr = reqp->dr_in_paddr;
1898 reqp->dr_out_len = reqp->dr_in_len;
1899 reqp->dr_out_next = reqp->dr_in_next;
1900 return (DDI_SUCCESS);
1901 }
1902
1903 /* then the output */
1904 if (outcnt) {
1905 flags = DDI_DMA_READ | DDI_DMA_STREAMING;
1906 if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
1907 DBG(NULL, DWARN, "unrecognised crypto data format");
1908 (void) dca_unbindchains(reqp);
1909 return (DDI_FAILURE);
1910 }
1911 rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
1912 n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
1913 flags, &reqp->dr_chain_out_head, &n_chain);
1914 if (rv != DDI_SUCCESS) {
1915 (void) dca_unbindchains(reqp);
1916 return (DDI_FAILURE);
1917 }
1918
1919 /* Save the first one in the chain for MCR */
1920 reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
1921 reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
1922 reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
1923 } else {
1924 reqp->dr_out_paddr = 0;
1925 reqp->dr_out_next = 0;
1926 reqp->dr_out_len = 0;
1927 }
1928
1929 return (DDI_SUCCESS);
1930 }
1931
1932 /*
1933 * Unbind the user buffers from the DMA handles.
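 * A non-zero dc_buffer_paddr in the chain head is used as the marker
 * that the corresponding handle is currently bound.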
1934 */
1935 int
1936 dca_unbindchains(dca_request_t *reqp)
1937 {
1938 int rv = DDI_SUCCESS;
1939 int rv1 = DDI_SUCCESS;
1940
1941 /* Clear the input chain */
1942 if (reqp->dr_chain_in_head.dc_buffer_paddr != 0) {
1943 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1944 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1945 }
1946
1947 if (reqp->dr_flags & DR_INPLACE) {
1948 return (rv);
1949 }
1950
1951 /* Clear the output chain */
1952 if (reqp->dr_chain_out_head.dc_buffer_paddr != 0) {
1953 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1954 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1955 }
1956
1957 return ((rv != DDI_SUCCESS)? rv : rv1);
1958 }
1959
1960 /*
1961 * Build either the input chain or the output chain. It is a single-item
1962 * chain on Sparc, and possibly a multiple-item chain on x86.
1963 */
1964 static int
1965 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
1966 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
1967 dca_chain_t *head, int *n_chain)
1968 {
1969 ddi_dma_cookie_t c;
1970 uint_t nc;
1971 int rv;
1972 caddr_t chain_kaddr_pre;
1973 caddr_t chain_kaddr;
1974 uint32_t chain_paddr;
1975 int i;
1976
1977 /* Advance past the context structure to the starting address */
1978 chain_paddr = reqp->dr_ctx_paddr + dr_offset;
1979 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
1980
1981 /*
1982 * Bind the kernel address to the DMA handle. On x86, the actual
1983 * buffer is mapped into multiple physical addresses. On Sparc,
1984 * the actual buffer is mapped into a single address.
1985 */
1986 rv = ddi_dma_addr_bind_handle(handle,
1987 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
1988 if (rv != DDI_DMA_MAPPED) {
1989 return (DDI_FAILURE);
1990 }
1991
1992 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
1993 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
1994 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
1995 reqp->destroy = TRUE;
1996 return (rv);
1997 }
1998
1999 *n_chain = nc;
2000
2001 /* Setup the data buffer chain for DMA transfer */
2002 chain_kaddr_pre = NULL;
2003 head->dc_buffer_paddr = 0;
2004 head->dc_next_paddr = 0;
2005 head->dc_buffer_length = 0;
2006 for (i = 0; i < nc; i++) {
2007 /* PIO */
2008 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
2009 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
2010 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
2011
2012 /* Remember the head of the chain */
2013 if (head->dc_buffer_paddr == 0) {
2014 head->dc_buffer_paddr = c.dmac_address;
2015 head->dc_buffer_length = c.dmac_size;
2016 }
2017
2018 /* Link to the previous one if one exists */
2019 if (chain_kaddr_pre) {
2020 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
2021 chain_paddr);
2022 if (head->dc_next_paddr == 0)
2023 head->dc_next_paddr = chain_paddr;
2024 }
2025 chain_kaddr_pre = chain_kaddr;
2026
2027 /* Maintain pointers */
2028 chain_paddr += DESC_SIZE;
2029 chain_kaddr += DESC_SIZE;
2030
2031 /* Retrieve the next cookie if there is one */
2032 if (i < nc-1)
2033 ddi_dma_nextcookie(handle, &c);
2034 }
2035
2036 /* Set the next pointer in the last entry to NULL */
2037 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
2038
2039 return (DDI_SUCCESS);
2040 }
2041
2042 /*
2043 * Schedule some work.
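 * The request is synced to the device and placed on the worklist's
 * wait queue; once dwl_hiwater is reached, kCF is notified that the
 * provider is busy. A typical caller looks roughly like this (a
 * sketch only, assuming the request fields and chains have already
 * been set up):
 *
 *	if ((reqp = dca_getreq(dca, MCR1, 1)) == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	reqp->dr_callback = my_done;	(hypothetical completion hook)
 *	rv = dca_start(dca, reqp, MCR1, 1);
 *	rv is CRYPTO_QUEUED on success; dca_done() later fires dr_callback.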
2044 */
2045 int
2046 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
2047 {
2048 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2049
2050 mutex_enter(&wlp->dwl_lock);
2051
2052 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
2053 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
2054 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
2055 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
2056 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
2057 /* sync out the entire context and descriptor chains */
2058 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2059 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
2060 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2061 reqp->destroy = TRUE;
2062 mutex_exit(&wlp->dwl_lock);
2063 return (CRYPTO_DEVICE_ERROR);
2064 }
2065
2066 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
2067 wlp->dwl_count++;
2068 wlp->dwl_lastsubmit = ddi_get_lbolt();
2069 reqp->dr_wlp = wlp;
2070
2071 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
2072 /* we are fully loaded now, let kCF know */
2073
2074 wlp->dwl_flowctl++;
2075 wlp->dwl_busy = 1;
2076
2077 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
2078 }
2079
2080 if (dosched) {
2081 #ifdef SCHEDDELAY
2082 /* possibly wait for more work to arrive */
2083 if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
2084 dca_schedule(dca, mcr);
2085 } else if (!wlp->dwl_schedtid) {
2086 /* wait 1 msec for more work before doing it */
2087 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2088 (void *)wlp, drv_usectohz(MSEC));
2089 }
2090 #else
2091 dca_schedule(dca, mcr);
2092 #endif
2093 }
2094 mutex_exit(&wlp->dwl_lock);
2095
2096 return (CRYPTO_QUEUED);
2097 }
2098
2099 void
2100 dca_schedule(dca_t *dca, int mcr)
2101 {
2102 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2103 int csr;
2104 int full;
2105 uint32_t status;
2106
2107 ASSERT(mutex_owned(&wlp->dwl_lock));
2108 /*
2109 * If the card is draining or has an outstanding failure,
2110 * don't schedule any more work on it right now
2111 */
2112 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
2113 return;
2114 }
2115
2116 if (mcr == MCR2) {
2117 csr = CSR_MCR2;
2118 full = DMASTAT_MCR2FULL;
2119 } else {
2120 csr = CSR_MCR1;
2121 full = DMASTAT_MCR1FULL;
2122 }
2123
2124 for (;;) {
2125 dca_work_t *workp;
2126 uint32_t offset;
2127 int nreqs;
2128
2129 status = GETCSR(dca, CSR_DMASTAT);
2130 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2131 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
2132 return;
2133
2134 if ((status & full) != 0)
2135 break;
2136
2137 #ifdef SCHEDDELAY
2138 /* if there isn't enough to do, don't bother now */
2139 if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
2140 (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
2141 drv_usectohz(MSEC)))) {
2142 /* wait a bit longer... */
2143 if (wlp->dwl_schedtid == 0) {
2144 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2145 (void *)wlp, drv_usectohz(MSEC));
2146 }
2147 return;
2148 }
2149 #endif
2150
2151 /* grab a work structure */
2152 workp = dca_getwork(dca, mcr);
2153
2154 if (workp == NULL) {
2155 /*
2156 * In this case there must be work ready to be
2157 * reclaimed, since the chip can hold fewer
2158 * outstanding jobs than have been allocated in total.
2159 */
2160 dca_reclaim(dca, mcr);
2161 continue;
2162 }
2163
2164 nreqs = 0;
2165 offset = MCR_CTXADDR;
2166
2167 while (nreqs < wlp->dwl_reqspermcr) {
2168 dca_request_t *reqp;
2169
2170 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
2171 if (reqp == NULL) {
2172 /* nothing left to process */
2173 break;
2174 }
2175 /*
2176 * Update flow control.
2177 */
2178 wlp->dwl_count--;
2179 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2180 (wlp->dwl_busy)) {
2181 wlp->dwl_busy = 0;
2182 crypto_prov_notify(wlp->dwl_prov,
2183 CRYPTO_PROVIDER_READY);
2184 }
2185
2186 /*
2187 * Context address.
2188 */
2189 PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
2190 offset += 4;
2191
2192 /*
2193 * Input chain.
2194 */
2195 /* input buffer address */
2196 PUTMCR32(workp, offset, reqp->dr_in_paddr);
2197 offset += 4;
2198 /* next input buffer entry */
2199 PUTMCR32(workp, offset, reqp->dr_in_next);
2200 offset += 4;
2201 /* input buffer length */
2202 PUTMCR16(workp, offset, reqp->dr_in_len);
2203 offset += 2;
2204 /* zero the reserved field */
2205 PUTMCR16(workp, offset, 0);
2206 offset += 2;
2207
2208 /*
2209 * Overall length.
2210 */
2211 /* reserved field */
2212 PUTMCR16(workp, offset, 0);
2213 offset += 2;
2214 /* total packet length */
2215 PUTMCR16(workp, offset, reqp->dr_pkt_length);
2216 offset += 2;
2217
2218 /*
2219 * Output chain.
2220 */
2221 /* output buffer address */
2222 PUTMCR32(workp, offset, reqp->dr_out_paddr);
2223 offset += 4;
2224 /* next output buffer entry */
2225 PUTMCR32(workp, offset, reqp->dr_out_next);
2226 offset += 4;
2227 /* output buffer length */
2228 PUTMCR16(workp, offset, reqp->dr_out_len);
2229 offset += 2;
2230 /* zero the reserved field */
2231 PUTMCR16(workp, offset, 0);
2232 offset += 2;
2233
2234 /*
2235 * Note submission.
2236 */
2237 workp->dw_reqs[nreqs] = reqp;
2238 nreqs++;
2239 }
2240
2241 if (nreqs == 0) {
2242 /* nothing in the queue! */
2243 dca_freework(workp);
2244 return;
2245 }
2246
2247 wlp->dwl_submit++;
2248
2249 PUTMCR16(workp, MCR_FLAGS, 0);
2250 PUTMCR16(workp, MCR_COUNT, nreqs);
2251
2252 DBG(dca, DCHATTY,
2253 "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
2254 workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
2255 nreqs, mcr);
2256
2257 workp->dw_lbolt = ddi_get_lbolt();
2258 /* Make sure MCR is synced out to device. */
2259 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
2260 DDI_DMA_SYNC_FORDEV);
2261 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2262 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2263 dca_destroywork(workp);
2264 return;
2265 }
2266
2267 PUTCSR(dca, csr, workp->dw_mcr_paddr);
2268 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2269 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2270 dca_destroywork(workp);
2271 return;
2272 } else {
2273 dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
2274 }
2275
2276 DBG(dca, DCHATTY, "posted");
2277 }
2278 }
2279
2280 /*
2281 * Reclaim completed work, called in interrupt context.
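 * dwl_lock is held on entry; it is dropped around the per-request
 * completion callbacks and re-acquired before the next MCR is
 * examined.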
2282 */
2283 void
2284 dca_reclaim(dca_t *dca, int mcr)
2285 {
2286 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2287 dca_work_t *workp;
2288 ushort_t flags;
2289 int nreclaimed = 0;
2290 int i;
2291
2292 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
2293 ASSERT(mutex_owned(&wlp->dwl_lock));
2294 /*
2295 * For each MCR on the submitted queue (runq), we check to see if
2296 * it has been processed. If so, then we note each individual
2297 * job in the MCR, and do the completion processing for
2298 * each such job.
2299 */
2300 for (;;) {
2301
2302 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2303 if (workp == NULL) {
2304 break;
2305 }
2306
2307 /* only sync the MCR flags, since that's all we need */
2308 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
2309 DDI_DMA_SYNC_FORKERNEL);
2310 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2311 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2312 dca_rmqueue((dca_listnode_t *)workp);
2313 dca_destroywork(workp);
2314 return;
2315 }
2316
2317 flags = GETMCR16(workp, MCR_FLAGS);
2318 if ((flags & MCRFLAG_FINISHED) == 0) {
2319 /* chip is still working on it */
2320 DBG(dca, DRECLAIM,
2321 "chip still working on it (MCR%d)", mcr);
2322 break;
2323 }
2324
2325 /* it's really for us, so remove it from the queue */
2326 dca_rmqueue((dca_listnode_t *)workp);
2327
2328 /* if we were draining, signal on the cv */
2329 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2330 cv_signal(&wlp->dwl_cv);
2331 }
2332
2333 /* update statistics, done under the lock */
2334 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2335 dca_request_t *reqp = workp->dw_reqs[i];
2336 if (reqp == NULL) {
2337 continue;
2338 }
2339 if (reqp->dr_byte_stat >= 0) {
2340 dca->dca_stats[reqp->dr_byte_stat] +=
2341 reqp->dr_pkt_length;
2342 }
2343 if (reqp->dr_job_stat >= 0) {
2344 dca->dca_stats[reqp->dr_job_stat]++;
2345 }
2346 }
2347 mutex_exit(&wlp->dwl_lock);
2348
2349 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2350 dca_request_t *reqp = workp->dw_reqs[i];
2351
2352 if (reqp == NULL) {
2353 continue;
2354 }
2355
2356 /* Do the callback. */
2357 workp->dw_reqs[i] = NULL;
2358 dca_done(reqp, CRYPTO_SUCCESS);
2359
2360 nreclaimed++;
2361 }
2362
2363 /* now we can release the work */
2364 dca_freework(workp);
2365
2366 mutex_enter(&wlp->dwl_lock);
2367 }
2368 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
2369 }
2370
2371 int
2372 dca_length(crypto_data_t *cdata)
2373 {
2374 return (cdata->cd_length);
2375 }
2376
2377 /*
2378 * This is the callback function called from the interrupt when a kCF job
2379 * completes. It does some driver-specific things, and then calls the
2380 * kCF-provided callback. Finally, it cleans up the state for the work
2381 * request and drops the reference count to allow for DR.
2382 */
2383 void
2384 dca_done(dca_request_t *reqp, int err)
2385 {
2386 uint64_t ena = 0;
2387
2388 /* unbind any chains we were using */
2389 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2390 /* DMA failure */
2391 ena = dca_ena(ena);
2392 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2393 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2394 "fault on buffer DMA handle");
2395 if (err == CRYPTO_SUCCESS) {
2396 err = CRYPTO_DEVICE_ERROR;
2397 }
2398 }
2399
2400 if (reqp->dr_callback != NULL) {
2401 reqp->dr_callback(reqp, err);
2402 } else {
2403 dca_freereq(reqp);
2404 }
2405 }
2406
2407 /*
2408 * Call this when a failure is detected. It will reset the chip,
2409 * log a message, alert kCF, and mark jobs in the runq as failed.
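 * Note that the worklist locks are taken with mutex_tryenter() below,
 * presumably so that this routine stays safe even if a caller already
 * holds one of them.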
2410 */
2411 /* ARGSUSED */
2412 void
2413 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2414 uint64_t ena, int errno, char *mess, ...)
2415 {
2416 va_list ap;
2417 char buf[256];
2418 int mcr;
2419 char *eclass;
2420 int have_mutex;
2421
2422 va_start(ap, mess);
2423 (void) vsprintf(buf, mess, ap);
2424 va_end(ap);
2425
2426 eclass = dca_fma_eclass_string(dca->dca_model, index);
2427
2428 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2429 index != DCA_FM_ECLASS_NONE) {
2430 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2431 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2432 FM_EREPORT_VERS0, NULL);
2433
2434 /* Report the impact of the failure to the DDI. */
2435 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2436 } else {
2437 /* Just log the error string to the message log */
2438 dca_error(dca, buf);
2439 }
2440
2441 /*
2442 * Indicate a failure (keeps dca_schedule() from submitting more work).
2443 */
2444 dca->dca_flags |= DCA_FAILED;
2445
2446 /*
2447 * Reset the chip. As a side effect, this should also disable
2448 * all interrupts from the device.
2449 */
2450 (void) dca_reset(dca, 1);
2451
2452 /*
2453 * Report the failure to kCF.
2454 */
2455 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2456 if (WORKLIST(dca, mcr)->dwl_prov) {
2457 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2458 CRYPTO_PROVIDER_FAILED);
2459 }
2460 }
2461
2462 /*
2463 * Return jobs not sent to hardware back to kCF.
2464 */
2465 dca_rejectjobs(dca);
2466
2467 /*
2468 * From this point on, no new work should be arriving, and the
2469 * chip should not be doing any active DMA.
2470 */
2471
2472 /*
2473 * Now find all the work submitted to the device and fail
2474 * them.
2475 */
2476 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2477 dca_worklist_t *wlp;
2478 int i;
2479
2480 wlp = WORKLIST(dca, mcr);
2481
2482 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2483 continue;
2484 }
2485 for (;;) {
2486 dca_work_t *workp;
2487
2488 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2489 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2490 if (workp == NULL) {
2491 if (have_mutex)
2492 mutex_exit(&wlp->dwl_lock);
2493 break;
2494 }
2495 mutex_exit(&wlp->dwl_lock);
2496
2497 /*
2498 * Free up requests
2499 */
2500 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2501 dca_request_t *reqp = workp->dw_reqs[i];
2502 if (reqp) {
2503 dca_done(reqp, errno);
2504 workp->dw_reqs[i] = NULL;
2505 }
2506 }
2507
2508 mutex_enter(&wlp->dwl_lock);
2509 /*
2510 * If waiting to drain, signal on the waiter.
2511 */
2512 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2513 cv_signal(&wlp->dwl_cv);
2514 }
2515
2516 /*
2517 * Return the work and request structures to
2518 * the free pool.
2519 */
2520 dca_freework(workp);
2521 if (have_mutex)
2522 mutex_exit(&wlp->dwl_lock);
2523 }
2524 }
2525
2526 }
2527
2528 #ifdef SCHEDDELAY
2529 /*
2530 * Reschedule worklist as needed.
2531 */
2532 void
2533 dca_schedtimeout(void *arg)
2534 {
2535 dca_worklist_t *wlp = (dca_worklist_t *)arg;
2536 mutex_enter(&wlp->dwl_lock);
2537 wlp->dwl_schedtid = 0;
2538 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
2539 mutex_exit(&wlp->dwl_lock);
2540 }
2541 #endif
2542
2543 /*
2544 * Check for stalled jobs.
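 * This runs off a timeout(9F) that rearms itself every second; any
 * job that has sat on a runq longer than STALETIME triggers
 * dca_failure(), which resets the chip.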
2545 */
2546 void
2547 dca_jobtimeout(void *arg)
2548 {
2549 int mcr;
2550 dca_t *dca = (dca_t *)arg;
2551 int hung = 0;
2552
2553 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2554 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2555 dca_work_t *workp;
2556 clock_t when;
2557
2558 mutex_enter(&wlp->dwl_lock);
2559 when = ddi_get_lbolt();
2560
2561 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2562 if (workp == NULL) {
2563 /* nothing sitting in the queue */
2564 mutex_exit(&wlp->dwl_lock);
2565 continue;
2566 }
2567
2568 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2569 /* request has been queued for less than STALETIME */
2570 mutex_exit(&wlp->dwl_lock);
2571 continue;
2572 }
2573
2574 /* job has been sitting around for over 1 second, badness */
2575 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2576 mcr);
2577
2578 /* leave it in the queue until we reset the chip */
2579 hung++;
2580 mutex_exit(&wlp->dwl_lock);
2581 }
2582
2583 if (hung) {
2584 dca_failure(dca, DDI_DEVICE_FAULT,
2585 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2586 "timeout processing job");
2587 }
2588
2589 /* reschedule ourself */
2590 mutex_enter(&dca->dca_intrlock);
2591 if (dca->dca_jobtid == 0) {
2592 /* timeout has been canceled, prior to DR */
2593 mutex_exit(&dca->dca_intrlock);
2594 return;
2595 }
2596
2597 /* check again in 1 second */
2598 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2599 drv_usectohz(SECOND));
2600 mutex_exit(&dca->dca_intrlock);
2601 }
2602
2603 /*
2604 * This returns all jobs back to kCF. It assumes that processing
2605 * on the worklist has halted.
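 * Each pending request is unbound from its DMA chains and completed
 * with EAGAIN so that kCF can retry it.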
2606 */
2607 void
2608 dca_rejectjobs(dca_t *dca)
2609 {
2610 int mcr;
2611 int have_mutex;
2612 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2613 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2614 dca_request_t *reqp;
2615
2616 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2617 continue;
2618 }
2619 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2620 for (;;) {
2621 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
2622 if (reqp == NULL) {
2623 break;
2624 }
2625 /* update flow control */
2626 wlp->dwl_count--;
2627 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2628 (wlp->dwl_busy)) {
2629 wlp->dwl_busy = 0;
2630 crypto_prov_notify(wlp->dwl_prov,
2631 CRYPTO_PROVIDER_READY);
2632 }
2633 mutex_exit(&wlp->dwl_lock);
2634
2635 (void) dca_unbindchains(reqp);
2636 reqp->dr_callback(reqp, EAGAIN);
2637 mutex_enter(&wlp->dwl_lock);
2638 }
2639 if (have_mutex)
2640 mutex_exit(&wlp->dwl_lock);
2641 }
2642 }
2643
2644 int
2645 dca_drain(dca_t *dca)
2646 {
2647 int mcr;
2648 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2649 #ifdef SCHEDDELAY
2650 timeout_id_t tid;
2651 #endif
2652 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2653
2654 mutex_enter(&wlp->dwl_lock);
2655 wlp->dwl_drain = 1;
2656
2657 /* give it up to a second to drain from the chip */
2658 if (!QEMPTY(&wlp->dwl_runq)) {
2659 (void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
2660 drv_usectohz(STALETIME), TR_CLOCK_TICK);
2661
2662 if (!QEMPTY(&wlp->dwl_runq)) {
2663 dca_error(dca, "unable to drain device");
2664 mutex_exit(&wlp->dwl_lock);
2665 dca_undrain(dca);
2666 return (EBUSY);
2667 }
2668 }
2669
2670 #ifdef SCHEDDELAY
2671 tid = wlp->dwl_schedtid;
2672 mutex_exit(&wlp->dwl_lock);
2673
2674 /*
2675 * untimeout outside the lock -- this is safe because we
2676 * have set the drain flag, so dca_schedule() will not
2677 * reschedule another timeout
2678 */
2679 if (tid) {
2680 untimeout(tid);
2681 }
2682 #else
2683 mutex_exit(&wlp->dwl_lock);
2684 #endif
2685 }
2686 return (0);
2687 }
2688
2689 void
2690 dca_undrain(dca_t *dca)
2691 {
2692 int mcr;
2693
2694 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2695 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2696 mutex_enter(&wlp->dwl_lock);
2697 wlp->dwl_drain = 0;
2698 dca_schedule(dca, mcr);
2699 mutex_exit(&wlp->dwl_lock);
2700 }
2701 }
2702
2703 /*
2704 * Duplicate the crypto_data_t structure, but point to the original
2705 * buffers.
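 * This is a shallow copy: only the descriptor fields are duplicated,
 * so the copy must not outlive the buffers of the original.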
2706 */
2707 int
2708 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2709 {
2710 ninput->cd_format = input->cd_format;
2711 ninput->cd_offset = input->cd_offset;
2712 ninput->cd_length = input->cd_length;
2713 ninput->cd_miscdata = input->cd_miscdata;
2714
2715 switch (input->cd_format) {
2716 case CRYPTO_DATA_RAW:
2717 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2718 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2719 break;
2720
2721 case CRYPTO_DATA_UIO:
2722 ninput->cd_uio = input->cd_uio;
2723 break;
2724
2725 case CRYPTO_DATA_MBLK:
2726 ninput->cd_mp = input->cd_mp;
2727 break;
2728
2729 default:
2730 DBG(NULL, DWARN,
2731 "dca_dupcrypto: unrecognised crypto data format");
2732 return (CRYPTO_FAILED);
2733 }
2734
2735 return (CRYPTO_SUCCESS);
2736 }
2737
2738 /*
2739 * Performs validation checks on the input and output data structures.
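 * Only kernel-resident (UIO_SYSSPACE) uio buffers are accepted; raw
 * and mblk buffers are not checked further here.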
2740 */
2741 int
2742 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2743 {
2744 int rv = CRYPTO_SUCCESS;
2745
2746 switch (input->cd_format) {
2747 case CRYPTO_DATA_RAW:
2748 break;
2749
2750 case CRYPTO_DATA_UIO:
2751 /* we support only kernel buffer */
2752 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2753 DBG(NULL, DWARN, "non kernel input uio buffer");
2754 rv = CRYPTO_ARGUMENTS_BAD;
2755 }
2756 break;
2757
2758 case CRYPTO_DATA_MBLK:
2759 break;
2760
2761 default:
2762 DBG(NULL, DWARN, "unrecognised input crypto data format");
2763 rv = CRYPTO_ARGUMENTS_BAD;
2764 }
2765
2766 switch (output->cd_format) {
2767 case CRYPTO_DATA_RAW:
2768 break;
2769
2770 case CRYPTO_DATA_UIO:
2771 /* we support only kernel buffer */
2772 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2773 DBG(NULL, DWARN, "non kernel output uio buffer");
2774 rv = CRYPTO_ARGUMENTS_BAD;
2775 }
2776 break;
2777
2778 case CRYPTO_DATA_MBLK:
2779 break;
2780
2781 default:
2782 DBG(NULL, DWARN, "unrecognised output crypto data format");
2783 rv = CRYPTO_ARGUMENTS_BAD;
2784 }
2785
2786 return (rv);
2787 }
2788
2789 /*
2790 * data: source crypto_data_t struct
2791 * off: offset into the source before commencing copy
2792 * count: the amount of data to copy
2793 * dest: destination buffer
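 *
 * Unlike dca_gather(), this copies without consuming the source:
 * cd_offset and cd_length are left unchanged.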
2794 */
2795 int
2796 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
2797 {
2798 int rv = CRYPTO_SUCCESS;
2799 uio_t *uiop;
2800 uint_t vec_idx;
2801 size_t cur_len;
2802 mblk_t *mp;
2803
2804 if (count == 0) {
2805 /* We don't want anything so we're done. */
2806 return (rv);
2807 }
2808
2809 /*
2810 * Sanity check that we haven't specified a length greater than the
2811 * offset-adjusted size of the buffer.
2812 */
2813 if (count > (data->cd_length - off)) {
2814 return (CRYPTO_DATA_LEN_RANGE);
2815 }
2816
2817 /* Add the internal crypto_data offset to the requested offset. */
2818 off += data->cd_offset;
2819
2820 switch (data->cd_format) {
2821 case CRYPTO_DATA_RAW:
2822 bcopy(data->cd_raw.iov_base + off, dest, count);
2823 break;
2824
2825 case CRYPTO_DATA_UIO:
2826 /*
2827 * Jump to the first iovec containing data to be
2828 * processed.
2829 */
2830 uiop = data->cd_uio;
2831 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
2832 off >= uiop->uio_iov[vec_idx].iov_len;
2833 off -= uiop->uio_iov[vec_idx++].iov_len)
2834 ;
2835 if (vec_idx == uiop->uio_iovcnt) {
2836 /*
2837 * The caller specified an offset that is larger than
2838 * the total size of the buffers it provided.
2839 */
2840 return (CRYPTO_DATA_LEN_RANGE);
2841 }
2842
2843 /*
2844 * Now process the iovecs.
2845 */
2846 while (vec_idx < uiop->uio_iovcnt && count > 0) {
2847 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
2848 off, count);
2849 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
2850 cur_len);
2851 count -= cur_len;
2852 dest += cur_len;
2853 vec_idx++;
2854 off = 0;
2855 }
2856
2857 if (vec_idx == uiop->uio_iovcnt && count > 0) {
2858 /*
2859 * The end of the specified iovecs was reached but
2860 * the length requested could not be processed
2861 * (requested to digest more data than it provided).
2862 */
2863 return (CRYPTO_DATA_LEN_RANGE);
2864 }
2865 break;
2866
2867 case CRYPTO_DATA_MBLK:
2868 /*
2869 * Jump to the first mblk_t containing data to be processed.
2870 */
2871 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
2872 off -= MBLKL(mp), mp = mp->b_cont)
2873 ;
2874 if (mp == NULL) {
2875 /*
2876 * The caller specified an offset that is larger than
2877 * the total size of the buffers it provided.
2878 */
2879 return (CRYPTO_DATA_LEN_RANGE);
2880 }
2881
2882 /*
2883 * Now do the processing on the mblk chain.
2884 */
2885 while (mp != NULL && count > 0) {
2886 cur_len = min(MBLKL(mp) - off, count);
2887 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
2888 count -= cur_len;
2889 dest += cur_len;
2890 mp = mp->b_cont;
2891 off = 0;
2892 }
2893
2894 if (mp == NULL && count > 0) {
2895 /*
2896 * The end of the mblk was reached but the length
2897 * requested could not be processed (requested to
2898 * digest more data than it provided).
2899 */
2900 return (CRYPTO_DATA_LEN_RANGE);
2901 }
2902 break;
2903
2904 default:
2905 DBG(NULL, DWARN, "unrecognised crypto data format");
2906 rv = CRYPTO_ARGUMENTS_BAD;
2907 }
2908 return (rv);
2909 }
2910
2911
2912 /*
2913 * Performs the input, output or hard scatter/gather checks on the specified
2914 * crypto_data_t struct. Returns TRUE if the data is scatter/gather in
2915 * nature, i.e. it fails the test.
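 * DCA_SG_CONTIG checks only that the buffer is contiguous;
 * DCA_SG_WALIGN additionally requires 32-bit word alignment and a
 * word-multiple length; DCA_SG_PALIGN requires page alignment and a
 * page-multiple length.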
2916 */
2917 int
2918 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
2919 {
2920 uio_t *uiop;
2921 mblk_t *mp;
2922 int rv = FALSE;
2923
2924 switch (val) {
2925 case DCA_SG_CONTIG:
2926 /*
2927 * Check for a contiguous data buffer.
2928 */
2929 switch (data->cd_format) {
2930 case CRYPTO_DATA_RAW:
2931 /* Contiguous in nature */
2932 break;
2933
2934 case CRYPTO_DATA_UIO:
2935 if (data->cd_uio->uio_iovcnt > 1)
2936 rv = TRUE;
2937 break;
2938
2939 case CRYPTO_DATA_MBLK:
2940 mp = data->cd_mp;
2941 if (mp->b_cont != NULL)
2942 rv = TRUE;
2943 break;
2944
2945 default:
2946 DBG(NULL, DWARN, "unrecognised crypto data format");
2947 }
2948 break;
2949
2950 case DCA_SG_WALIGN:
2951 /*
2952 * Check for a contiguous data buffer that is 32-bit word
2953 * aligned and is of word multiples in size.
2954 */
2955 switch (data->cd_format) {
2956 case CRYPTO_DATA_RAW:
2957 if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
2958 ((uintptr_t)data->cd_raw.iov_base %
2959 sizeof (uint32_t))) {
2960 rv = TRUE;
2961 }
2962 break;
2963
2964 case CRYPTO_DATA_UIO:
2965 uiop = data->cd_uio;
2966 if (uiop->uio_iovcnt > 1) {
2967 return (TRUE);
2968 }
2969 /* So there is only one iovec */
2970 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
2971 ((uintptr_t)uiop->uio_iov[0].iov_base %
2972 sizeof (uint32_t))) {
2973 rv = TRUE;
2974 }
2975 break;
2976
2977 case CRYPTO_DATA_MBLK:
2978 mp = data->cd_mp;
2979 if (mp->b_cont != NULL) {
2980 return (TRUE);
2981 }
2982 /* So there is only one mblk in the chain */
2983 if ((MBLKL(mp) % sizeof (uint32_t)) ||
2984 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
2985 rv = TRUE;
2986 }
2987 break;
2988
2989 default:
2990 DBG(NULL, DWARN, "unrecognised crypto data format");
2991 }
2992 break;
2993
2994 case DCA_SG_PALIGN:
2995 /*
2996 * Check that the data buffer is page aligned and is of
2997 * page multiples in size.
2998 */
2999 switch (data->cd_format) {
3000 case CRYPTO_DATA_RAW:
3001 if ((data->cd_length % dca->dca_pagesize) ||
3002 ((uintptr_t)data->cd_raw.iov_base %
3003 dca->dca_pagesize)) {
3004 rv = TRUE;
3005 }
3006 break;
3007
3008 case CRYPTO_DATA_UIO:
3009 uiop = data->cd_uio;
3010 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
3011 ((uintptr_t)uiop->uio_iov[0].iov_base %
3012 dca->dca_pagesize)) {
3013 rv = TRUE;
3014 }
3015 break;
3016
3017 case CRYPTO_DATA_MBLK:
3018 mp = data->cd_mp;
3019 if ((MBLKL(mp) % dca->dca_pagesize) ||
3020 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
3021 rv = TRUE;
3022 }
3023 break;
3024
3025 default:
3026 DBG(NULL, DWARN, "unrecognised crypto data format");
3027 }
3028 break;
3029
3030 default:
3031 DBG(NULL, DWARN, "unrecognised scatter/gather param type");
3032 }
3033
3034 return (rv);
3035 }
3036
3037 /*
3038 * Increments the cd_offset and decrements the cd_length as the data is
3039 * gathered from the crypto_data_t struct.
3040 * The data is reverse-copied into the dest buffer if the flag is true.
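 * A reverse copy fills the destination from its end, inverting the
 * byte order of the gathered data.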
3041 */
3042 int
3043 dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
3044 {
3045 int rv = CRYPTO_SUCCESS;
3046 uint_t vec_idx;
3047 uio_t *uiop;
3048 off_t off = in->cd_offset;
3049 size_t cur_len;
3050 mblk_t *mp;
3051
3052 switch (in->cd_format) {
3053 case CRYPTO_DATA_RAW:
3054 if (count > in->cd_length) {
3055 /*
3056 * The caller specified a length greater than the
3057 * size of the buffer.
3058 */
3059 return (CRYPTO_DATA_LEN_RANGE);
3060 }
3061 if (reverse)
3062 dca_reverse(in->cd_raw.iov_base + off, dest, count,
3063 count);
3064 else
3065 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3066 in->cd_offset += count;
3067 in->cd_length -= count;
3068 break;
3069
3070 case CRYPTO_DATA_UIO:
3071 /*
3072 * Jump to the first iovec containing data to be processed.
3073 */
3074 uiop = in->cd_uio;
3075 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3076 off >= uiop->uio_iov[vec_idx].iov_len;
3077 off -= uiop->uio_iov[vec_idx++].iov_len)
3078 ;
3079 if (vec_idx == uiop->uio_iovcnt) {
3080 /*
3081 * The caller specified an offset that is larger than
3082 * the total size of the buffers it provided.
3083 */
3084 return (CRYPTO_DATA_LEN_RANGE);
3085 }
3086
3087 /*
3088 * Now process the iovecs.
3089 */
3090 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3091 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3092 off, count);
3093 count -= cur_len;
3094 if (reverse) {
3095 /* Fill the dest buffer from the end */
3096 dca_reverse(uiop->uio_iov[vec_idx].iov_base +
3097 off, dest+count, cur_len, cur_len);
3098 } else {
3099 bcopy(uiop->uio_iov[vec_idx].iov_base + off,
3100 dest, cur_len);
3101 dest += cur_len;
3102 }
3103 in->cd_offset += cur_len;
3104 in->cd_length -= cur_len;
3105 vec_idx++;
3106 off = 0;
3107 }
3108
3109 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3110 /*
3111 * The end of the specified iovecs was reached but
3112 * the length requested could not be processed
3113 * (requested to digest more data than it provided).
3114 */
3115 return (CRYPTO_DATA_LEN_RANGE);
3116 }
3117 break;
3118
3119 case CRYPTO_DATA_MBLK:
3120 /*
3121 * Jump to the first mblk_t containing data to be processed.
3122 */
3123 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3124 off -= MBLKL(mp), mp = mp->b_cont)
3125 ;
3126 if (mp == NULL) {
3127 /*
3128 * The caller specified an offset that is larger than
3129 * the total size of the buffers it provided.
3130 */
3131 return (CRYPTO_DATA_LEN_RANGE);
3132 }
3133
3134 /*
3135 * Now do the processing on the mblk chain.
3136 */
3137 while (mp != NULL && count > 0) {
3138 cur_len = min(MBLKL(mp) - off, count);
3139 count -= cur_len;
3140 if (reverse) {
3141 /* Fill the dest buffer from the end */
3142 dca_reverse((char *)(mp->b_rptr + off),
3143 dest+count, cur_len, cur_len);
3144 } else {
3145 bcopy((char *)(mp->b_rptr + off), dest,
3146 cur_len);
3147 dest += cur_len;
3148 }
3149 in->cd_offset += cur_len;
3150 in->cd_length -= cur_len;
3151 mp = mp->b_cont;
3152 off = 0;
3153 }
3154
3155 if (mp == NULL && count > 0) {
3156 /*
3157 * The end of the mblk was reached but the length
3158 * requested could not be processed (requested to
3159 * digest more data than it provided).
3160 */
3161 return (CRYPTO_DATA_LEN_RANGE);
3162 }
3163 break;
3164
3165 default:
3166 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
3167 rv = CRYPTO_ARGUMENTS_BAD;
3168 }
3169 return (rv);
3170 }
3171
3172 /*
3173 * Increments the cd_offset and decrements the cd_length as the data is
3174 * gathered from the crypto_data_t struct.
3175 */
3176 int
3177 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
3178 int count)
3179 {
3180 int rv = CRYPTO_SUCCESS;
3181 caddr_t baddr;
3182 uint_t vec_idx;
3183 uio_t *uiop;
3184 off_t off = in->cd_offset;
3185 size_t cur_len;
3186 mblk_t *mp;
3187
3188 /* Process the residual first */
3189 if (*residlen > 0) {
3190 uint_t num = min(count, *residlen);
3191 bcopy(resid, dest, num);
3192 *residlen -= num;
3193 if (*residlen > 0) {
3194 /*
3195 * Requested amount 'count' is less than what's in
3196 * the residual, so shuffle any remaining resid to
3197 * the front.
3198 */
3199 baddr = resid + num;
3200 bcopy(baddr, resid, *residlen);
3201 }
3202 dest += num;
3203 count -= num;
3204 }
3205
3206 /* Now process what's in the crypto_data_t structs */
3207 switch (in->cd_format) {
3208 case CRYPTO_DATA_RAW:
3209 if (count > in->cd_length) {
3210 /*
3211 * The caller specified a length greater than the
3212 * size of the buffer.
3213 */
3214 return (CRYPTO_DATA_LEN_RANGE);
3215 }
3216 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3217 in->cd_offset += count;
3218 in->cd_length -= count;
3219 break;
3220
3221 case CRYPTO_DATA_UIO:
3222 /*
3223 * Jump to the first iovec containing data to be processed.
3224 */
3225 uiop = in->cd_uio;
3226 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3227 off >= uiop->uio_iov[vec_idx].iov_len;
3228 off -= uiop->uio_iov[vec_idx++].iov_len)
3229 ;
3230 if (vec_idx == uiop->uio_iovcnt) {
3231 /*
3232 * The caller specified an offset that is larger than
3233 * the total size of the buffers it provided.
3234 */
3235 return (CRYPTO_DATA_LEN_RANGE);
3236 }
3237
3238 /*
3239 * Now process the iovecs.
3240 */
3241 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3242 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3243 off, count);
3244 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
3245 cur_len);
3246 count -= cur_len;
3247 dest += cur_len;
3248 in->cd_offset += cur_len;
3249 in->cd_length -= cur_len;
3250 vec_idx++;
3251 off = 0;
3252 }
3253
3254 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3255 /*
3256 * The end of the specified iovecs was reached but
3257 * the length requested could not be processed
3258 * (requested to digest more data than it provided).
3259 */
3260 return (CRYPTO_DATA_LEN_RANGE);
3261 }
3262 break;
3263
3264 case CRYPTO_DATA_MBLK:
3265 /*
3266 * Jump to the first mblk_t containing data to be processed.
3267 */
3268 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3269 off -= MBLKL(mp), mp = mp->b_cont)
3270 ;
3271 if (mp == NULL) {
3272 /*
3273 * The caller specified an offset that is larger than
3274 * the total size of the buffers it provided.
3275 */
3276 return (CRYPTO_DATA_LEN_RANGE);
3277 }
3278
3279 /*
3280 * Now do the processing on the mblk chain.
3281 */
3282 while (mp != NULL && count > 0) {
3283 cur_len = min(MBLKL(mp) - off, count);
3284 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
3285 count -= cur_len;
3286 dest += cur_len;
3287 in->cd_offset += cur_len;
3288 in->cd_length -= cur_len;
3289 mp = mp->b_cont;
3290 off = 0;
3291 }
3292
3293 if (mp == NULL && count > 0) {
3294 /*
3295 * The end of the mblk was reached but the length
3296 * requested could not be processed (requested to
3297 * digest more data than it provided).
3298 */
3299 return (CRYPTO_DATA_LEN_RANGE);
3300 }
3301 break;
3302
3303 default:
3304 DBG(NULL, DWARN,
3305 "dca_resid_gather: unrecognised crypto data format");
3306 rv = CRYPTO_ARGUMENTS_BAD;
3307 }
3308 return (rv);
3309 }
3310
3311 /*
3312 * Appends the data to the crypto_data_t struct increasing cd_length.
3313 * cd_offset is left unchanged.
3314 * Data is reverse-copied if the flag is TRUE.
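 * The write position is cd_offset + cd_length, i.e. new data is
 * appended after any output already present in the buffer.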
3315 */
3316 int
3317 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3318 {
3319 int rv = CRYPTO_SUCCESS;
3320 off_t offset = out->cd_offset + out->cd_length;
3321 uint_t vec_idx;
3322 uio_t *uiop;
3323 size_t cur_len;
3324 mblk_t *mp;
3325
3326 switch (out->cd_format) {
3327 case CRYPTO_DATA_RAW:
3328 if (out->cd_raw.iov_len - offset < count) {
3329 /* Trying to write out more than space available. */
3330 return (CRYPTO_DATA_LEN_RANGE);
3331 }
3332 if (reverse)
3333 dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3334 count, count);
3335 else
3336 bcopy(src, out->cd_raw.iov_base + offset, count);
3337 out->cd_length += count;
3338 break;
3339
3340 case CRYPTO_DATA_UIO:
3341 /*
3342 * Jump to the first iovec that can be written to.
3343 */
3344 uiop = out->cd_uio;
3345 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3346 offset >= uiop->uio_iov[vec_idx].iov_len;
3347 offset -= uiop->uio_iov[vec_idx++].iov_len)
3348 ;
3349 if (vec_idx == uiop->uio_iovcnt) {
3350 /*
3351 * The caller specified an offset that is larger than
3352 * the total size of the buffers it provided.
3353 */
3354 return (CRYPTO_DATA_LEN_RANGE);
3355 }
3356
3357 /*
3358 * Now process the iovecs.
3359 */
3360 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3361 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3362 offset, count);
3363 count -= cur_len;
3364 if (reverse) {
3365 dca_reverse((void*) (src+count),
3366 uiop->uio_iov[vec_idx].iov_base +
3367 offset, cur_len, cur_len);
3368 } else {
3369 bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3370 offset, cur_len);
3371 src += cur_len;
3372 }
3373 out->cd_length += cur_len;
3374 vec_idx++;
3375 offset = 0;
3376 }
3377
3378 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3379 /*
3380 * The end of the specified iovecs was reached but
3381 * the length requested could not be processed
3382 * (requested to write more data than space provided).
3383 */
3384 return (CRYPTO_DATA_LEN_RANGE);
3385 }
3386 break;
3387
3388 case CRYPTO_DATA_MBLK:
3389 /*
3390 * Jump to the first mblk_t that can be written to.
3391 */
3392 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3393 offset -= MBLKL(mp), mp = mp->b_cont)
3394 ;
3395 if (mp == NULL) {
3396 /*
3397 * The caller specified an offset that is larger than
3398 * the total size of the buffers it provided.
3399 */
3400 return (CRYPTO_DATA_LEN_RANGE);
3401 }
3402
3403 /*
3404 * Now do the processing on the mblk chain.
3405 */
3406 while (mp != NULL && count > 0) {
3407 cur_len = min(MBLKL(mp) - offset, count);
3408 count -= cur_len;
3409 if (reverse) {
3410 dca_reverse((void*) (src+count),
3411 (char *)(mp->b_rptr + offset), cur_len,
3412 cur_len);
3413 } else {
3414 bcopy(src, (char *)(mp->b_rptr + offset),
3415 cur_len);
3416 src += cur_len;
3417 }
3418 out->cd_length += cur_len;
3419 mp = mp->b_cont;
3420 offset = 0;
3421 }
3422
3423 if (mp == NULL && count > 0) {
3424 /*
3425 * The end of the mblk was reached but the length
3426 * requested could not be processed (requested to
3427 * write more data than space provided).
3428 */
3429 return (CRYPTO_DATA_LEN_RANGE);
3430 }
3431 break;
3432
3433 default:
3434 DBG(NULL, DWARN, "unrecognised crypto data format");
3435 rv = CRYPTO_ARGUMENTS_BAD;
3436 }
3437 return (rv);
3438 }
3439
3440 /*
3441 * Compare two byte arrays in reverse order.
3442 * Return 0 if they are identical, 1 otherwise.
3443 */
3444 int
3445 dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
3446 {
3447 int i;
3448 caddr_t src, dst;
3449
3450 if (!n)
3451 return (0);
3452
3453 src = ((caddr_t)s1) + n - 1;
3454 dst = (caddr_t)s2;
3455 for (i = 0; i < n; i++) {
3456 if (*src != *dst)
3457 return (1);
3458 src--;
3459 dst++;
3460 }
3461
3462 return (0);
3463 }
3464
3465
3466 /*
3467 * This calculates the size of a bignum in bits, specifically not counting
3468 * leading zero bits. This size calculation must be done *before* any
3469 * endian reversal takes place (i.e. the numbers are in absolute big-endian
3470 * order.)
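 *
 * For example, the two-byte bignum { 0x00, 0x05 } has one leading
 * zero byte and three significant bits in 0x05, so the result is
 * 8 * (2 - 1 - 1) + 3 = 3.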
3471 */
3472 int
3473 dca_bitlen(unsigned char *bignum, int bytelen)
3474 {
3475 unsigned char msbyte;
3476 int i, j;
3477
3478 for (i = 0; i < bytelen - 1; i++) {
3479 if (bignum[i] != 0) {
3480 break;
3481 }
3482 }
3483 msbyte = bignum[i];
3484 for (j = 8; j > 1; j--) {
3485 if (msbyte & 0x80) {
3486 break;
3487 }
3488 msbyte <<= 1;
3489 }
3490 return ((8 * (bytelen - i - 1)) + j);
3491 }
3492
3493 /*
3494 * This compares two bignums (in big-endian order). It ignores leading
3495 * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
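 *
 * For example, comparing { 0x00, 0x02 } (length 2) against { 0x03 }
 * (length 1) strips the leading zero and returns a negative value,
 * since 0x02 < 0x03.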
3496 */
3497 int
3498 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3499 {
3500 while ((n1len > 1) && (*n1 == 0)) {
3501 n1len--;
3502 n1++;
3503 }
3504 while ((n2len > 1) && (*n2 == 0)) {
3505 n2len--;
3506 n2++;
3507 }
3508 if (n1len != n2len) {
3509 return (n1len - n2len);
3510 }
3511 while ((n1len > 1) && (*n1 == *n2)) {
3512 n1++;
3513 n2++;
3514 n1len--;
3515 }
3516 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3517 }
3518
3519 /*
3520 * Return array of key attributes.
3521 */
3522 crypto_object_attribute_t *
3523 dca_get_key_attr(crypto_key_t *key)
3524 {
3525 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3526 (key->ck_count == 0)) {
3527 return (NULL);
3528 }
3529
3530 return (key->ck_attrs);
3531 }
3532
3533 /*
3534 * If the attribute type exists, valp points to its 32-bit value.
3535 */
3536 int
3537 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3538 uint64_t atype, uint32_t *valp)
3539 {
3540 crypto_object_attribute_t *bap;
3541
3542 bap = dca_find_attribute(attrp, atnum, atype);
3543 if (bap == NULL) {
3544 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3545 }
3546
3547 *valp = *bap->oa_value;
3548
3549 return (CRYPTO_SUCCESS);
3550 }
3551
3552 /*
3553 * If the attribute type exists, data contains the start address of the
3554 * value, and numelems contains its length.
3555 */
3556 int
3557 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3558 uint64_t atype, void **data, unsigned int *numelems)
3559 {
3560 crypto_object_attribute_t *bap;
3561
3562 bap = dca_find_attribute(attrp, atnum, atype);
3563 if (bap == NULL) {
3564 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3565 }
3566
3567 *data = bap->oa_value;
3568 *numelems = bap->oa_value_len;
3569
3570 return (CRYPTO_SUCCESS);
3571 }
3572
3573 /*
3574 * Finds the entry of the specified attribute type. If it is not found,
3575 * dca_find_attribute returns NULL.
3576 */
3577 crypto_object_attribute_t *
3578 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3579 uint64_t atype)
3580 {
3581 while (atnum) {
3582 if (attrp->oa_type == atype)
3583 return (attrp);
3584 atnum--;
3585 attrp++;
3586 }
3587 return (NULL);
3588 }
3589
3590 /*
3591 * Return the address of the first data buffer. If the data format is
3592 * unrecognised, return NULL.
3593 */
3594 caddr_t
3595 dca_bufdaddr(crypto_data_t *data)
3596 {
3597 switch (data->cd_format) {
3598 case CRYPTO_DATA_RAW:
3599 return (data->cd_raw.iov_base + data->cd_offset);
3600 case CRYPTO_DATA_UIO:
3601 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3602 case CRYPTO_DATA_MBLK:
3603 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3604 default:
3605 DBG(NULL, DWARN,
3606 "dca_bufdaddr: unrecognised crypto data format");
3607 return (NULL);
3608 }
3609 }
3610
3611 static caddr_t
3612 dca_bufdaddr_out(crypto_data_t *data)
3613 {
3614 size_t offset = data->cd_offset + data->cd_length;
3615
3616 switch (data->cd_format) {
3617 case CRYPTO_DATA_RAW:
3618 return (data->cd_raw.iov_base + offset);
3619 case CRYPTO_DATA_UIO:
3620 return (data->cd_uio->uio_iov[0].iov_base + offset);
3621 case CRYPTO_DATA_MBLK:
3622 return ((char *)data->cd_mp->b_rptr + offset);
3623 default:
3624 DBG(NULL, DWARN,
3625 "dca_bufdaddr_out: unrecognised crypto data format");
3626 return (NULL);
3627 }
3628 }
3629
3630 /*
3631 * Control entry points.
3632 */
3633
3634 /* ARGSUSED */
3635 static void
3636 dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
3637 {
3638 *status = CRYPTO_PROVIDER_READY;
3639 }
3640
3641 /*
3642 * Cipher (encrypt/decrypt) entry points.
3643 */
3644
3645 /* ARGSUSED */
3646 static int
3647 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3648 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3649 crypto_req_handle_t req)
3650 {
3651 int error = CRYPTO_FAILED;
3652 dca_t *softc;
3653
3654 softc = DCA_SOFTC_FROM_CTX(ctx);
3655 DBG(softc, DENTRY, "dca_encrypt_init: started");
3656
3657 /* check mechanism */
3658 switch (mechanism->cm_type) {
3659 case DES_CBC_MECH_INFO_TYPE:
3660 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3661 DR_ENCRYPT);
3662 break;
3663 case DES3_CBC_MECH_INFO_TYPE:
3664 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3665 DR_ENCRYPT | DR_TRIPLE);
3666 break;
3667 case RSA_PKCS_MECH_INFO_TYPE:
3668 case RSA_X_509_MECH_INFO_TYPE:
3669 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3670 break;
3671 default:
3672 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
3673 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3674 error = CRYPTO_MECHANISM_INVALID;
3675 }
3676
3677 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
3678
3679 if (error == CRYPTO_SUCCESS)
3680 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3681 &softc->dca_ctx_list_lock);
3682
3683 return (error);
3684 }
3685
3686 /* ARGSUSED */
3687 static int
3688 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3689 crypto_data_t *ciphertext, crypto_req_handle_t req)
3690 {
3691 int error = CRYPTO_FAILED;
3692 dca_t *softc;
3693
3694 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3695 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3696
3697 softc = DCA_SOFTC_FROM_CTX(ctx);
3698 DBG(softc, DENTRY, "dca_encrypt: started");
3699
3700 /* handle inplace ops */
3701 if (!ciphertext) {
3702 dca_request_t *reqp = ctx->cc_provider_private;
3703 reqp->dr_flags |= DR_INPLACE;
3704 ciphertext = plaintext;
3705 }
3706
3707 /* check mechanism */
3708 switch (DCA_MECH_FROM_CTX(ctx)) {
3709 case DES_CBC_MECH_INFO_TYPE:
3710 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3711 break;
3712 case DES3_CBC_MECH_INFO_TYPE:
3713 error = dca_3des(ctx, plaintext, ciphertext, req,
3714 DR_ENCRYPT | DR_TRIPLE);
3715 break;
3716 case RSA_PKCS_MECH_INFO_TYPE:
3717 case RSA_X_509_MECH_INFO_TYPE:
3718 error = dca_rsastart(ctx, plaintext, ciphertext, req,
3719 DCA_RSA_ENC);
3720 break;
3721 default:
3722 /* Should never reach here */
3723 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3724 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3725 error = CRYPTO_MECHANISM_INVALID;
3726 }
3727
3728 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3729 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3730 ciphertext->cd_length = 0;
3731 }
3732
3733 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3734
3735 return (error);
3736 }
3737
3738 /* ARGSUSED */
3739 static int
3740 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3741 crypto_data_t *ciphertext, crypto_req_handle_t req)
3742 {
3743 int error = CRYPTO_FAILED;
3744 dca_t *softc;
3745
3746 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3747 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3748
3749 softc = DCA_SOFTC_FROM_CTX(ctx);
3750 DBG(softc, DENTRY, "dca_encrypt_update: started");
3751
3752 /* handle inplace ops */
3753 if (!ciphertext) {
3754 dca_request_t *reqp = ctx->cc_provider_private;
3755 reqp->dr_flags |= DR_INPLACE;
3756 ciphertext = plaintext;
3757 }
3758
3759 /* check mechanism */
3760 switch (DCA_MECH_FROM_CTX(ctx)) {
3761 case DES_CBC_MECH_INFO_TYPE:
3762 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3763 DR_ENCRYPT);
3764 break;
3765 case DES3_CBC_MECH_INFO_TYPE:
3766 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3767 DR_ENCRYPT | DR_TRIPLE);
3768 break;
3769 default:
3770 /* Should never reach here */
3771 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3772 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3773 error = CRYPTO_MECHANISM_INVALID;
3774 }
3775
3776 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3777
3778 return (error);
3779 }
3780
3781 /* ARGSUSED */
3782 static int
3783 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3784 crypto_req_handle_t req)
3785 {
3786 int error = CRYPTO_FAILED;
3787 dca_t *softc;
3788
3789 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3790 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3791
3792 softc = DCA_SOFTC_FROM_CTX(ctx);
3793 DBG(softc, DENTRY, "dca_encrypt_final: started");
3794
3795 /* check mechanism */
3796 switch (DCA_MECH_FROM_CTX(ctx)) {
3797 case DES_CBC_MECH_INFO_TYPE:
3798 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3799 break;
3800 case DES3_CBC_MECH_INFO_TYPE:
3801 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3802 break;
3803 default:
3804 /* Should never reach here */
3805 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3806 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3807 error = CRYPTO_MECHANISM_INVALID;
3808 }
3809
3810 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3811
3812 return (error);
3813 }
3814
3815 /* ARGSUSED */
3816 static int
3817 dca_encrypt_atomic(crypto_provider_handle_t provider,
3818 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
3819 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
3820 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
3821 {
3822 int error = CRYPTO_FAILED;
3823 dca_t *softc = (dca_t *)provider;
3824
3825 DBG(softc, DENTRY, "dca_encrypt_atomic: started");
3826
3827 if (ctx_template != NULL)
3828 return (CRYPTO_ARGUMENTS_BAD);
3829
3830 /* handle inplace ops */
3831 if (!ciphertext) {
3832 ciphertext = plaintext;
3833 }
3834
3835 /* check mechanism */
3836 switch (mechanism->cm_type) {
3837 case DES_CBC_MECH_INFO_TYPE:
3838 error = dca_3desatomic(provider, session_id, mechanism, key,
3839 plaintext, ciphertext, KM_SLEEP, req,
3840 DR_ENCRYPT | DR_ATOMIC);
3841 break;
3842 case DES3_CBC_MECH_INFO_TYPE:
3843 error = dca_3desatomic(provider, session_id, mechanism, key,
3844 plaintext, ciphertext, KM_SLEEP, req,
3845 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
3846 break;
3847 case RSA_PKCS_MECH_INFO_TYPE:
3848 case RSA_X_509_MECH_INFO_TYPE:
3849 error = dca_rsaatomic(provider, session_id, mechanism, key,
3850 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
3851 break;
3852 default:
3853 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
3854 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3855 error = CRYPTO_MECHANISM_INVALID;
3856 }
3857
3858 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
3859 ciphertext->cd_length = 0;
3860 }
3861
3862 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
3863
3864 return (error);
3865 }
3866
3867 /* ARGSUSED */
3868 static int
3869 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3870 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3871 crypto_req_handle_t req)
3872 {
3873 int error = CRYPTO_FAILED;
3874 dca_t *softc;
3875
3876 softc = DCA_SOFTC_FROM_CTX(ctx);
3877 DBG(softc, DENTRY, "dca_decrypt_init: started");
3878
3879 /* check mechanism */
3880 switch (mechanism->cm_type) {
3881 case DES_CBC_MECH_INFO_TYPE:
3882 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3883 DR_DECRYPT);
3884 break;
3885 case DES3_CBC_MECH_INFO_TYPE:
3886 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3887 DR_DECRYPT | DR_TRIPLE);
3888 break;
3889 case RSA_PKCS_MECH_INFO_TYPE:
3890 case RSA_X_509_MECH_INFO_TYPE:
3891 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3892 break;
3893 default:
3894 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3895 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3896 error = CRYPTO_MECHANISM_INVALID;
3897 }
3898
3899 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3900
3901 if (error == CRYPTO_SUCCESS)
3902 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3903 &softc->dca_ctx_list_lock);
3904
3905 return (error);
3906 }
3907
3908 /* ARGSUSED */
3909 static int
3910 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3911 crypto_data_t *plaintext, crypto_req_handle_t req)
3912 {
3913 int error = CRYPTO_FAILED;
3914 dca_t *softc;
3915
3916 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3917 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3918
3919 softc = DCA_SOFTC_FROM_CTX(ctx);
3920 DBG(softc, DENTRY, "dca_decrypt: started");
3921
3922 /* handle inplace ops */
3923 if (!plaintext) {
3924 dca_request_t *reqp = ctx->cc_provider_private;
3925 reqp->dr_flags |= DR_INPLACE;
3926 plaintext = ciphertext;
3927 }
3928
3929 /* check mechanism */
3930 switch (DCA_MECH_FROM_CTX(ctx)) {
3931 case DES_CBC_MECH_INFO_TYPE:
3932 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3933 break;
3934 case DES3_CBC_MECH_INFO_TYPE:
3935 error = dca_3des(ctx, ciphertext, plaintext, req,
3936 DR_DECRYPT | DR_TRIPLE);
3937 break;
3938 case RSA_PKCS_MECH_INFO_TYPE:
3939 case RSA_X_509_MECH_INFO_TYPE:
3940 error = dca_rsastart(ctx, ciphertext, plaintext, req,
3941 DCA_RSA_DEC);
3942 break;
3943 default:
3944 /* Should never reach here */
3945 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
3946 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3947 error = CRYPTO_MECHANISM_INVALID;
3948 }
3949
3950 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3951 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3952 if (plaintext)
3953 plaintext->cd_length = 0;
3954 }
3955
3956 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
3957
3958 return (error);
3959 }
3960
3961 /* ARGSUSED */
3962 static int
3963 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3964 crypto_data_t *plaintext, crypto_req_handle_t req)
3965 {
3966 int error = CRYPTO_FAILED;
3967 dca_t *softc;
3968
3969 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3970 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3971
3972 softc = DCA_SOFTC_FROM_CTX(ctx);
3973 DBG(softc, DENTRY, "dca_decrypt_update: started");
3974
3975 /* handle inplace ops */
3976 if (!plaintext) {
3977 dca_request_t *reqp = ctx->cc_provider_private;
3978 reqp->dr_flags |= DR_INPLACE;
3979 plaintext = ciphertext;
3980 }
3981
3982 /* check mechanism */
3983 switch (DCA_MECH_FROM_CTX(ctx)) {
3984 case DES_CBC_MECH_INFO_TYPE:
3985 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
3986 DR_DECRYPT);
3987 break;
3988 case DES3_CBC_MECH_INFO_TYPE:
3989 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
3990 DR_DECRYPT | DR_TRIPLE);
3991 break;
3992 default:
3993 /* Should never reach here */
3994 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
3995 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3996 error = CRYPTO_MECHANISM_INVALID;
3997 }
3998
3999 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4000
4001 return (error);
4002 }
4003
4004 /* ARGSUSED */
4005 static int
4006 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4007 crypto_req_handle_t req)
4008 {
4009 int error = CRYPTO_FAILED;
4010 dca_t *softc;
4011
4012 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4013 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4014
4015 softc = DCA_SOFTC_FROM_CTX(ctx);
4016 DBG(softc, DENTRY, "dca_decrypt_final: started");
4017
4018 /* check mechanism */
4019 switch (DCA_MECH_FROM_CTX(ctx)) {
4020 case DES_CBC_MECH_INFO_TYPE:
4021 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4022 break;
4023 case DES3_CBC_MECH_INFO_TYPE:
4024 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4025 break;
4026 default:
4027 /* Should never reach here */
4028 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4029 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4030 error = CRYPTO_MECHANISM_INVALID;
4031 }
4032
4033 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4034
4035 return (error);
4036 }
4037
4038 /* ARGSUSED */
4039 static int
4040 dca_decrypt_atomic(crypto_provider_handle_t provider,
4041 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4042 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4043 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4044 {
4045 int error = CRYPTO_FAILED;
4046 dca_t *softc = (dca_t *)provider;
4047
4048 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4049
4050 if (ctx_template != NULL)
4051 return (CRYPTO_ARGUMENTS_BAD);
4052
4053 /* handle inplace ops */
4054 if (!plaintext) {
4055 plaintext = ciphertext;
4056 }
4057
4058 /* check mechanism */
4059 switch (mechanism->cm_type) {
4060 case DES_CBC_MECH_INFO_TYPE:
4061 error = dca_3desatomic(provider, session_id, mechanism, key,
4062 ciphertext, plaintext, KM_SLEEP, req,
4063 DR_DECRYPT | DR_ATOMIC);
4064 break;
4065 case DES3_CBC_MECH_INFO_TYPE:
4066 error = dca_3desatomic(provider, session_id, mechanism, key,
4067 ciphertext, plaintext, KM_SLEEP, req,
4068 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4069 break;
4070 case RSA_PKCS_MECH_INFO_TYPE:
4071 case RSA_X_509_MECH_INFO_TYPE:
4072 error = dca_rsaatomic(provider, session_id, mechanism, key,
4073 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4074 break;
4075 default:
4076 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4077 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4078 error = CRYPTO_MECHANISM_INVALID;
4079 }
4080
4081 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4082 plaintext->cd_length = 0;
4083 }
4084
4085 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4086
4087 return (error);
4088 }
4089
4090 /*
4091 * Sign entry points.
4092 */
4093
4094 /* ARGSUSED */
4095 static int
4096 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4097 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4098 crypto_req_handle_t req)
4099 {
4100 int error = CRYPTO_FAILED;
4101 dca_t *softc;
4102
4103 softc = DCA_SOFTC_FROM_CTX(ctx);
4104 DBG(softc, DENTRY, "dca_sign_init: started\n");
4105
4106 if (ctx_template != NULL)
4107 return (CRYPTO_ARGUMENTS_BAD);
4108
4109 /* check mechanism */
4110 switch (mechanism->cm_type) {
4111 case RSA_PKCS_MECH_INFO_TYPE:
4112 case RSA_X_509_MECH_INFO_TYPE:
4113 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4114 break;
4115 case DSA_MECH_INFO_TYPE:
4116 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4117 DCA_DSA_SIGN);
4118 break;
4119 default:
4120 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4121 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4122 error = CRYPTO_MECHANISM_INVALID;
4123 }
4124
4125 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4126
4127 if (error == CRYPTO_SUCCESS)
4128 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4129 &softc->dca_ctx_list_lock);
4130
4131 return (error);
4132 }
4133
4134 static int
4135 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4136 crypto_data_t *signature, crypto_req_handle_t req)
4137 {
4138 int error = CRYPTO_FAILED;
4139 dca_t *softc;
4140
4141 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4142 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4143
4144 softc = DCA_SOFTC_FROM_CTX(ctx);
4145 DBG(softc, DENTRY, "dca_sign: started\n");
4146
4147 /* check mechanism */
4148 switch (DCA_MECH_FROM_CTX(ctx)) {
4149 case RSA_PKCS_MECH_INFO_TYPE:
4150 case RSA_X_509_MECH_INFO_TYPE:
4151 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4152 break;
4153 case DSA_MECH_INFO_TYPE:
4154 error = dca_dsa_sign(ctx, data, signature, req);
4155 break;
4156 default:
4157 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4158 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4159 error = CRYPTO_MECHANISM_INVALID;
4160 }
4161
4162 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4163
4164 return (error);
4165 }
4166
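/*
 * The RSA and DSA mechanisms supported here are single-part only, so
 * the multi-part sign entry points below unconditionally fail with
 * CRYPTO_MECHANISM_INVALID.
 */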
4167 /* ARGSUSED */
4168 static int
4169 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4170 crypto_req_handle_t req)
4171 {
4172 int error = CRYPTO_MECHANISM_INVALID;
4173 dca_t *softc;
4174
4175 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4176 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4177
4178 softc = DCA_SOFTC_FROM_CTX(ctx);
4179 DBG(softc, DENTRY, "dca_sign_update: started\n");
4180
4181 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4182 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4183
4184 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4185
4186 return (error);
4187 }
4188
4189 /* ARGSUSED */
4190 static int
4191 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4192 crypto_req_handle_t req)
4193 {
4194 int error = CRYPTO_MECHANISM_INVALID;
4195 dca_t *softc;
4196
4197 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4198 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4199
4200 softc = DCA_SOFTC_FROM_CTX(ctx);
4201 DBG(softc, DENTRY, "dca_sign_final: started\n");
4202
4203 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4204 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4205
4206 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4207
4208 return (error);
4209 }
4210
4211 static int
4212 dca_sign_atomic(crypto_provider_handle_t provider,
4213 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4214 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4215 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4216 {
4217 int error = CRYPTO_FAILED;
4218 dca_t *softc = (dca_t *)provider;
4219
4220 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4221
4222 if (ctx_template != NULL)
4223 return (CRYPTO_ARGUMENTS_BAD);
4224
4225 /* check mechanism */
4226 switch (mechanism->cm_type) {
4227 case RSA_PKCS_MECH_INFO_TYPE:
4228 case RSA_X_509_MECH_INFO_TYPE:
4229 error = dca_rsaatomic(provider, session_id, mechanism, key,
4230 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4231 break;
4232 case DSA_MECH_INFO_TYPE:
4233 error = dca_dsaatomic(provider, session_id, mechanism, key,
4234 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4235 break;
4236 default:
4237 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4238 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4239 error = CRYPTO_MECHANISM_INVALID;
4240 }
4241
4242 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4243
4244 return (error);
4245 }
4246
4247 /* ARGSUSED */
4248 static int
4249 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4250 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4251 crypto_req_handle_t req)
4252 {
4253 int error = CRYPTO_FAILED;
4254 dca_t *softc;
4255
4256 softc = DCA_SOFTC_FROM_CTX(ctx);
4257 DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4258
4259 if (ctx_template != NULL)
4260 return (CRYPTO_ARGUMENTS_BAD);
4261
4262 /* check mechanism */
4263 switch (mechanism->cm_type) {
4264 case RSA_PKCS_MECH_INFO_TYPE:
4265 case RSA_X_509_MECH_INFO_TYPE:
4266 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4267 break;
4268 default:
4269 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4270 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4271 error = CRYPTO_MECHANISM_INVALID;
4272 }
4273
4274 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4275
4276 if (error == CRYPTO_SUCCESS)
4277 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4278 &softc->dca_ctx_list_lock);
4279
4280 return (error);
4281 }
4282
4283 static int
4284 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4285 crypto_data_t *signature, crypto_req_handle_t req)
4286 {
4287 int error = CRYPTO_FAILED;
4288 dca_t *softc;
4289
4290 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4291 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4292
4293 softc = DCA_SOFTC_FROM_CTX(ctx);
4294 DBG(softc, DENTRY, "dca_sign_recover: started\n");
4295
4296 /* check mechanism */
4297 switch (DCA_MECH_FROM_CTX(ctx)) {
4298 case RSA_PKCS_MECH_INFO_TYPE:
4299 case RSA_X_509_MECH_INFO_TYPE:
4300 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4301 break;
4302 default:
4303 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4304 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4305 error = CRYPTO_MECHANISM_INVALID;
4306 }
4307
4308 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4309
4310 return (error);
4311 }
4312
4313 static int
4314 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4315 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4316 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4317 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4318 {
4319 int error = CRYPTO_FAILED;
4320 dca_t *softc = (dca_t *)provider;
4321
4322 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4323
4324 if (ctx_template != NULL)
4325 return (CRYPTO_ARGUMENTS_BAD);
4326
4327 /* check mechanism */
4328 switch (mechanism->cm_type) {
4329 case RSA_PKCS_MECH_INFO_TYPE:
4330 case RSA_X_509_MECH_INFO_TYPE:
4331 error = dca_rsaatomic(provider, session_id, mechanism, key,
4332 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4333 break;
4334 default:
4335 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4336 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4337 error = CRYPTO_MECHANISM_INVALID;
4338 }
4339
4340 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4341
4342 return (error);
4343 }
4344
4345 /*
4346 * Verify entry points.
4347 */
4348
4349 /* ARGSUSED */
4350 static int
4351 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4352 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4353 crypto_req_handle_t req)
4354 {
4355 int error = CRYPTO_FAILED;
4356 dca_t *softc;
4357
4358 softc = DCA_SOFTC_FROM_CTX(ctx);
4359 DBG(softc, DENTRY, "dca_verify_init: started\n");
4360
4361 if (ctx_template != NULL)
4362 return (CRYPTO_ARGUMENTS_BAD);
4363
4364 /* check mechanism */
4365 switch (mechanism->cm_type) {
4366 case RSA_PKCS_MECH_INFO_TYPE:
4367 case RSA_X_509_MECH_INFO_TYPE:
4368 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4369 break;
4370 case DSA_MECH_INFO_TYPE:
4371 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4372 DCA_DSA_VRFY);
4373 break;
4374 default:
4375 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4376 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4377 error = CRYPTO_MECHANISM_INVALID;
4378 }
4379
4380 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4381
4382 if (error == CRYPTO_SUCCESS)
4383 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4384 &softc->dca_ctx_list_lock);
4385
4386 return (error);
4387 }
4388
4389 static int
4390 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4391 crypto_req_handle_t req)
4392 {
4393 int error = CRYPTO_FAILED;
4394 dca_t *softc;
4395
4396 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4397 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4398
4399 softc = DCA_SOFTC_FROM_CTX(ctx);
4400 DBG(softc, DENTRY, "dca_verify: started\n");
4401
4402 /* check mechanism */
4403 switch (DCA_MECH_FROM_CTX(ctx)) {
4404 case RSA_PKCS_MECH_INFO_TYPE:
4405 case RSA_X_509_MECH_INFO_TYPE:
4406 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4407 break;
4408 case DSA_MECH_INFO_TYPE:
4409 error = dca_dsa_verify(ctx, data, signature, req);
4410 break;
4411 default:
4412 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4413 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4414 error = CRYPTO_MECHANISM_INVALID;
4415 }
4416
4417 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4418
4419 return (error);
4420 }
4421
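/*
 * As with sign, the verify mechanisms are single-part only; the
 * multi-part entry points below always return CRYPTO_MECHANISM_INVALID.
 */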
4422 /* ARGSUSED */
4423 static int
4424 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4425 crypto_req_handle_t req)
4426 {
4427 int error = CRYPTO_MECHANISM_INVALID;
4428 dca_t *softc;
4429
4430 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4431 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4432
4433 softc = DCA_SOFTC_FROM_CTX(ctx);
4434 DBG(softc, DENTRY, "dca_verify_update: started\n");
4435
4436 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4437 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4438
4439 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4440
4441 return (error);
4442 }
4443
4444 /* ARGSUSED */
4445 static int
4446 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4447 crypto_req_handle_t req)
4448 {
4449 int error = CRYPTO_MECHANISM_INVALID;
4450 dca_t *softc;
4451
4452 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4453 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4454
4455 softc = DCA_SOFTC_FROM_CTX(ctx);
4456 DBG(softc, DENTRY, "dca_verify_final: started\n");
4457
4458 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4459 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4460
4461 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4462
4463 return (error);
4464 }
4465
4466 static int
4467 dca_verify_atomic(crypto_provider_handle_t provider,
4468 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4469 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4470 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4471 {
4472 int error = CRYPTO_FAILED;
4473 dca_t *softc = (dca_t *)provider;
4474
4475 DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4476
4477 if (ctx_template != NULL)
4478 return (CRYPTO_ARGUMENTS_BAD);
4479
4480 /* check mechanism */
4481 switch (mechanism->cm_type) {
4482 case RSA_PKCS_MECH_INFO_TYPE:
4483 case RSA_X_509_MECH_INFO_TYPE:
4484 error = dca_rsaatomic(provider, session_id, mechanism, key,
4485 signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4486 break;
4487 case DSA_MECH_INFO_TYPE:
4488 error = dca_dsaatomic(provider, session_id, mechanism, key,
4489 data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4490 break;
4491 default:
4492 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4493 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4494 error = CRYPTO_MECHANISM_INVALID;
4495 }
4496
4497 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4498
4499 return (error);
4500 }
4501
4502 /* ARGSUSED */
4503 static int
4504 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4505 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4506 crypto_req_handle_t req)
4507 {
4508 int error = CRYPTO_MECHANISM_INVALID;
4509 dca_t *softc;
4510
4511 softc = DCA_SOFTC_FROM_CTX(ctx);
4512 DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4513
4514 if (ctx_template != NULL)
4515 return (CRYPTO_ARGUMENTS_BAD);
4516
4517 /* check mechanism */
4518 switch (mechanism->cm_type) {
4519 case RSA_PKCS_MECH_INFO_TYPE:
4520 case RSA_X_509_MECH_INFO_TYPE:
4521 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4522 break;
4523 default:
4524 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4525 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4526 }
4527
4528 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4529
4530 if (error == CRYPTO_SUCCESS)
4531 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4532 &softc->dca_ctx_list_lock);
4533
4534 return (error);
4535 }
4536
4537 static int
4538 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4539 crypto_data_t *data, crypto_req_handle_t req)
4540 {
4541 int error = CRYPTO_MECHANISM_INVALID;
4542 dca_t *softc;
4543
4544 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4545 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4546
4547 softc = DCA_SOFTC_FROM_CTX(ctx);
4548 DBG(softc, DENTRY, "dca_verify_recover: started\n");
4549
4550 /* check mechanism */
4551 switch (DCA_MECH_FROM_CTX(ctx)) {
4552 case RSA_PKCS_MECH_INFO_TYPE:
4553 case RSA_X_509_MECH_INFO_TYPE:
4554 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4555 break;
4556 default:
4557 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4558 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4559 }
4560
4561 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4562
4563 return (error);
4564 }
4565
4566 static int
4567 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4568 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4569 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4570 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4571 {
4572 int error = CRYPTO_MECHANISM_INVALID;
4573 dca_t *softc = (dca_t *)provider;
4574
4575 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4576
4577 if (ctx_template != NULL)
4578 return (CRYPTO_ARGUMENTS_BAD);
4579
4580 /* check mechanism */
4581 switch (mechanism->cm_type) {
4582 case RSA_PKCS_MECH_INFO_TYPE:
4583 case RSA_X_509_MECH_INFO_TYPE:
4584 error = dca_rsaatomic(provider, session_id, mechanism, key,
4585 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4586 break;
4587 default:
4588 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4589 "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4590 error = CRYPTO_MECHANISM_INVALID;
4591 }
4592
4593 DBG(softc, DENTRY,
4594 "dca_verify_recover_atomic: done, err = 0x%x", error);
4595
4596 return (error);
4597 }
4598
4599 /*
4600 * Random number entry points.
4601 */
4602
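/*
 * Random number generation is delegated to dca_rng(), which services
 * the request from the device's hardware RNG.
 */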
4603 /* ARGSUSED */
4604 static int
4605 dca_generate_random(crypto_provider_handle_t provider,
4606 crypto_session_id_t session_id,
4607 uchar_t *buf, size_t len, crypto_req_handle_t req)
4608 {
4609 int error = CRYPTO_FAILED;
4610 dca_t *softc = (dca_t *)provider;
4611
4612 DBG(softc, DENTRY, "dca_generate_random: started");
4613
4614 error = dca_rng(softc, buf, len, req);
4615
4616 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4617
4618 return (error);
4619 }
4620
4621 /*
4622 * Context management entry points.
4623 */
4624
4625 int
4626 dca_free_context(crypto_ctx_t *ctx)
4627 {
4628 int error = CRYPTO_SUCCESS;
4629 dca_t *softc;
4630
4631 softc = DCA_SOFTC_FROM_CTX(ctx);
4632 DBG(softc, DENTRY, "dca_free_context: entered");
4633
4634 if (ctx->cc_provider_private == NULL)
4635 return (error);
4636
4637 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4638
4639 error = dca_free_context_low(ctx);
4640
4641 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4642
4643 return (error);
4644 }
4645
4646 static int
4647 dca_free_context_low(crypto_ctx_t *ctx)
4648 {
4649 int error = CRYPTO_SUCCESS;
4650
4651 /* check mechanism */
4652 switch (DCA_MECH_FROM_CTX(ctx)) {
4653 case DES_CBC_MECH_INFO_TYPE:
4654 case DES3_CBC_MECH_INFO_TYPE:
4655 dca_3desctxfree(ctx);
4656 break;
4657 case RSA_PKCS_MECH_INFO_TYPE:
4658 case RSA_X_509_MECH_INFO_TYPE:
4659 dca_rsactxfree(ctx);
4660 break;
4661 case DSA_MECH_INFO_TYPE:
4662 dca_dsactxfree(ctx);
4663 break;
4664 default:
4665 /* Should never reach here */
4666 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4667 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4668 error = CRYPTO_MECHANISM_INVALID;
4669 }
4670
4671 return (error);
4672 }
4673
4674
4675 /* Free any private contexts that were never freed. Called from detach. */
4676 static void
4677 dca_free_context_list(dca_t *dca)
4678 {
4679 dca_listnode_t *node;
4680 crypto_ctx_t ctx;
4681
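/*
 * Build a temporary context on the stack so dca_free_context_low()
 * can dispatch on the mechanism type recorded in each private context.
 */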
4682 (void) memset(&ctx, 0, sizeof (ctx));
4683 ctx.cc_provider = dca;
4684
4685 while ((node = dca_delist2(&dca->dca_ctx_list,
4686 &dca->dca_ctx_list_lock)) != NULL) {
4687 ctx.cc_provider_private = node;
4688 (void) dca_free_context_low(&ctx);
4689 }
4690 }
4691
4692 static int
4693 ext_info_sym(crypto_provider_handle_t prov,
4694 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4695 {
4696 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4697 }
4698
4699 static int
4700 ext_info_asym(crypto_provider_handle_t prov,
4701 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4702 {
4703 int rv;
4704
4705 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4706 /* The asymmetric cipher slot supports random */
4707 ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4708
4709 return (rv);
4710 }
4711
4712 /* ARGSUSED */
4713 static int
4714 ext_info_base(crypto_provider_handle_t prov,
4715 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4716 {
4717 dca_t *dca = (dca_t *)prov;
4718 int len;
4719
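/*
 * The ei_* string fields below follow the PKCS#11 convention:
 * fixed-width, space-padded, and not NUL-terminated.
 */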
4720 /* Label */
4721 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4722 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4723 len = strlen((char *)ext_info->ei_label);
4724 (void) memset(ext_info->ei_label + len, ' ',
4725 CRYPTO_EXT_SIZE_LABEL - len);
4726
4727 /* Manufacturer ID */
4728 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4729 DCA_MANUFACTURER_ID);
4730 len = strlen((char *)ext_info->ei_manufacturerID);
4731 (void) memset(ext_info->ei_manufacturerID + len, ' ',
4732 CRYPTO_EXT_SIZE_MANUF - len);
4733
4734 /* Model */
4735 (void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);
4736
4737 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4738
4739 len = strlen((char *)ext_info->ei_model);
4740 (void) memset(ext_info->ei_model + len, ' ',
4741 CRYPTO_EXT_SIZE_MODEL - len);
4742
4743 /* Serial Number. Blank for Deimos */
4744 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4745
4746 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4747
4748 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4749 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4750 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4751 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4752 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4753 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4754 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4755 ext_info->ei_hardware_version.cv_major = 0;
4756 ext_info->ei_hardware_version.cv_minor = 0;
4757 ext_info->ei_firmware_version.cv_major = 0;
4758 ext_info->ei_firmware_version.cv_minor = 0;
4759
4760 /* Time. Not supplied, since the token has no clock. */
4761 ext_info->ei_time[0] = '\000';
4762
4763 return (CRYPTO_SUCCESS);
4764 }
4765
4766 static void
4767 dca_fma_init(dca_t *dca)
4768 {
4769 ddi_iblock_cookie_t fm_ibc;
4770 int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
4771 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
4772 DDI_FM_ERRCB_CAPABLE;
4773
4774 /* Read FMA capabilities from dca.conf file (if present) */
4775 dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
4776 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4777 fm_capabilities);
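/*
 * An illustrative dca.conf override (values follow the
 * DDI_FM_*_CAPABLE bit definitions in <sys/ddifm.h>):
 *	fm-capable = 1;
 * would restrict the driver to ereport generation only.
 */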
4778
4779 DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);
4780
4781 /* Only register with IO Fault Services if we have some capability */
4782 if (dca->fm_capabilities) {
4783 dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
4784 dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;
4785
4786 /* Register capabilities with IO Fault Services */
4787 ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
4788 DBG(dca, DWARN, "fm_capable() = 0x%x",
4789 ddi_fm_capable(dca->dca_dip));
4790
4791 /*
4792 * Initialize pci ereport capabilities if ereport capable
4793 */
4794 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4795 DDI_FM_ERRCB_CAP(dca->fm_capabilities))
4796 pci_ereport_setup(dca->dca_dip);
4797
4798 /*
4799 * Initialize callback mutex and register error callback if
4800 * error callback capable.
4801 */
4802 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4803 ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
4804 (void *)dca);
4805 }
4806 } else {
4807 /*
4808 * Reset these fields to their non-FMA defaults when no FMA
4809 * capabilities are present at runtime.
4810 */
4811 dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
4812 dca_dmaattr.dma_attr_flags = 0;
4813 }
4814 }
4815
4816
4817 static void
4818 dca_fma_fini(dca_t *dca)
4819 {
4820 /* Only unregister FMA capabilities if we registered some */
4821 if (dca->fm_capabilities) {
4822
4823 /*
4824 * Release any resources allocated by pci_ereport_setup()
4825 */
4826 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4827 DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4828 pci_ereport_teardown(dca->dca_dip);
4829 }
4830
4831 /*
4832 * Free callback mutex and un-register error callback if
4833 * error callback capable.
4834 */
4835 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4836 ddi_fm_handler_unregister(dca->dca_dip);
4837 }
4838
4839 /* Unregister from IO Fault Services */
4840 ddi_fm_fini(dca->dca_dip);
4841 DBG(dca, DWARN, "fm_capable() = 0x%x",
4842 ddi_fm_capable(dca->dca_dip));
4843 }
4844 }
4845
4846
4847 /*
4848 * The IO fault service error handling callback function
4849 */
4850 /*ARGSUSED*/
4851 static int
4852 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4853 {
4854 dca_t *dca = (dca_t *)impl_data;
4855
4856 pci_ereport_post(dip, err, NULL);
4857 if (err->fme_status == DDI_FM_FATAL) {
4858 dca_failure(dca, DDI_DATAPATH_FAULT,
4859 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4860 "fault PCI in FMA callback.");
4861 }
4862 return (err->fme_status);
4863 }
4864
4865
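/*
 * Validate an access or DMA handle against any outstanding FMA error.
 * On failure the ENA is incremented so the resulting ereport is
 * associated with the original error event.
 */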
4866 static int
4867 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4868 dca_fma_eclass_t eclass_index)
4869 {
4870 ddi_fm_error_t de;
4871 int version = 0;
4872
4873 ddi_fm_acc_err_get(handle, &de, version);
4874 if (de.fme_status != DDI_FM_OK) {
4875 dca_failure(dca, DDI_DATAPATH_FAULT,
4876 eclass_index, fm_ena_increment(de.fme_ena),
4877 CRYPTO_DEVICE_ERROR, "");
4878 return (DDI_FAILURE);
4879 }
4880
4881 return (DDI_SUCCESS);
4882 }
4883
4884 int
4885 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4886 dca_fma_eclass_t eclass_index)
4887 {
4888 ddi_fm_error_t de;
4889 int version = 0;
4890
4891 ddi_fm_dma_err_get(handle, &de, version);
4892 if (de.fme_status != DDI_FM_OK) {
4893 dca_failure(dca, DDI_DATAPATH_FAULT,
4894 eclass_index, fm_ena_increment(de.fme_ena),
4895 CRYPTO_DEVICE_ERROR, "");
4896 return (DDI_FAILURE);
4897 }
4898 return (DDI_SUCCESS);
4899 }
4900
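/*
 * Generate a fresh ENA for a new error event, or increment the
 * existing one when the report continues a prior event.
 */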
4901 static uint64_t
4902 dca_ena(uint64_t ena)
4903 {
4904 if (ena == 0)
4905 ena = fm_ena_generate(0, FM_ENA_FMT1);
4906 else
4907 ena = fm_ena_increment(ena);
4908 return (ena);
4909 }
4910
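/*
 * Select the model-specific ereport class string: a "500" substring in
 * the model name selects the sca500 table; otherwise the sca1000 table
 * is used.
 */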
4911 static char *
4912 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
4913 {
4914 if (strstr(model, "500"))
4915 return (dca_fma_eclass_sca500[index]);
4916 else
4917 return (dca_fma_eclass_sca1000[index]);
4918 }
4919