1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28 /*
29 * Deimos - cryptographic acceleration based upon Broadcom 582x.
30 */
31
32 #include <sys/types.h>
33 #include <sys/modctl.h>
34 #include <sys/conf.h>
35 #include <sys/devops.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/cmn_err.h>
39 #include <sys/varargs.h>
40 #include <sys/file.h>
41 #include <sys/stat.h>
42 #include <sys/kmem.h>
43 #include <sys/ioccom.h>
44 #include <sys/open.h>
45 #include <sys/cred.h>
46 #include <sys/kstat.h>
47 #include <sys/strsun.h>
48 #include <sys/note.h>
49 #include <sys/crypto/common.h>
50 #include <sys/crypto/spi.h>
51 #include <sys/ddifm.h>
52 #include <sys/fm/protocol.h>
53 #include <sys/fm/util.h>
54 #include <sys/fm/io/ddi.h>
55 #include <sys/crypto/dca.h>
56
57 /*
58 * Core Deimos driver.
59 */
60
61 static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
62 kmutex_t *);
63 static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
64 static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
65 static void dca_free_context_list(dca_t *dca);
66 static int dca_free_context_low(crypto_ctx_t *ctx);
67 static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
68 static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
69 static int dca_suspend(dca_t *);
70 static int dca_resume(dca_t *);
71 static int dca_init(dca_t *);
72 static int dca_reset(dca_t *, int);
73 static int dca_initworklist(dca_t *, dca_worklist_t *);
74 static void dca_uninit(dca_t *);
75 static void dca_initq(dca_listnode_t *);
76 static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
77 static dca_listnode_t *dca_dequeue(dca_listnode_t *);
78 static dca_listnode_t *dca_unqueue(dca_listnode_t *);
79 static dca_request_t *dca_newreq(dca_t *);
80 static dca_work_t *dca_getwork(dca_t *, int);
81 static void dca_freework(dca_work_t *);
82 static dca_work_t *dca_newwork(dca_t *);
83 static void dca_destroywork(dca_work_t *);
84 static void dca_schedule(dca_t *, int);
85 static void dca_reclaim(dca_t *, int);
86 static uint_t dca_intr(char *);
87 static void dca_failure(dca_t *, ddi_fault_location_t,
88 dca_fma_eclass_t index, uint64_t, int, char *, ...);
89 static void dca_jobtimeout(void *);
90 static int dca_drain(dca_t *);
91 static void dca_undrain(dca_t *);
92 static void dca_rejectjobs(dca_t *);
93
94 #ifdef SCHEDDELAY
95 static void dca_schedtimeout(void *);
96 #endif
97
98 /*
99 * Device operations.
100 */
/*
 * Device operations vector.  This driver exposes no character or
 * block device nodes (devo_cb_ops is NULL); it exists solely as a
 * crypto provider, so devo_getinfo is nodev.  Quiesce is not
 * implemented for this hardware.
 */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
115
/*
 * Identification strings.  IDENT names the driver module as a whole;
 * IDENT_SYM and IDENT_ASYM are appended to the per-instance provider
 * descriptions built in dca_attach() (which must stay under 32 chars).
 */
#define	IDENT		"PCI Crypto Accelerator"
#define	IDENT_SYM	"Crypto Accel Sym 2.0"
#define	IDENT_ASYM	"Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define	IDENT3	"PCI Crypto Accelerator Mod 2.0"

#define	VENDOR	"Sun Microsystems, Inc."

#define	STALETIME	(30 * SECOND)

#define	crypto_prov_notify	crypto_provider_notification
	/* A 28 char function name doesn't leave much line space */
129
130 /*
131 * Module linkage.
132 */
/*
 * Module linkage.  The module is registered both as a device driver
 * (modldrv) and as a crypto provider module (modlcrypto); both
 * linkage structures are chained into the single modlinkage below.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	IDENT,			/* drv_linkinfo */
	&devops,		/* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	IDENT3
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev */
	&modldrv,		/* ml_linkage */
	&modlcrypto,
	NULL
};
152
153 /*
154 * CSPI information (entry points, provider info, etc.)
155 */
156
157 /* Mechanisms for the symmetric cipher provider */
/*
 * Mechanisms for the symmetric cipher provider (MCR1): single- and
 * triple-DES in CBC mode.  Key sizes here are expressed in bytes
 * (CRYPTO_KEYSIZE_UNIT_IN_BYTES).
 */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
170
171 /* Mechanisms for the asymmetric cipher provider */
/*
 * Mechanisms for the asymmetric cipher provider (MCR2): DSA and RSA
 * (both raw X.509 and PKCS#1 padded forms).  Unlike the symmetric
 * table above, key sizes here are expressed in bits
 * (CRYPTO_KEYSIZE_UNIT_IN_BITS), converted from the byte limits.
 */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
203
204 static void dca_provider_status(crypto_provider_handle_t, uint_t *);
205
/* Control ops: only provider status reporting is supported. */
static crypto_control_ops_t dca_control_ops = {
	dca_provider_status
};
209
210 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
211 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
212 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
213 crypto_req_handle_t);
214 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
215 crypto_data_t *, crypto_req_handle_t);
216 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
217 crypto_req_handle_t);
218 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
219 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
220 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
221
222 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
223 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
224 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
225 crypto_req_handle_t);
226 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
227 crypto_data_t *, crypto_req_handle_t);
228 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
229 crypto_req_handle_t);
230 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
231 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
232 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
233
/*
 * Cipher (encrypt/decrypt) entry points, shared by both the symmetric
 * and asymmetric providers via dca_crypto_ops1/dca_crypto_ops2.
 */
static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};
246
247 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
248 crypto_spi_ctx_template_t, crypto_req_handle_t);
249 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
250 crypto_req_handle_t);
251 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
252 crypto_req_handle_t);
253 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
254 crypto_req_handle_t);
255 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
256 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
257 crypto_spi_ctx_template_t, crypto_req_handle_t);
258 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
259 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
260 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
261 crypto_req_handle_t);
262 static int dca_sign_recover_atomic(crypto_provider_handle_t,
263 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
264 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
265
/* Signing entry points (asymmetric provider only). */
static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};
276
277 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
278 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
279 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
280 crypto_req_handle_t);
281 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
282 crypto_req_handle_t);
283 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
284 crypto_req_handle_t);
285 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
286 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
287 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
288 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
289 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
290 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
291 crypto_data_t *, crypto_req_handle_t);
292 static int dca_verify_recover_atomic(crypto_provider_handle_t,
293 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
294 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
295
/* Verification entry points (asymmetric provider only). */
static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};
306
307 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
308 uchar_t *, size_t, crypto_req_handle_t);
309
/* RNG entry points: generation only, no external seeding (first slot NULL). */
static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,
	dca_generate_random
};
314
315 static int ext_info_sym(crypto_provider_handle_t prov,
316 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
317 static int ext_info_asym(crypto_provider_handle_t prov,
318 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
319 static int ext_info_base(crypto_provider_handle_t prov,
320 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
321
/*
 * Provider-management ops for the symmetric (MCR1) provider.  Only
 * extended-info reporting is supported; token/PIN management is not.
 */
static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

/* Provider-management ops for the asymmetric (MCR2) provider. */
static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};
335
int dca_free_context(crypto_ctx_t *);

/* Context ops: no copyin/copyout of context state, only teardown. */
static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,
	dca_free_context
};
342
343 /* Operations for the symmetric cipher provider */
/* Operations for the symmetric cipher provider (MCR1): ciphers only. */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};
360
361 /* Operations for the asymmetric cipher provider */
/*
 * Operations for the asymmetric cipher provider (MCR2): ciphers,
 * sign/verify, and random number generation.
 */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};
378
379 /* Provider information for the symmetric cipher provider */
/*
 * Provider information for the symmetric cipher provider.  The
 * description, dev_info pointer and handle are filled in per-instance
 * by dca_attach() just before crypto_register_provider().
 */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
392
393 /* Provider information for the asymmetric cipher provider */
/*
 * Provider information for the asymmetric cipher provider.  As with
 * dca_prov_info1, the description/dev/handle fields are populated
 * per-instance during dca_attach().
 */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
406
407 /* Convenience macros */
408 #define DCA_SOFTC_FROM_CTX(ctx) ((dca_t *)(ctx)->cc_provider)
409 #define DCA_MECH_FROM_CTX(ctx) \
410 (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
411
412 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
413 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
414 dca_chain_t *head, int *n_chain);
415 static uint64_t dca_ena(uint64_t ena);
416 static caddr_t dca_bufdaddr_out(crypto_data_t *data);
417 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
418 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
419 dca_fma_eclass_t eclass_index);
420
421 static void dca_fma_init(dca_t *dca);
422 static void dca_fma_fini(dca_t *dca);
423 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
424 const void *impl_data);
425
426
/*
 * Table of supported PCI devices, matched in dca_attach() first by
 * subsystem vendor/device id and then by the generic vendor/device id.
 */
static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{ 0x14e4, 0x5820, "Broadcom 5820" },
	{ 0x14e4, 0x5821, "Broadcom 5821" },
	{ 0x14e4, 0x5822, "Broadcom 5822" },
	{ 0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun specific OEMd variants */
	{ 0x108e, 0x5454, "SCA" },
	{ 0x108e, 0x5455, "SCA 1000" },
	{ 0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
	{ 0x108e, 0x1, "SCA 500" },
};
440
441 /*
442 * Device attributes.
443 */
/*
 * Device access attributes.  CSR register accesses (dca_regsattr) use
 * V1 attributes with FLAGERR so FMA can flag failed accesses; plain
 * device and buffer mappings use the older V0 attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Data buffers are never byte-swapped. */
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA attributes: 32-bit addressing, 64-byte alignment, 16MB maximum
 * transfer.  x86 allows a 512-entry scatter/gather list; other
 * platforms are limited to a single cookie.
 */
static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(__x86)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
481
/* Soft-state anchor for all instances; initialized in _init(). */
static void *dca_state = NULL;
/* Minimum job size (bytes) before DMA is preferred; tunable. */
int dca_mindma = 2500;

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};
500
501 /*
502 * DDI entry points.
503 */
504 int
_init(void)505 _init(void)
506 {
507 int rv;
508
509 DBG(NULL, DMOD, "dca: in _init");
510
511 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
512 /* this should *never* happen! */
513 return (rv);
514 }
515
516 if ((rv = mod_install(&modlinkage)) != 0) {
517 /* cleanup here */
518 ddi_soft_state_fini(&dca_state);
519 return (rv);
520 }
521
522 return (0);
523 }
524
525 int
_fini(void)526 _fini(void)
527 {
528 int rv;
529
530 DBG(NULL, DMOD, "dca: in _fini");
531
532 if ((rv = mod_remove(&modlinkage)) == 0) {
533 /* cleanup here */
534 ddi_soft_state_fini(&dca_state);
535 }
536 return (rv);
537 }
538
539 int
_info(struct modinfo * modinfop)540 _info(struct modinfo *modinfop)
541 {
542 DBG(NULL, DMOD, "dca: in _info");
543
544 return (mod_info(&modlinkage, modinfop));
545 }
546
547 int
dca_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)548 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
549 {
550 ddi_acc_handle_t pci;
551 int instance;
552 ddi_iblock_cookie_t ibc;
553 int intr_added = 0;
554 dca_t *dca;
555 ushort_t venid;
556 ushort_t devid;
557 ushort_t revid;
558 ushort_t subsysid;
559 ushort_t subvenid;
560 int i;
561 int ret;
562 char ID[64];
563 static char *unknowndev = "Unknown device";
564
565 #if DEBUG
566 /* these are only used for debugging */
567 ushort_t pcicomm;
568 ushort_t pcistat;
569 uchar_t cachelinesz;
570 uchar_t mingnt;
571 uchar_t maxlat;
572 uchar_t lattmr;
573 #endif
574
575 instance = ddi_get_instance(dip);
576
577 DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
578
579 switch (cmd) {
580 case DDI_RESUME:
581 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
582 dca_diperror(dip, "no soft state in detach");
583 return (DDI_FAILURE);
584 }
585 /* assumption: we won't be DDI_DETACHed until we return */
586 return (dca_resume(dca));
587 case DDI_ATTACH:
588 break;
589 default:
590 return (DDI_FAILURE);
591 }
592
593 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
594 dca_diperror(dip, "slot does not support PCI bus-master");
595 return (DDI_FAILURE);
596 }
597
598 if (ddi_intr_hilevel(dip, 0) != 0) {
599 dca_diperror(dip, "hilevel interrupts not supported");
600 return (DDI_FAILURE);
601 }
602
603 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
604 dca_diperror(dip, "unable to setup PCI config handle");
605 return (DDI_FAILURE);
606 }
607
608 /* common PCI attributes */
609 venid = pci_config_get16(pci, PCI_VENID);
610 devid = pci_config_get16(pci, PCI_DEVID);
611 revid = pci_config_get8(pci, PCI_REVID);
612 subvenid = pci_config_get16(pci, PCI_SUBVENID);
613 subsysid = pci_config_get16(pci, PCI_SUBSYSID);
614
615 /*
616 * Broadcom-specific timings.
617 * We disable these timers/counters since they can cause
618 * incorrect false failures when the bus is just a little
619 * bit slow, or busy.
620 */
621 pci_config_put8(pci, PCI_TRDYTO, 0);
622 pci_config_put8(pci, PCI_RETRIES, 0);
623
624 /* initialize PCI access settings */
625 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
626 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
627
628 /* set up our PCI latency timer */
629 pci_config_put8(pci, PCI_LATTMR, 0x40);
630
631 #if DEBUG
632 /* read registers (for debugging) */
633 pcicomm = pci_config_get16(pci, PCI_COMM);
634 pcistat = pci_config_get16(pci, PCI_STATUS);
635 cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
636 mingnt = pci_config_get8(pci, PCI_MINGNT);
637 maxlat = pci_config_get8(pci, PCI_MAXLAT);
638 lattmr = pci_config_get8(pci, PCI_LATTMR);
639 #endif
640
641 pci_config_teardown(&pci);
642
643 if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
644 dca_diperror(dip, "unable to get iblock cookie");
645 return (DDI_FAILURE);
646 }
647
648 if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
649 dca_diperror(dip, "unable to allocate soft state");
650 return (DDI_FAILURE);
651 }
652
653 dca = ddi_get_soft_state(dca_state, instance);
654 ASSERT(dca != NULL);
655 dca->dca_dip = dip;
656 WORKLIST(dca, MCR1)->dwl_prov = 0;
657 WORKLIST(dca, MCR2)->dwl_prov = 0;
658 /* figure pagesize */
659 dca->dca_pagesize = ddi_ptob(dip, 1);
660
661 /*
662 * Search for the device in our supported devices table. This
663 * is here for two reasons. First, we want to ensure that
664 * only Sun-qualified (and presumably Sun-labeled) devices can
665 * be used with this driver. Second, some devices have
666 * specific differences. E.g. the 5821 has support for a
667 * special mode of RC4, deeper queues, power management, and
668 * other changes. Also, the export versions of some of these
669 * chips don't support RC4 or 3DES, so we catch that here.
670 *
671 * Note that we only look at the upper nibble of the device
672 * id, which is used to distinguish export vs. domestic
673 * versions of the chip. (The lower nibble is used for
674 * stepping information.)
675 */
676 for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
677 /*
678 * Try to match the subsystem information first.
679 */
680 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
681 subsysid && (subsysid == dca_devices[i].dd_device_id)) {
682 dca->dca_model = dca_devices[i].dd_model;
683 dca->dca_devid = dca_devices[i].dd_device_id;
684 break;
685 }
686 /*
687 * Failing that, try the generic vendor and device id.
688 * Even if we find a match, we keep searching anyway,
689 * since we would prefer to find a match based on the
690 * subsystem ids.
691 */
692 if ((venid == dca_devices[i].dd_vendor_id) &&
693 (devid == dca_devices[i].dd_device_id)) {
694 dca->dca_model = dca_devices[i].dd_model;
695 dca->dca_devid = dca_devices[i].dd_device_id;
696 }
697 }
698 /* try and handle an unrecognized device */
699 if (dca->dca_model == NULL) {
700 dca->dca_model = unknowndev;
701 dca_error(dca, "device not recognized, not supported");
702 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
703 i, venid, devid, revid);
704 }
705
706 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
707 dca->dca_model) != DDI_SUCCESS) {
708 dca_error(dca, "unable to create description property");
709 return (DDI_FAILURE);
710 }
711
712 DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
713 pcicomm, pcistat, cachelinesz);
714 DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
715 mingnt, maxlat, lattmr);
716
717 /*
718 * initialize locks, etc.
719 */
720 (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
721
722 /* use RNGSHA1 by default */
723 if (ddi_getprop(DDI_DEV_T_ANY, dip,
724 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
725 dca->dca_flags |= DCA_RNGSHA1;
726 }
727
728 /* initialize FMA */
729 dca_fma_init(dca);
730
731 /* initialize some key data structures */
732 if (dca_init(dca) != DDI_SUCCESS) {
733 goto failed;
734 }
735
736 /* initialize kstats */
737 dca_ksinit(dca);
738
739 /* setup access to registers */
740 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
741 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
742 dca_error(dca, "unable to map registers");
743 goto failed;
744 }
745
746 DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
747 DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
748 DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
749 DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
750 DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
751
752 /* reset the chip */
753 if (dca_reset(dca, 0) < 0) {
754 goto failed;
755 }
756
757 /* initialize the chip */
758 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
759 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
760 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
761 goto failed;
762 }
763
764 /* add the interrupt */
765 if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
766 (void *)dca) != DDI_SUCCESS) {
767 DBG(dca, DWARN, "ddi_add_intr failed");
768 goto failed;
769 } else {
770 intr_added = 1;
771 }
772
773 /* enable interrupts on the device */
774 /*
775 * XXX: Note, 5820A1 errata indicates that this may clobber
776 * bits 24 and 23, which affect the speed of the RNG. Since
777 * we always want to run in full-speed mode, this should be
778 * harmless.
779 */
780 if (dca->dca_devid == 0x5825) {
781 /* for 5825 - increase the DMA read size */
782 SETBIT(dca, CSR_DMACTL,
783 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
784 } else {
785 SETBIT(dca, CSR_DMACTL,
786 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
787 }
788 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
789 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
790 goto failed;
791 }
792
793 /* register MCR1 with the crypto framework */
794 /* Be careful not to exceed 32 chars */
795 (void) sprintf(ID, "%s/%d %s",
796 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
797 dca_prov_info1.pi_provider_description = ID;
798 dca_prov_info1.pi_provider_dev.pd_hw = dip;
799 dca_prov_info1.pi_provider_handle = dca;
800 if ((ret = crypto_register_provider(&dca_prov_info1,
801 &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
802 cmn_err(CE_WARN,
803 "crypto_register_provider() failed (%d) for MCR1", ret);
804 goto failed;
805 }
806
807 /* register MCR2 with the crypto framework */
808 /* Be careful not to exceed 32 chars */
809 (void) sprintf(ID, "%s/%d %s",
810 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
811 dca_prov_info2.pi_provider_description = ID;
812 dca_prov_info2.pi_provider_dev.pd_hw = dip;
813 dca_prov_info2.pi_provider_handle = dca;
814 if ((ret = crypto_register_provider(&dca_prov_info2,
815 &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
816 cmn_err(CE_WARN,
817 "crypto_register_provider() failed (%d) for MCR2", ret);
818 goto failed;
819 }
820
821 crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
822 CRYPTO_PROVIDER_READY);
823 crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
824 CRYPTO_PROVIDER_READY);
825
826 /* Initialize the local random number pool for this instance */
827 if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
828 goto failed;
829 }
830
831 mutex_enter(&dca->dca_intrlock);
832 dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
833 drv_usectohz(SECOND));
834 mutex_exit(&dca->dca_intrlock);
835
836 ddi_set_driver_private(dip, (caddr_t)dca);
837
838 ddi_report_dev(dip);
839
840 if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
841 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
842 }
843
844 return (DDI_SUCCESS);
845
846 failed:
847 /* unregister from the crypto framework */
848 if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
849 (void) crypto_unregister_provider(
850 WORKLIST(dca, MCR1)->dwl_prov);
851 }
852 if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
853 (void) crypto_unregister_provider(
854 WORKLIST(dca, MCR2)->dwl_prov);
855 }
856 if (intr_added) {
857 CLRBIT(dca, CSR_DMACTL,
858 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
859 /* unregister intr handler */
860 ddi_remove_intr(dip, 0, dca->dca_icookie);
861 }
862 if (dca->dca_regs_handle) {
863 ddi_regs_map_free(&dca->dca_regs_handle);
864 }
865 if (dca->dca_intrstats) {
866 kstat_delete(dca->dca_intrstats);
867 }
868 if (dca->dca_ksp) {
869 kstat_delete(dca->dca_ksp);
870 }
871 dca_uninit(dca);
872
873 /* finalize FMA */
874 dca_fma_fini(dca);
875
876 mutex_destroy(&dca->dca_intrlock);
877 ddi_soft_state_free(dca_state, instance);
878 return (DDI_FAILURE);
879
880 }
881
/*
 * Detach entry point.  Handles DDI_SUSPEND by delegating to
 * dca_suspend(), and DDI_DETACH by unwinding everything dca_attach()
 * set up: unregister from the crypto framework first (this can fail
 * if the framework refuses, in which case detach is refused), then
 * free contexts, drain queued jobs, cancel the watchdog timeout,
 * quiesce the hardware, and release all instance resources.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	dca_t *dca;
	timeout_id_t tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list.  Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/* untimeout the timeouts */
	/* clear dca_jobtid under the lock so the handler won't rearm */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}
981
/*
 * Resume a previously suspended instance (DDI_RESUME).  Reprograms
 * the PCI configuration registers that dca_attach() set up (they are
 * lost across suspend), resets the chip, restores the card CSRs, and
 * re-enables job scheduling.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}
1042
1043 int
dca_suspend(dca_t * dca)1044 dca_suspend(dca_t *dca)
1045 {
1046 if ((dca_drain(dca)) != 0) {
1047 return (DDI_FAILURE);
1048 }
1049 if (dca_reset(dca, 0) < 0) {
1050 dca_error(dca, "unable to reset device during suspend");
1051 return (DDI_FAILURE);
1052 }
1053 return (DDI_SUCCESS);
1054 }
1055
1056 /*
1057 * Hardware access stuff.
1058 */
/*
 * Reset the chip: assert DMACTL_RESET, then poll in 100usec steps (up
 * to ~10msec) for the bit to clear.  Returns 0 on success, -1 on
 * failure.  When 'failreset' is nonzero, the FMA access checks and the
 * failure report are skipped -- callers use that when resetting a
 * device already known to be bad.
 */
int
dca_reset(dca_t *dca, int failreset)
{
    int i;

    /* nothing to reset if the registers were never mapped */
    if (dca->dca_regs_handle == NULL) {
        return (-1);
    }

    PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
    if (!failreset) {
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
            return (-1);
    }

    /* now wait for a reset */
    for (i = 1; i < 100; i++) {
        uint32_t dmactl;
        drv_usecwait(100);
        dmactl = GETCSR(dca, CSR_DMACTL);
        if (!failreset) {
            if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (-1);
        }
        if ((dmactl & DMACTL_RESET) == 0) {
            DBG(dca, DCHATTY, "reset in %d usec", i * 100);
            return (0);
        }
    }
    if (!failreset) {
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "timeout waiting for reset after %d usec", i * 100);
    }
    return (-1);
}
1097
/*
 * Initialize one worklist: its locks and cv (created with the
 * interrupt block cookie, so they may be acquired at interrupt level),
 * its four queues, MAXWORK work structures, and dwl_hiwater +
 * MAXWORK * MAXREQSPERMCR request structures.  The caller must set
 * dwl_hiwater before calling.  Returns DDI_FAILURE if any allocation
 * fails; items already placed on the free queues are reclaimed later
 * by dca_uninit().
 */
int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
    int i;
    int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

    /*
     * Set up work queue.
     */
    mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
        dca->dca_icookie);
    mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

    mutex_enter(&wlp->dwl_lock);

    dca_initq(&wlp->dwl_freereqs);
    dca_initq(&wlp->dwl_waitq);
    dca_initq(&wlp->dwl_freework);
    dca_initq(&wlp->dwl_runq);

    /* preallocate the work structures (MCR memory) */
    for (i = 0; i < MAXWORK; i++) {
        dca_work_t *workp;

        if ((workp = dca_newwork(dca)) == NULL) {
            dca_error(dca, "unable to allocate work");
            mutex_exit(&wlp->dwl_lock);
            return (DDI_FAILURE);
        }
        workp->dw_wlp = wlp;
        dca_freework(workp);
    }
    mutex_exit(&wlp->dwl_lock);

    /* preallocate the request structures */
    for (i = 0; i < reqprealloc; i++) {
        dca_request_t *reqp;

        if ((reqp = dca_newreq(dca)) == NULL) {
            dca_error(dca, "unable to allocate request");
            return (DDI_FAILURE);
        }
        reqp->dr_dca = dca;
        reqp->dr_wlp = wlp;
        dca_freereq(reqp);
    }
    return (DDI_SUCCESS);
}
1146
1147 int
dca_init(dca_t * dca)1148 dca_init(dca_t *dca)
1149 {
1150 dca_worklist_t *wlp;
1151
1152 /* Initialize the private context list and the corresponding lock. */
1153 mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
1154 dca_initq(&dca->dca_ctx_list);
1155
1156 /*
1157 * MCR1 algorithms.
1158 */
1159 wlp = WORKLIST(dca, MCR1);
1160 (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
1161 ddi_get_instance(dca->dca_dip));
1162 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
1163 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1164 "mcr1_lowater", MCR1LOWATER);
1165 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
1166 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1167 "mcr1_hiwater", MCR1HIWATER);
1168 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
1169 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1170 "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
1171 wlp->dwl_dca = dca;
1172 wlp->dwl_mcr = MCR1;
1173 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
1174 return (DDI_FAILURE);
1175 }
1176
1177 /*
1178 * MCR2 algorithms.
1179 */
1180 wlp = WORKLIST(dca, MCR2);
1181 (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
1182 ddi_get_instance(dca->dca_dip));
1183 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
1184 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1185 "mcr2_lowater", MCR2LOWATER);
1186 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
1187 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1188 "mcr2_hiwater", MCR2HIWATER);
1189 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
1190 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1191 "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
1192 wlp->dwl_dca = dca;
1193 wlp->dwl_mcr = MCR2;
1194 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
1195 return (DDI_FAILURE);
1196 }
1197 return (DDI_SUCCESS);
1198 }
1199
1200 /*
1201 * Uninitialize worklists. This routine should only be called when no
1202 * active jobs (hence DMA mappings) exist. One way to ensure this is
1203 * to unregister from kCF before calling this routine. (This is done
1204 * e.g. in detach(9e).)
1205 */
void
dca_uninit(dca_t *dca)
{
    int mcr;

    mutex_destroy(&dca->dca_ctx_list_lock);

    for (mcr = MCR1; mcr <= MCR2; mcr++) {
        dca_worklist_t *wlp = WORKLIST(dca, mcr);
        dca_work_t *workp;
        dca_request_t *reqp;

        /*
         * NOTE(review): when the regs handle is gone, this worklist's
         * queues, locks and cv are left untouched -- presumably attach
         * failed before dca_initworklist() ever ran, so there is
         * nothing valid to tear down; verify against attach.
         */
        if (dca->dca_regs_handle == NULL) {
            continue;
        }

        /* destroy every cached work structure and its MCR DMA memory */
        mutex_enter(&wlp->dwl_lock);
        while ((workp = dca_getwork(dca, mcr)) != NULL) {
            dca_destroywork(workp);
        }
        mutex_exit(&wlp->dwl_lock);
        /* destroy every cached request (tryhard=0: no new allocation) */
        while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
            dca_destroyreq(reqp);
        }

        mutex_destroy(&wlp->dwl_lock);
        mutex_destroy(&wlp->dwl_freereqslock);
        mutex_destroy(&wlp->dwl_freelock);
        cv_destroy(&wlp->dwl_cv);
        wlp->dwl_prov = 0;
    }
}
1238
1239 static void
dca_enlist2(dca_listnode_t * q,dca_listnode_t * node,kmutex_t * lock)1240 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1241 {
1242 if (!q || !node)
1243 return;
1244
1245 mutex_enter(lock);
1246 node->dl_next2 = q;
1247 node->dl_prev2 = q->dl_prev2;
1248 node->dl_next2->dl_prev2 = node;
1249 node->dl_prev2->dl_next2 = node;
1250 mutex_exit(lock);
1251 }
1252
1253 static void
dca_rmlist2(dca_listnode_t * node,kmutex_t * lock)1254 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1255 {
1256 if (!node)
1257 return;
1258
1259 mutex_enter(lock);
1260 node->dl_next2->dl_prev2 = node->dl_prev2;
1261 node->dl_prev2->dl_next2 = node->dl_next2;
1262 node->dl_next2 = NULL;
1263 node->dl_prev2 = NULL;
1264 mutex_exit(lock);
1265 }
1266
1267 static dca_listnode_t *
dca_delist2(dca_listnode_t * q,kmutex_t * lock)1268 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1269 {
1270 dca_listnode_t *node;
1271
1272 mutex_enter(lock);
1273 if ((node = q->dl_next2) == q) {
1274 mutex_exit(lock);
1275 return (NULL);
1276 }
1277
1278 node->dl_next2->dl_prev2 = node->dl_prev2;
1279 node->dl_prev2->dl_next2 = node->dl_next2;
1280 node->dl_next2 = NULL;
1281 node->dl_prev2 = NULL;
1282 mutex_exit(lock);
1283
1284 return (node);
1285 }
1286
1287 void
dca_initq(dca_listnode_t * q)1288 dca_initq(dca_listnode_t *q)
1289 {
1290 q->dl_next = q;
1291 q->dl_prev = q;
1292 q->dl_next2 = q;
1293 q->dl_prev2 = q;
1294 }
1295
1296 void
dca_enqueue(dca_listnode_t * q,dca_listnode_t * node)1297 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1298 {
1299 /*
1300 * Enqueue submits at the "tail" of the list, i.e. just
1301 * behind the sentinel.
1302 */
1303 node->dl_next = q;
1304 node->dl_prev = q->dl_prev;
1305 node->dl_next->dl_prev = node;
1306 node->dl_prev->dl_next = node;
1307 }
1308
1309 void
dca_rmqueue(dca_listnode_t * node)1310 dca_rmqueue(dca_listnode_t *node)
1311 {
1312 node->dl_next->dl_prev = node->dl_prev;
1313 node->dl_prev->dl_next = node->dl_next;
1314 node->dl_next = NULL;
1315 node->dl_prev = NULL;
1316 }
1317
1318 dca_listnode_t *
dca_dequeue(dca_listnode_t * q)1319 dca_dequeue(dca_listnode_t *q)
1320 {
1321 dca_listnode_t *node;
1322 /*
1323 * Dequeue takes from the "head" of the list, i.e. just after
1324 * the sentinel.
1325 */
1326 if ((node = q->dl_next) == q) {
1327 /* queue is empty */
1328 return (NULL);
1329 }
1330 dca_rmqueue(node);
1331 return (node);
1332 }
1333
1334 /* this is the opposite of dequeue, it takes things off in LIFO order */
1335 dca_listnode_t *
dca_unqueue(dca_listnode_t * q)1336 dca_unqueue(dca_listnode_t *q)
1337 {
1338 dca_listnode_t *node;
1339 /*
1340 * unqueue takes from the "tail" of the list, i.e. just before
1341 * the sentinel.
1342 */
1343 if ((node = q->dl_prev) == q) {
1344 /* queue is empty */
1345 return (NULL);
1346 }
1347 dca_rmqueue(node);
1348 return (node);
1349 }
1350
1351 dca_listnode_t *
dca_peekqueue(dca_listnode_t * q)1352 dca_peekqueue(dca_listnode_t *q)
1353 {
1354 dca_listnode_t *node;
1355
1356 if ((node = q->dl_next) == q) {
1357 return (NULL);
1358 } else {
1359 return (node);
1360 }
1361 }
1362
1363 /*
1364 * Interrupt service routine.
1365 */
uint_t
dca_intr(char *arg)
{
    dca_t *dca = (dca_t *)arg;
    uint32_t status;

    mutex_enter(&dca->dca_intrlock);
    /*
     * Latch the interrupt status and write the asserted bits back
     * (presumably write-one-to-clear -- verify against 582x docs).
     */
    status = GETCSR(dca, CSR_DMASTAT);
    PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        /* register access has gone bad; nothing more we can do here */
        mutex_exit(&dca->dca_intrlock);
        return ((uint_t)DDI_FAILURE);
    }

    DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

    if ((status & DMASTAT_INTERRUPTS) == 0) {
        /* increment spurious interrupt kstat */
        if (dca->dca_intrstats) {
            KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
        }
        mutex_exit(&dca->dca_intrlock);
        return (DDI_INTR_UNCLAIMED);
    }

    if (dca->dca_intrstats) {
        KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
    }
    if (status & DMASTAT_MCR1INT) {
        DBG(dca, DINTR, "MCR1 interrupted");
        /* push queued work to the chip, then collect completed work */
        mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
        dca_schedule(dca, MCR1);
        dca_reclaim(dca, MCR1);
        mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
    }

    if (status & DMASTAT_MCR2INT) {
        DBG(dca, DINTR, "MCR2 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
        dca_schedule(dca, MCR2);
        dca_reclaim(dca, MCR2);
        mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
    }

    if (status & DMASTAT_ERRINT) {
        uint32_t erraddr;
        erraddr = GETCSR(dca, CSR_DMAEA);
        /* the intrlock is dropped before reporting the failure */
        mutex_exit(&dca->dca_intrlock);

        /*
         * bit 1 of the error address indicates failure during
         * read if set, during write otherwise.
         */
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "DMA master access error %s address 0x%x",
            erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
        return (DDI_INTR_CLAIMED);
    }

    mutex_exit(&dca->dca_intrlock);

    return (DDI_INTR_CLAIMED);
}
1431
1432 /*
1433 * Reverse a string of bytes from s1 into s2. The reversal happens
1434 * from the tail of s1. If len1 < len2, then null bytes will be
1435 * padded to the end of s2. If len2 < len1, then (presumably null)
1436 * bytes will be dropped from the start of s1.
1437 *
1438 * The rationale here is that when s1 (source) is shorter, then we
1439 * are reversing from big-endian ordering, into device ordering, and
1440 * want to add some extra nulls to the tail (MSB) side of the device.
1441 *
1442 * Similarly, when s2 (dest) is shorter, then we are truncating what
1443 * are presumably null MSB bits from the device.
1444 *
1445 * There is an expectation when reversing from the device back into
1446 * big-endian, that the number of bytes to reverse and the target size
1447 * will match, and no truncation or padding occurs.
1448 */
/*
 * Byte-reverse the tail of s1 into s2.  Copying proceeds from the last
 * byte of s1 toward its start; if s2 is longer than s1 the remainder
 * of s2 is zero-filled, and if s2 is shorter the leading bytes of s1
 * are dropped.  (See the block comment above for the rationale.)
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
    char *from;
    char *to = (char *)s2;

    /* no source bytes at all: the destination is simply zeroed */
    if (len1 == 0) {
        while (len2-- > 0)
            *to++ = 0;
        return;
    }

    from = (char *)s1 + len1 - 1;
    /* copy tail-first until either the source or destination runs out */
    while (len2 > 0 && from >= (char *)s1) {
        *to++ = *from--;
        len2--;
    }
    /* pad the remaining (MSB-side) destination bytes with zeros */
    while (len2-- > 0)
        *to++ = 0;
}
1471
1472 uint16_t
dca_padfull(int num)1473 dca_padfull(int num)
1474 {
1475 if (num <= 512) {
1476 return (BITS2BYTES(512));
1477 }
1478 if (num <= 768) {
1479 return (BITS2BYTES(768));
1480 }
1481 if (num <= 1024) {
1482 return (BITS2BYTES(1024));
1483 }
1484 if (num <= 1536) {
1485 return (BITS2BYTES(1536));
1486 }
1487 if (num <= 2048) {
1488 return (BITS2BYTES(2048));
1489 }
1490 return (0);
1491 }
1492
1493 uint16_t
dca_padhalf(int num)1494 dca_padhalf(int num)
1495 {
1496 if (num <= 256) {
1497 return (BITS2BYTES(256));
1498 }
1499 if (num <= 384) {
1500 return (BITS2BYTES(384));
1501 }
1502 if (num <= 512) {
1503 return (BITS2BYTES(512));
1504 }
1505 if (num <= 768) {
1506 return (BITS2BYTES(768));
1507 }
1508 if (num <= 1024) {
1509 return (BITS2BYTES(1024));
1510 }
1511 return (0);
1512 }
1513
/*
 * Allocate a work structure together with its DMA-able MCR memory:
 * a DMA handle, a page-rounded kernel buffer, and a binding that
 * yields the MCR's device-visible address.  Returns NULL on failure;
 * partially built state is released through dca_destroywork().
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
    dca_work_t *workp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;

    workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA handle");
        dca_destroywork(workp);
        return (NULL);
    }

    /* whole pages; 'size' returns the actual (rounded) length */
    rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
        ROUNDUP(MCR_SIZE, dca->dca_pagesize),
        &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
        &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
        workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
        DDI_DMA_SLEEP, NULL, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "unable to map MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    /*
     * Only the first cookie is recorded; dca_dmaattr presumably
     * constrains the MCR to a single segment -- verify.
     */
    workp->dw_mcr_paddr = c.dmac_address;
    return (workp);
}
1555
1556 void
dca_destroywork(dca_work_t * workp)1557 dca_destroywork(dca_work_t *workp)
1558 {
1559 if (workp->dw_mcr_paddr) {
1560 (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
1561 }
1562 if (workp->dw_mcr_acch) {
1563 ddi_dma_mem_free(&workp->dw_mcr_acch);
1564 }
1565 if (workp->dw_mcr_dmah) {
1566 ddi_dma_free_handle(&workp->dw_mcr_dmah);
1567 }
1568 kmem_free(workp, sizeof (dca_work_t));
1569 }
1570
/*
 * Allocate and initialize a request structure: DMA-backed context and
 * descriptor memory, DMA-backed scratch input/output buffers, and the
 * chain DMA handles used to bind caller buffers later.  The context
 * page is laid out as:
 *
 *   [context (CTX_MAXLENGTH)] [ibuf descriptors] [obuf descriptors]
 *   [dynamic chain descriptors, starting at dr_offset]
 *
 * Returns NULL on any failure; dca_destroyreq() releases whatever was
 * built up to that point.
 */
dca_request_t *
dca_newreq(dca_t *dca)
{
    dca_request_t *reqp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;
    int n_chain = 0;

    size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

    reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

    reqp->dr_dca = dca;

    /*
     * Setup the DMA region for the context and descriptors.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
        NULL, &reqp->dr_ctx_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* for driver hardening, allocate in whole pages */
    rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
        ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
        &reqp->dr_ctx_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
        reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
        DDI_DMA_SLEEP, 0, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "failed binding request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ctx_paddr = c.dmac_address;

    reqp->dr_dma_size = size;

    /*
     * Set up the dma for our scratch/shared buffers.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating ibuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating obuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* chain handles bind caller buffers later in dca_bindchains() */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_in DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_out DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /*
     * for driver hardening, allocate in whole pages.
     */
    size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
    /*
     * We could kmem_alloc for Sparc too. However, it gives worse
     * performance when transferring more than one page data. For example,
     * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
     * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
     * the same throughput.
     */
    rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
        &size, &reqp->dr_ibuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
        &size, &reqp->dr_obuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* Skip the used portion in the context page */
    reqp->dr_offset = CTX_MAXLENGTH;
    /* bind the scratch input buffer and write its descriptors */
    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
        DDI_DMA_WRITE | DDI_DMA_STREAMING,
        &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
    /* Skip the space used by the input buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    /* bind the scratch output buffer and write its descriptors */
    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
        DDI_DMA_READ | DDI_DMA_STREAMING,
        &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
    /* Skip the space used by the output buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
        reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
    return (reqp);
}
1714
1715 void
dca_destroyreq(dca_request_t * reqp)1716 dca_destroyreq(dca_request_t *reqp)
1717 {
1718
1719 /*
1720 * Clean up DMA for the context structure.
1721 */
1722 if (reqp->dr_ctx_paddr) {
1723 (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
1724 }
1725
1726 if (reqp->dr_ctx_acch) {
1727 ddi_dma_mem_free(&reqp->dr_ctx_acch);
1728 }
1729
1730 if (reqp->dr_ctx_dmah) {
1731 ddi_dma_free_handle(&reqp->dr_ctx_dmah);
1732 }
1733
1734 /*
1735 * Clean up DMA for the scratch buffer.
1736 */
1737 if (reqp->dr_ibuf_paddr) {
1738 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
1739 }
1740 if (reqp->dr_obuf_paddr) {
1741 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
1742 }
1743
1744 if (reqp->dr_ibuf_acch) {
1745 ddi_dma_mem_free(&reqp->dr_ibuf_acch);
1746 }
1747 if (reqp->dr_obuf_acch) {
1748 ddi_dma_mem_free(&reqp->dr_obuf_acch);
1749 }
1750
1751 if (reqp->dr_ibuf_dmah) {
1752 ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
1753 }
1754 if (reqp->dr_obuf_dmah) {
1755 ddi_dma_free_handle(&reqp->dr_obuf_dmah);
1756 }
1757 /*
1758 * These two DMA handles should have been unbinded in
1759 * dca_unbindchains() function
1760 */
1761 if (reqp->dr_chain_in_dmah) {
1762 ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
1763 }
1764 if (reqp->dr_chain_out_dmah) {
1765 ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
1766 }
1767
1768 kmem_free(reqp, sizeof (dca_request_t));
1769 }
1770
1771 dca_work_t *
dca_getwork(dca_t * dca,int mcr)1772 dca_getwork(dca_t *dca, int mcr)
1773 {
1774 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1775 dca_work_t *workp;
1776
1777 mutex_enter(&wlp->dwl_freelock);
1778 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1779 mutex_exit(&wlp->dwl_freelock);
1780 if (workp) {
1781 int nreqs;
1782 bzero(workp->dw_mcr_kaddr, 8);
1783
1784 /* clear out old requests */
1785 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1786 workp->dw_reqs[nreqs] = NULL;
1787 }
1788 }
1789 return (workp);
1790 }
1791
1792 void
dca_freework(dca_work_t * workp)1793 dca_freework(dca_work_t *workp)
1794 {
1795 mutex_enter(&workp->dw_wlp->dwl_freelock);
1796 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1797 mutex_exit(&workp->dw_wlp->dwl_freelock);
1798 }
1799
1800 dca_request_t *
dca_getreq(dca_t * dca,int mcr,int tryhard)1801 dca_getreq(dca_t *dca, int mcr, int tryhard)
1802 {
1803 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1804 dca_request_t *reqp;
1805
1806 mutex_enter(&wlp->dwl_freereqslock);
1807 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1808 mutex_exit(&wlp->dwl_freereqslock);
1809 if (reqp) {
1810 reqp->dr_flags = 0;
1811 reqp->dr_callback = NULL;
1812 } else if (tryhard) {
1813 /*
1814 * failed to get a free one, try an allocation, the hard way.
1815 * XXX: Kstat desired here.
1816 */
1817 if ((reqp = dca_newreq(dca)) != NULL) {
1818 reqp->dr_wlp = wlp;
1819 reqp->dr_dca = dca;
1820 reqp->dr_flags = 0;
1821 reqp->dr_callback = NULL;
1822 }
1823 }
1824 return (reqp);
1825 }
1826
1827 void
dca_freereq(dca_request_t * reqp)1828 dca_freereq(dca_request_t *reqp)
1829 {
1830 reqp->dr_kcf_req = NULL;
1831 if (!(reqp->dr_flags & DR_NOCACHE)) {
1832 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1833 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1834 (dca_listnode_t *)reqp);
1835 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1836 }
1837 }
1838
1839 /*
1840 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
1841 * is mapped to a single physical address. On x86, a user buffer is mapped
1842 * to multiple physical addresses. These physical addresses are chained
1843 * using the method specified in Broadcom BCM5820 specification.
1844 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
    int rv;
    caddr_t kaddr;
    uint_t flags;
    int n_chain = 0;

    /* in-place ops use one bidirectional binding for input and output */
    if (reqp->dr_flags & DR_INPLACE) {
        flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
    } else {
        flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
    }

    /* first the input */
    if (incnt) {
        if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            return (DDI_FAILURE);
        }
        if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
            kaddr, reqp->dr_chain_in_dmah, flags,
            &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (rv);
        }

        /*
         * The offset and length are altered by the calling routine
         * reqp->dr_in->cd_offset += incnt;
         * reqp->dr_in->cd_length -= incnt;
         */
        /* Save the first one in the chain for MCR */
        reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
        reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
        reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
    } else {
        reqp->dr_in_paddr = 0;
        reqp->dr_in_next = 0;
        reqp->dr_in_len = 0;
    }

    if (reqp->dr_flags & DR_INPLACE) {
        /* the output simply shares the input chain */
        reqp->dr_out_paddr = reqp->dr_in_paddr;
        reqp->dr_out_len = reqp->dr_in_len;
        reqp->dr_out_next = reqp->dr_in_next;
        return (DDI_SUCCESS);
    }

    /* then the output */
    if (outcnt) {
        flags = DDI_DMA_READ | DDI_DMA_STREAMING;
        if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }
        /*
         * The output descriptors are written immediately after the
         * n_chain descriptors just produced for the input chain.
         */
        rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
            n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
            flags, &reqp->dr_chain_out_head, &n_chain);
        if (rv != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }

        /* Save the first one in the chain for MCR */
        reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
        reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
        reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
    } else {
        reqp->dr_out_paddr = 0;
        reqp->dr_out_next = 0;
        reqp->dr_out_len = 0;
    }

    return (DDI_SUCCESS);
}
1922
1923 /*
1924 * Unbind the user buffers from the DMA handles.
1925 */
1926 int
dca_unbindchains(dca_request_t * reqp)1927 dca_unbindchains(dca_request_t *reqp)
1928 {
1929 int rv = DDI_SUCCESS;
1930 int rv1 = DDI_SUCCESS;
1931
1932 /* Clear the input chain */
1933 if (reqp->dr_chain_in_head.dc_buffer_paddr != 0) {
1934 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1935 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1936 }
1937
1938 if (reqp->dr_flags & DR_INPLACE) {
1939 return (rv);
1940 }
1941
1942 /* Clear the output chain */
1943 if (reqp->dr_chain_out_head.dc_buffer_paddr != 0) {
1944 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1945 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1946 }
1947
1948 return ((rv != DDI_SUCCESS)? rv : rv1);
1949 }
1950
/*
 * Build either an input chain or an output chain.  It is a single-item
 * chain for Sparc, and possibly a multiple-item chain for x86.
 */
/*
 * reqp      - request whose context page supplies the descriptor area
 * cnt       - number of bytes at kaddr to bind
 * dr_offset - byte offset into the context page at which this chain's
 *             descriptors are written
 * kaddr     - kernel virtual address of the buffer to bind
 * handle    - DMA handle used for the binding
 * flags     - DDI_DMA_* binding flags
 * head      - out: paddr/length of the first cookie, plus the paddr of
 *             the second descriptor (0 for a single-entry chain)
 * n_chain   - out: number of cookies (= descriptors written)
 */
static int
dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain)
{
    ddi_dma_cookie_t c;
    uint_t nc;
    int rv;
    caddr_t chain_kaddr_pre;
    caddr_t chain_kaddr;
    uint32_t chain_paddr;
    int i;

    /* Advance past the context structure to the starting address */
    chain_paddr = reqp->dr_ctx_paddr + dr_offset;
    chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;

    /*
     * Bind the kernel address to the DMA handle. On x86, the actual
     * buffer is mapped into multiple physical addresses. On Sparc,
     * the actual buffer is mapped into a single address.
     */
    rv = ddi_dma_addr_bind_handle(handle,
        NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        return (DDI_FAILURE);
    }

    (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
    if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
        DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
        /* handle has gone bad; mark the request for destruction */
        reqp->destroy = TRUE;
        return (rv);
    }

    *n_chain = nc;

    /* Setup the data buffer chain for DMA transfer */
    chain_kaddr_pre = NULL;
    head->dc_buffer_paddr = 0;
    head->dc_next_paddr = 0;
    head->dc_buffer_length = 0;
    for (i = 0; i < nc; i++) {
        /* PIO */
        PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
        PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
        PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);

        /* Remember the head of the chain */
        if (head->dc_buffer_paddr == 0) {
            head->dc_buffer_paddr = c.dmac_address;
            head->dc_buffer_length = c.dmac_size;
        }

        /* Link to the previous one if one exists */
        if (chain_kaddr_pre) {
            PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
                chain_paddr);
            if (head->dc_next_paddr == 0)
                head->dc_next_paddr = chain_paddr;
        }
        chain_kaddr_pre = chain_kaddr;

        /* Maintain pointers */
        chain_paddr += DESC_SIZE;
        chain_kaddr += DESC_SIZE;

        /* Retrieve the next cookie if there is one */
        if (i < nc-1)
            ddi_dma_nextcookie(handle, &c);
    }

    /* Set the next pointer in the last entry to NULL */
    PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);

    return (DDI_SUCCESS);
}
2032
2033 /*
2034 * Schedule some work.
2035 */
/*
 * Queue a request on the worklist for the given MCR, sync its context
 * and descriptors for the device, apply kCF flow control when the
 * queue reaches its high-water mark, and (when 'dosched') kick the
 * scheduler.  Returns CRYPTO_QUEUED on success, or CRYPTO_DEVICE_ERROR
 * if the context DMA handle has gone bad (the request is then marked
 * for destruction).
 */
int
dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);

    mutex_enter(&wlp->dwl_lock);

    DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
        reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
        reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
    DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
        reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
    /* sync out the entire context and descriptor chains */
    (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
    if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        reqp->destroy = TRUE;
        mutex_exit(&wlp->dwl_lock);
        return (CRYPTO_DEVICE_ERROR);
    }

    dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
    wlp->dwl_count++;
    wlp->dwl_lastsubmit = ddi_get_lbolt();
    reqp->dr_wlp = wlp;

    if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
        /* we are fully loaded now, let kCF know */

        wlp->dwl_flowctl++;
        wlp->dwl_busy = 1;

        crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
    }

    if (dosched) {
#ifdef SCHEDDELAY
        /* possibly wait for more work to arrive */
        if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
            dca_schedule(dca, mcr);
        } else if (!wlp->dwl_schedtid) {
            /* wait 1 msec for more work before doing it */
            wlp->dwl_schedtid = timeout(dca_schedtimeout,
                (void *)wlp, drv_usectohz(MSEC));
        }
#else
        dca_schedule(dca, mcr);
#endif
    }
    mutex_exit(&wlp->dwl_lock);

    return (CRYPTO_QUEUED);
}
2089
/*
 * dca_schedule() -- move requests from the worklist's wait queue into
 * MCR (Master Command Record) buffers and post them to the device.
 * Loops until the chip reports the MCR register full, the wait queue
 * is exhausted, or a handle fault is detected.
 * Caller must hold wlp->dwl_lock.
 */
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t *wlp = WORKLIST(dca, mcr);
	int csr;
	int full;
	uint32_t status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	/* select the MCR register and its "full" status bit */
	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t *workp;
		uint32_t offset;
		int nreqs;

		/* a faulted register handle means the device is gone */
		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		/* chip cannot accept another MCR just now */
		if ((status & full) != 0)
			break;

#ifdef SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		/* pack up to dwl_reqspermcr requests into this MCR */
		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t *reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		/* timestamp for the stall detector (dca_jobtimeout) */
		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		/* writing the MCR physical address to the CSR posts it */
		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}
2270
2271 /*
2272 * Reclaim completed work, called in interrupt context.
2273 */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t *wlp = WORKLIST(dca, mcr);
	dca_work_t *workp;
	ushort_t flags;
	int nreclaimed = 0;
	int i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR in the submitted (runq), we check to see if
	 * it has been processed.  If so, then we note each individual
	 * job in the MCR, and do the completion processing for
	 * each of such job.
	 */
	for (;;) {

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			break;
		}

		/* only sync the MCR flags, since that's all we need */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_rmqueue((dca_listnode_t *)workp);
			dca_destroywork(workp);
			return;
		}

		flags = GETMCR16(workp, MCR_FLAGS);
		if ((flags & MCRFLAG_FINISHED) == 0) {
			/* chip is still working on it */
			DBG(dca, DRECLAIM,
			    "chip still working on it (MCR%d)", mcr);
			break;
		}

		/* its really for us, so remove it from the queue */
		dca_rmqueue((dca_listnode_t *)workp);

		/* if we were draining, signal on the cv */
		if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
			cv_signal(&wlp->dwl_cv);
		}

		/* update statistics, done under the lock */
		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];
			if (reqp == NULL) {
				continue;
			}
			if (reqp->dr_byte_stat >= 0) {
				dca->dca_stats[reqp->dr_byte_stat] +=
				    reqp->dr_pkt_length;
			}
			if (reqp->dr_job_stat >= 0) {
				dca->dca_stats[reqp->dr_job_stat]++;
			}
		}
		/*
		 * Drop dwl_lock across the per-request completions —
		 * presumably because dca_done() can call back into the
		 * framework; confirm before changing this ordering.
		 */
		mutex_exit(&wlp->dwl_lock);

		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];

			if (reqp == NULL) {
				continue;
			}

			/* Do the callback. */
			workp->dw_reqs[i] = NULL;
			dca_done(reqp, CRYPTO_SUCCESS);

			nreclaimed++;
		}

		/* now we can release the work */
		dca_freework(workp);

		mutex_enter(&wlp->dwl_lock);
	}
	DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
}
2361
2362 int
dca_length(crypto_data_t * cdata)2363 dca_length(crypto_data_t *cdata)
2364 {
2365 return (cdata->cd_length);
2366 }
2367
2368 /*
2369 * This is the callback function called from the interrupt when a kCF job
2370 * completes. It does some driver-specific things, and then calls the
2371 * kCF-provided callback. Finally, it cleans up the state for the work
2372 * request and drops the reference count to allow for DR.
2373 */
2374 void
dca_done(dca_request_t * reqp,int err)2375 dca_done(dca_request_t *reqp, int err)
2376 {
2377 uint64_t ena = 0;
2378
2379 /* unbind any chains we were using */
2380 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2381 /* DMA failure */
2382 ena = dca_ena(ena);
2383 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2384 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2385 "fault on buffer DMA handle");
2386 if (err == CRYPTO_SUCCESS) {
2387 err = CRYPTO_DEVICE_ERROR;
2388 }
2389 }
2390
2391 if (reqp->dr_callback != NULL) {
2392 reqp->dr_callback(reqp, err);
2393 } else {
2394 dca_freereq(reqp);
2395 }
2396 }
2397
2398 /*
2399 * Call this when a failure is detected. It will reset the chip,
2400 * log a message, alert kCF, and mark jobs in the runq as failed.
2401 */
2402 /* ARGSUSED */
2403 void
dca_failure(dca_t * dca,ddi_fault_location_t loc,dca_fma_eclass_t index,uint64_t ena,int errno,char * mess,...)2404 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2405 uint64_t ena, int errno, char *mess, ...)
2406 {
2407 va_list ap;
2408 char buf[256];
2409 int mcr;
2410 char *eclass;
2411 int have_mutex;
2412
2413 va_start(ap, mess);
2414 (void) vsprintf(buf, mess, ap);
2415 va_end(ap);
2416
2417 eclass = dca_fma_eclass_string(dca->dca_model, index);
2418
2419 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2420 index != DCA_FM_ECLASS_NONE) {
2421 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2422 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2423 FM_EREPORT_VERS0, NULL);
2424
2425 /* Report the impact of the failure to the DDI. */
2426 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2427 } else {
2428 /* Just log the error string to the message log */
2429 dca_error(dca, buf);
2430 }
2431
2432 /*
2433 * Indicate a failure (keeps schedule from running).
2434 */
2435 dca->dca_flags |= DCA_FAILED;
2436
2437 /*
2438 * Reset the chip. This should also have as a side effect, the
2439 * disabling of all interrupts from the device.
2440 */
2441 (void) dca_reset(dca, 1);
2442
2443 /*
2444 * Report the failure to kCF.
2445 */
2446 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2447 if (WORKLIST(dca, mcr)->dwl_prov) {
2448 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2449 CRYPTO_PROVIDER_FAILED);
2450 }
2451 }
2452
2453 /*
2454 * Return jobs not sent to hardware back to kCF.
2455 */
2456 dca_rejectjobs(dca);
2457
2458 /*
2459 * From this point on, no new work should be arriving, and the
2460 * chip should not be doing any active DMA.
2461 */
2462
2463 /*
2464 * Now find all the work submitted to the device and fail
2465 * them.
2466 */
2467 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2468 dca_worklist_t *wlp;
2469 int i;
2470
2471 wlp = WORKLIST(dca, mcr);
2472
2473 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2474 continue;
2475 }
2476 for (;;) {
2477 dca_work_t *workp;
2478
2479 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2480 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2481 if (workp == NULL) {
2482 if (have_mutex)
2483 mutex_exit(&wlp->dwl_lock);
2484 break;
2485 }
2486 mutex_exit(&wlp->dwl_lock);
2487
2488 /*
2489 * Free up requests
2490 */
2491 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2492 dca_request_t *reqp = workp->dw_reqs[i];
2493 if (reqp) {
2494 dca_done(reqp, errno);
2495 workp->dw_reqs[i] = NULL;
2496 }
2497 }
2498
2499 mutex_enter(&wlp->dwl_lock);
2500 /*
2501 * If waiting to drain, signal on the waiter.
2502 */
2503 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2504 cv_signal(&wlp->dwl_cv);
2505 }
2506
2507 /*
2508 * Return the work and request structures to
2509 * the free pool.
2510 */
2511 dca_freework(workp);
2512 if (have_mutex)
2513 mutex_exit(&wlp->dwl_lock);
2514 }
2515 }
2516
2517 }
2518
#ifdef SCHEDDELAY
/*
 * Reschedule worklist as needed.
 * timeout(9F) callback: clears the pending-timeout id and runs the
 * scheduler for this worklist under dwl_lock.
 */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t *wlp = (dca_worklist_t *)arg;
	mutex_enter(&wlp->dwl_lock);
	/* this timeout has now fired; allow a new one to be set */
	wlp->dwl_schedtid = 0;
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
#endif
2533
2534 /*
2535 * Check for stalled jobs.
2536 */
2537 void
dca_jobtimeout(void * arg)2538 dca_jobtimeout(void *arg)
2539 {
2540 int mcr;
2541 dca_t *dca = (dca_t *)arg;
2542 int hung = 0;
2543
2544 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2545 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2546 dca_work_t *workp;
2547 clock_t when;
2548
2549 mutex_enter(&wlp->dwl_lock);
2550 when = ddi_get_lbolt();
2551
2552 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2553 if (workp == NULL) {
2554 /* nothing sitting in the queue */
2555 mutex_exit(&wlp->dwl_lock);
2556 continue;
2557 }
2558
2559 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2560 /* request has been queued for less than STALETIME */
2561 mutex_exit(&wlp->dwl_lock);
2562 continue;
2563 }
2564
2565 /* job has been sitting around for over 1 second, badness */
2566 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2567 mcr);
2568
2569 /* put it back in the queue, until we reset the chip */
2570 hung++;
2571 mutex_exit(&wlp->dwl_lock);
2572 }
2573
2574 if (hung) {
2575 dca_failure(dca, DDI_DEVICE_FAULT,
2576 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2577 "timeout processing job.)");
2578 }
2579
2580 /* reschedule ourself */
2581 mutex_enter(&dca->dca_intrlock);
2582 if (dca->dca_jobtid == 0) {
2583 /* timeout has been canceled, prior to DR */
2584 mutex_exit(&dca->dca_intrlock);
2585 return;
2586 }
2587
2588 /* check again in 1 second */
2589 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2590 drv_usectohz(SECOND));
2591 mutex_exit(&dca->dca_intrlock);
2592 }
2593
2594 /*
2595 * This returns all jobs back to kCF. It assumes that processing
2596 * on the worklist has halted.
2597 */
void
dca_rejectjobs(dca_t *dca)
{
	int mcr;
	int have_mutex;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t *wlp = WORKLIST(dca, mcr);
		dca_request_t *reqp;

		/* skip worklists that were never initialized */
		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		/*
		 * NOTE(review): the tryenter result is only honored for
		 * the final mutex_exit(); the exit/enter pair inside the
		 * loop runs unconditionally.  Preserved as-is — confirm
		 * before restructuring.
		 */
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			/* drain the wait queue from the tail (dca_unqueue) */
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			/* drop the lock across the callback to kCF */
			mutex_exit(&wlp->dwl_lock);

			(void) dca_unbindchains(reqp);
			/* EAGAIN tells kCF to resubmit the job elsewhere */
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}
2634
/*
 * Quiesce both worklists: set the drain flag (which stops dca_schedule
 * from posting new MCRs) and wait up to STALETIME for the chip to
 * finish outstanding work.  Returns 0 on success, EBUSY (after undoing
 * the drain) if the device would not quiesce.
 */
int
dca_drain(dca_t *dca)
{
	int mcr;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef SCHEDDELAY
		timeout_id_t tid;
#endif
		dca_worklist_t *wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to a second to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			/* dca_reclaim signals dwl_cv when the runq empties */
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				/* re-enable scheduling before failing */
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
	}
	return (0);
}
2679
2680 void
dca_undrain(dca_t * dca)2681 dca_undrain(dca_t *dca)
2682 {
2683 int mcr;
2684
2685 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2686 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2687 mutex_enter(&wlp->dwl_lock);
2688 wlp->dwl_drain = 0;
2689 dca_schedule(dca, mcr);
2690 mutex_exit(&wlp->dwl_lock);
2691 }
2692 }
2693
2694 /*
2695 * Duplicate the crypto_data_t structure, but point to the original
2696 * buffers.
2697 */
2698 int
dca_dupcrypto(crypto_data_t * input,crypto_data_t * ninput)2699 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2700 {
2701 ninput->cd_format = input->cd_format;
2702 ninput->cd_offset = input->cd_offset;
2703 ninput->cd_length = input->cd_length;
2704 ninput->cd_miscdata = input->cd_miscdata;
2705
2706 switch (input->cd_format) {
2707 case CRYPTO_DATA_RAW:
2708 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2709 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2710 break;
2711
2712 case CRYPTO_DATA_UIO:
2713 ninput->cd_uio = input->cd_uio;
2714 break;
2715
2716 case CRYPTO_DATA_MBLK:
2717 ninput->cd_mp = input->cd_mp;
2718 break;
2719
2720 default:
2721 DBG(NULL, DWARN,
2722 "dca_dupcrypto: unrecognised crypto data format");
2723 return (CRYPTO_FAILED);
2724 }
2725
2726 return (CRYPTO_SUCCESS);
2727 }
2728
2729 /*
2730 * Performs validation checks on the input and output data structures.
2731 */
2732 int
dca_verifyio(crypto_data_t * input,crypto_data_t * output)2733 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2734 {
2735 int rv = CRYPTO_SUCCESS;
2736
2737 switch (input->cd_format) {
2738 case CRYPTO_DATA_RAW:
2739 break;
2740
2741 case CRYPTO_DATA_UIO:
2742 /* we support only kernel buffer */
2743 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2744 DBG(NULL, DWARN, "non kernel input uio buffer");
2745 rv = CRYPTO_ARGUMENTS_BAD;
2746 }
2747 break;
2748
2749 case CRYPTO_DATA_MBLK:
2750 break;
2751
2752 default:
2753 DBG(NULL, DWARN, "unrecognised input crypto data format");
2754 rv = CRYPTO_ARGUMENTS_BAD;
2755 }
2756
2757 switch (output->cd_format) {
2758 case CRYPTO_DATA_RAW:
2759 break;
2760
2761 case CRYPTO_DATA_UIO:
2762 /* we support only kernel buffer */
2763 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2764 DBG(NULL, DWARN, "non kernel output uio buffer");
2765 rv = CRYPTO_ARGUMENTS_BAD;
2766 }
2767 break;
2768
2769 case CRYPTO_DATA_MBLK:
2770 break;
2771
2772 default:
2773 DBG(NULL, DWARN, "unrecognised output crypto data format");
2774 rv = CRYPTO_ARGUMENTS_BAD;
2775 }
2776
2777 return (rv);
2778 }
2779
2780 /*
2781 * data: source crypto_data_t struct
2782 * off: offset into the source before commencing copy
2783 * count: the amount of data to copy
2784 * dest: destination buffer
2785 */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int rv = CRYPTO_SUCCESS;
	uio_t *uiop;
	uint_t vec_idx;
	size_t cur_len;
	mblk_t *mp;

	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset adjusted size of the buffer.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		/* single flat buffer: one bcopy does it */
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 * 'off' is only nonzero for the first iovec copied.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
2901
2902
2903 /*
2904 * Performs the input, output or hard scatter/gather checks on the specified
2905 * crypto_data_t struct. Returns true if the data is scatter/gather in nature
2906 * ie fails the test.
2907 */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t *uiop;
	mblk_t *mp;
	int rv = FALSE;

	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			/* more than one iovec implies scatter/gather */
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			/* a chained mblk implies scatter/gather */
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and is of word multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if (uiop->uio_iovcnt > 1) {
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL) {
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			/* only the first iovec is examined here */
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			/* only the first mblk is examined here */
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
3027
3028 /*
3029 * Increments the cd_offset and decrements the cd_length as the data is
3030 * gathered from the crypto_data_t struct.
3031 * The data is reverse-copied into the dest buffer if the flag is true.
3032 */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int rv = CRYPTO_SUCCESS;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* consume the gathered bytes from the crypto_data_t */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3162
3163 /*
3164 * Increments the cd_offset and decrements the cd_length as the data is
3165 * gathered from the crypto_data_t struct.
3166 */
int
dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
    int count)
{
	int rv = CRYPTO_SUCCESS;
	caddr_t baddr;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	/* Process the residual first */
	if (*residlen > 0) {
		uint_t num = min(count, *residlen);
		bcopy(resid, dest, num);
		*residlen -= num;
		if (*residlen > 0) {
			/*
			 * Requested amount 'count' is less than what's in
			 * the residual, so shuffle any remaining resid to
			 * the front.
			 */
			baddr = resid + num;
			bcopy(baddr, resid, *residlen);
		}
		dest += num;
		count -= num;
	}

	/* Now process what's in the crypto_data_t structs */
	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* consume the gathered bytes from the crypto_data_t */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_resid_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3301
3302 /*
3303 * Appends the data to the crypto_data_t struct increasing cd_length.
3304 * cd_offset is left unchanged.
3305 * Data is reverse-copied if the flag is TRUE.
3306 */
3307 int
dca_scatter(const char * src,crypto_data_t * out,int count,int reverse)3308 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3309 {
3310 int rv = CRYPTO_SUCCESS;
3311 off_t offset = out->cd_offset + out->cd_length;
3312 uint_t vec_idx;
3313 uio_t *uiop;
3314 size_t cur_len;
3315 mblk_t *mp;
3316
3317 switch (out->cd_format) {
3318 case CRYPTO_DATA_RAW:
3319 if (out->cd_raw.iov_len - offset < count) {
3320 /* Trying to write out more than space available. */
3321 return (CRYPTO_DATA_LEN_RANGE);
3322 }
3323 if (reverse)
3324 dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3325 count, count);
3326 else
3327 bcopy(src, out->cd_raw.iov_base + offset, count);
3328 out->cd_length += count;
3329 break;
3330
3331 case CRYPTO_DATA_UIO:
3332 /*
3333 * Jump to the first iovec that can be written to.
3334 */
3335 uiop = out->cd_uio;
3336 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3337 offset >= uiop->uio_iov[vec_idx].iov_len;
3338 offset -= uiop->uio_iov[vec_idx++].iov_len)
3339 ;
3340 if (vec_idx == uiop->uio_iovcnt) {
3341 /*
3342 * The caller specified an offset that is larger than
3343 * the total size of the buffers it provided.
3344 */
3345 return (CRYPTO_DATA_LEN_RANGE);
3346 }
3347
3348 /*
3349 * Now process the iovecs.
3350 */
3351 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3352 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3353 offset, count);
3354 count -= cur_len;
3355 if (reverse) {
3356 dca_reverse((void*) (src+count),
3357 uiop->uio_iov[vec_idx].iov_base +
3358 offset, cur_len, cur_len);
3359 } else {
3360 bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3361 offset, cur_len);
3362 src += cur_len;
3363 }
3364 out->cd_length += cur_len;
3365 vec_idx++;
3366 offset = 0;
3367 }
3368
3369 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3370 /*
3371 * The end of the specified iovec's was reached but
3372 * the length requested could not be processed
3373 * (requested to write more data than space provided).
3374 */
3375 return (CRYPTO_DATA_LEN_RANGE);
3376 }
3377 break;
3378
3379 case CRYPTO_DATA_MBLK:
3380 /*
3381 * Jump to the first mblk_t that can be written to.
3382 */
3383 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3384 offset -= MBLKL(mp), mp = mp->b_cont)
3385 ;
3386 if (mp == NULL) {
3387 /*
3388 * The caller specified an offset that is larger than
3389 * the total size of the buffers it provided.
3390 */
3391 return (CRYPTO_DATA_LEN_RANGE);
3392 }
3393
3394 /*
3395 * Now do the processing on the mblk chain.
3396 */
3397 while (mp != NULL && count > 0) {
3398 cur_len = min(MBLKL(mp) - offset, count);
3399 count -= cur_len;
3400 if (reverse) {
3401 dca_reverse((void*) (src+count),
3402 (char *)(mp->b_rptr + offset), cur_len,
3403 cur_len);
3404 } else {
3405 bcopy(src, (char *)(mp->b_rptr + offset),
3406 cur_len);
3407 src += cur_len;
3408 }
3409 out->cd_length += cur_len;
3410 mp = mp->b_cont;
3411 offset = 0;
3412 }
3413
3414 if (mp == NULL && count > 0) {
3415 /*
3416 * The end of the mblk was reached but the length
3417 * requested could not be processed, (requested to
3418 * digest more data than it provided).
3419 */
3420 return (CRYPTO_DATA_LEN_RANGE);
3421 }
3422 break;
3423
3424 default:
3425 DBG(NULL, DWARN, "unrecognised crypto data format");
3426 rv = CRYPTO_ARGUMENTS_BAD;
3427 }
3428 return (rv);
3429 }
3430
/*
 * Compare two byte arrays, reading s1 in reverse order (last byte first)
 * against s2 in forward order.
 * Return 0 if they are identical, 1 otherwise.
 *
 * Fix: the loop index was a signed int compared against a size_t count,
 * which is ill-defined for counts above INT_MAX; use size_t throughout.
 */
int
dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
{
	const unsigned char *src, *dst;
	size_t i;

	if (n == 0)
		return (0);

	/* src walks s1 backwards from its last byte; dst walks s2 forward. */
	src = (const unsigned char *)s1 + n - 1;
	dst = (const unsigned char *)s2;
	for (i = 0; i < n; i++) {
		if (*src != *dst)
			return (1);
		src--;
		dst++;
	}

	return (0);
}
3455
3456
/*
 * This calculates the size of a bignum in bits, specifically not counting
 * leading zero bits. This size calculation must be done *before* any
 * endian reversal takes place (i.e. the numbers are in absolute big-endian
 * order.)
 */
int
dca_bitlen(unsigned char *bignum, int bytelen)
{
	int		idx = 0;
	int		bits = 8;
	unsigned char	top;

	/* Skip leading zero bytes, but always keep the final byte. */
	while (idx < bytelen - 1 && bignum[idx] == 0)
		idx++;

	/* Count significant bits in the most significant non-zero byte. */
	top = bignum[idx];
	while (bits > 1 && (top & 0x80) == 0) {
		top <<= 1;
		bits--;
	}

	return ((8 * (bytelen - idx - 1)) + bits);
}
3483
3484 /*
3485 * This compares to bignums (in big-endian order). It ignores leading
3486 * null bytes. The result semantics follow bcmp, mempcmp, strcmp, etc.
3487 */
3488 int
dca_numcmp(caddr_t n1,int n1len,caddr_t n2,int n2len)3489 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3490 {
3491 while ((n1len > 1) && (*n1 == 0)) {
3492 n1len--;
3493 n1++;
3494 }
3495 while ((n2len > 1) && (*n2 == 0)) {
3496 n2len--;
3497 n2++;
3498 }
3499 if (n1len != n2len) {
3500 return (n1len - n2len);
3501 }
3502 while ((n1len > 1) && (*n1 == *n2)) {
3503 n1++;
3504 n2++;
3505 n1len--;
3506 }
3507 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3508 }
3509
3510 /*
3511 * Return array of key attributes.
3512 */
3513 crypto_object_attribute_t *
dca_get_key_attr(crypto_key_t * key)3514 dca_get_key_attr(crypto_key_t *key)
3515 {
3516 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3517 (key->ck_count == 0)) {
3518 return (NULL);
3519 }
3520
3521 return (key->ck_attrs);
3522 }
3523
3524 /*
3525 * If attribute type exists valp points to it's 32-bit value.
3526 */
3527 int
dca_attr_lookup_uint32(crypto_object_attribute_t * attrp,uint_t atnum,uint64_t atype,uint32_t * valp)3528 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3529 uint64_t atype, uint32_t *valp)
3530 {
3531 crypto_object_attribute_t *bap;
3532
3533 bap = dca_find_attribute(attrp, atnum, atype);
3534 if (bap == NULL) {
3535 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3536 }
3537
3538 *valp = *bap->oa_value;
3539
3540 return (CRYPTO_SUCCESS);
3541 }
3542
3543 /*
3544 * If attribute type exists data contains the start address of the value,
3545 * and numelems contains it's length.
3546 */
3547 int
dca_attr_lookup_uint8_array(crypto_object_attribute_t * attrp,uint_t atnum,uint64_t atype,void ** data,unsigned int * numelems)3548 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3549 uint64_t atype, void **data, unsigned int *numelems)
3550 {
3551 crypto_object_attribute_t *bap;
3552
3553 bap = dca_find_attribute(attrp, atnum, atype);
3554 if (bap == NULL) {
3555 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3556 }
3557
3558 *data = bap->oa_value;
3559 *numelems = bap->oa_value_len;
3560
3561 return (CRYPTO_SUCCESS);
3562 }
3563
3564 /*
3565 * Finds entry of specified name. If it is not found dca_find_attribute returns
3566 * NULL.
3567 */
3568 crypto_object_attribute_t *
dca_find_attribute(crypto_object_attribute_t * attrp,uint_t atnum,uint64_t atype)3569 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3570 uint64_t atype)
3571 {
3572 while (atnum) {
3573 if (attrp->oa_type == atype)
3574 return (attrp);
3575 atnum--;
3576 attrp++;
3577 }
3578 return (NULL);
3579 }
3580
3581 /*
3582 * Return the address of the first data buffer. If the data format is
3583 * unrecognised return NULL.
3584 */
3585 caddr_t
dca_bufdaddr(crypto_data_t * data)3586 dca_bufdaddr(crypto_data_t *data)
3587 {
3588 switch (data->cd_format) {
3589 case CRYPTO_DATA_RAW:
3590 return (data->cd_raw.iov_base + data->cd_offset);
3591 case CRYPTO_DATA_UIO:
3592 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3593 case CRYPTO_DATA_MBLK:
3594 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3595 default:
3596 DBG(NULL, DWARN,
3597 "dca_bufdaddr: unrecognised crypto data format");
3598 return (NULL);
3599 }
3600 }
3601
3602 static caddr_t
dca_bufdaddr_out(crypto_data_t * data)3603 dca_bufdaddr_out(crypto_data_t *data)
3604 {
3605 size_t offset = data->cd_offset + data->cd_length;
3606
3607 switch (data->cd_format) {
3608 case CRYPTO_DATA_RAW:
3609 return (data->cd_raw.iov_base + offset);
3610 case CRYPTO_DATA_UIO:
3611 return (data->cd_uio->uio_iov[0].iov_base + offset);
3612 case CRYPTO_DATA_MBLK:
3613 return ((char *)data->cd_mp->b_rptr + offset);
3614 default:
3615 DBG(NULL, DWARN,
3616 "dca_bufdaddr_out: unrecognised crypto data format");
3617 return (NULL);
3618 }
3619 }
3620
3621 /*
3622 * Control entry points.
3623 */
3624
3625 /* ARGSUSED */
3626 static void
dca_provider_status(crypto_provider_handle_t provider,uint_t * status)3627 dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
3628 {
3629 *status = CRYPTO_PROVIDER_READY;
3630 }
3631
3632 /*
3633 * Cipher (encrypt/decrypt) entry points.
3634 */
3635
3636 /* ARGSUSED */
3637 static int
dca_encrypt_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)3638 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3639 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3640 crypto_req_handle_t req)
3641 {
3642 int error = CRYPTO_FAILED;
3643 dca_t *softc;
3644
3645 softc = DCA_SOFTC_FROM_CTX(ctx);
3646 DBG(softc, DENTRY, "dca_encrypt_init: started");
3647
3648 /* check mechanism */
3649 switch (mechanism->cm_type) {
3650 case DES_CBC_MECH_INFO_TYPE:
3651 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3652 DR_ENCRYPT);
3653 break;
3654 case DES3_CBC_MECH_INFO_TYPE:
3655 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3656 DR_ENCRYPT | DR_TRIPLE);
3657 break;
3658 case RSA_PKCS_MECH_INFO_TYPE:
3659 case RSA_X_509_MECH_INFO_TYPE:
3660 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3661 break;
3662 default:
3663 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
3664 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3665 error = CRYPTO_MECHANISM_INVALID;
3666 }
3667
3668 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
3669
3670 if (error == CRYPTO_SUCCESS)
3671 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3672 &softc->dca_ctx_list_lock);
3673
3674 return (error);
3675 }
3676
3677 /* ARGSUSED */
3678 static int
dca_encrypt(crypto_ctx_t * ctx,crypto_data_t * plaintext,crypto_data_t * ciphertext,crypto_req_handle_t req)3679 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3680 crypto_data_t *ciphertext, crypto_req_handle_t req)
3681 {
3682 int error = CRYPTO_FAILED;
3683 dca_t *softc;
3684
3685 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3686 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3687
3688 softc = DCA_SOFTC_FROM_CTX(ctx);
3689 DBG(softc, DENTRY, "dca_encrypt: started");
3690
3691 /* handle inplace ops */
3692 if (!ciphertext) {
3693 dca_request_t *reqp = ctx->cc_provider_private;
3694 reqp->dr_flags |= DR_INPLACE;
3695 ciphertext = plaintext;
3696 }
3697
3698 /* check mechanism */
3699 switch (DCA_MECH_FROM_CTX(ctx)) {
3700 case DES_CBC_MECH_INFO_TYPE:
3701 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3702 break;
3703 case DES3_CBC_MECH_INFO_TYPE:
3704 error = dca_3des(ctx, plaintext, ciphertext, req,
3705 DR_ENCRYPT | DR_TRIPLE);
3706 break;
3707 case RSA_PKCS_MECH_INFO_TYPE:
3708 case RSA_X_509_MECH_INFO_TYPE:
3709 error = dca_rsastart(ctx, plaintext, ciphertext, req,
3710 DCA_RSA_ENC);
3711 break;
3712 default:
3713 /* Should never reach here */
3714 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3715 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3716 error = CRYPTO_MECHANISM_INVALID;
3717 }
3718
3719 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3720 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3721 ciphertext->cd_length = 0;
3722 }
3723
3724 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3725
3726 return (error);
3727 }
3728
3729 /* ARGSUSED */
3730 static int
dca_encrypt_update(crypto_ctx_t * ctx,crypto_data_t * plaintext,crypto_data_t * ciphertext,crypto_req_handle_t req)3731 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3732 crypto_data_t *ciphertext, crypto_req_handle_t req)
3733 {
3734 int error = CRYPTO_FAILED;
3735 dca_t *softc;
3736
3737 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3738 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3739
3740 softc = DCA_SOFTC_FROM_CTX(ctx);
3741 DBG(softc, DENTRY, "dca_encrypt_update: started");
3742
3743 /* handle inplace ops */
3744 if (!ciphertext) {
3745 dca_request_t *reqp = ctx->cc_provider_private;
3746 reqp->dr_flags |= DR_INPLACE;
3747 ciphertext = plaintext;
3748 }
3749
3750 /* check mechanism */
3751 switch (DCA_MECH_FROM_CTX(ctx)) {
3752 case DES_CBC_MECH_INFO_TYPE:
3753 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3754 DR_ENCRYPT);
3755 break;
3756 case DES3_CBC_MECH_INFO_TYPE:
3757 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3758 DR_ENCRYPT | DR_TRIPLE);
3759 break;
3760 default:
3761 /* Should never reach here */
3762 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3763 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3764 error = CRYPTO_MECHANISM_INVALID;
3765 }
3766
3767 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3768
3769 return (error);
3770 }
3771
3772 /* ARGSUSED */
3773 static int
dca_encrypt_final(crypto_ctx_t * ctx,crypto_data_t * ciphertext,crypto_req_handle_t req)3774 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3775 crypto_req_handle_t req)
3776 {
3777 int error = CRYPTO_FAILED;
3778 dca_t *softc;
3779
3780 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3781 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3782
3783 softc = DCA_SOFTC_FROM_CTX(ctx);
3784 DBG(softc, DENTRY, "dca_encrypt_final: started");
3785
3786 /* check mechanism */
3787 switch (DCA_MECH_FROM_CTX(ctx)) {
3788 case DES_CBC_MECH_INFO_TYPE:
3789 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3790 break;
3791 case DES3_CBC_MECH_INFO_TYPE:
3792 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3793 break;
3794 default:
3795 /* Should never reach here */
3796 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3797 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3798 error = CRYPTO_MECHANISM_INVALID;
3799 }
3800
3801 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3802
3803 return (error);
3804 }
3805
3806 /* ARGSUSED */
3807 static int
dca_encrypt_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * plaintext,crypto_data_t * ciphertext,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)3808 dca_encrypt_atomic(crypto_provider_handle_t provider,
3809 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
3810 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
3811 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
3812 {
3813 int error = CRYPTO_FAILED;
3814 dca_t *softc = (dca_t *)provider;
3815
3816 DBG(softc, DENTRY, "dca_encrypt_atomic: started");
3817
3818 if (ctx_template != NULL)
3819 return (CRYPTO_ARGUMENTS_BAD);
3820
3821 /* handle inplace ops */
3822 if (!ciphertext) {
3823 ciphertext = plaintext;
3824 }
3825
3826 /* check mechanism */
3827 switch (mechanism->cm_type) {
3828 case DES_CBC_MECH_INFO_TYPE:
3829 error = dca_3desatomic(provider, session_id, mechanism, key,
3830 plaintext, ciphertext, KM_SLEEP, req,
3831 DR_ENCRYPT | DR_ATOMIC);
3832 break;
3833 case DES3_CBC_MECH_INFO_TYPE:
3834 error = dca_3desatomic(provider, session_id, mechanism, key,
3835 plaintext, ciphertext, KM_SLEEP, req,
3836 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
3837 break;
3838 case RSA_PKCS_MECH_INFO_TYPE:
3839 case RSA_X_509_MECH_INFO_TYPE:
3840 error = dca_rsaatomic(provider, session_id, mechanism, key,
3841 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
3842 break;
3843 default:
3844 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
3845 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3846 error = CRYPTO_MECHANISM_INVALID;
3847 }
3848
3849 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
3850 ciphertext->cd_length = 0;
3851 }
3852
3853 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
3854
3855 return (error);
3856 }
3857
3858 /* ARGSUSED */
3859 static int
dca_decrypt_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)3860 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3861 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3862 crypto_req_handle_t req)
3863 {
3864 int error = CRYPTO_FAILED;
3865 dca_t *softc;
3866
3867 softc = DCA_SOFTC_FROM_CTX(ctx);
3868 DBG(softc, DENTRY, "dca_decrypt_init: started");
3869
3870 /* check mechanism */
3871 switch (mechanism->cm_type) {
3872 case DES_CBC_MECH_INFO_TYPE:
3873 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3874 DR_DECRYPT);
3875 break;
3876 case DES3_CBC_MECH_INFO_TYPE:
3877 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3878 DR_DECRYPT | DR_TRIPLE);
3879 break;
3880 case RSA_PKCS_MECH_INFO_TYPE:
3881 case RSA_X_509_MECH_INFO_TYPE:
3882 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3883 break;
3884 default:
3885 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3886 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3887 error = CRYPTO_MECHANISM_INVALID;
3888 }
3889
3890 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3891
3892 if (error == CRYPTO_SUCCESS)
3893 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3894 &softc->dca_ctx_list_lock);
3895
3896 return (error);
3897 }
3898
3899 /* ARGSUSED */
3900 static int
dca_decrypt(crypto_ctx_t * ctx,crypto_data_t * ciphertext,crypto_data_t * plaintext,crypto_req_handle_t req)3901 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3902 crypto_data_t *plaintext, crypto_req_handle_t req)
3903 {
3904 int error = CRYPTO_FAILED;
3905 dca_t *softc;
3906
3907 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3908 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3909
3910 softc = DCA_SOFTC_FROM_CTX(ctx);
3911 DBG(softc, DENTRY, "dca_decrypt: started");
3912
3913 /* handle inplace ops */
3914 if (!plaintext) {
3915 dca_request_t *reqp = ctx->cc_provider_private;
3916 reqp->dr_flags |= DR_INPLACE;
3917 plaintext = ciphertext;
3918 }
3919
3920 /* check mechanism */
3921 switch (DCA_MECH_FROM_CTX(ctx)) {
3922 case DES_CBC_MECH_INFO_TYPE:
3923 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3924 break;
3925 case DES3_CBC_MECH_INFO_TYPE:
3926 error = dca_3des(ctx, ciphertext, plaintext, req,
3927 DR_DECRYPT | DR_TRIPLE);
3928 break;
3929 case RSA_PKCS_MECH_INFO_TYPE:
3930 case RSA_X_509_MECH_INFO_TYPE:
3931 error = dca_rsastart(ctx, ciphertext, plaintext, req,
3932 DCA_RSA_DEC);
3933 break;
3934 default:
3935 /* Should never reach here */
3936 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
3937 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3938 error = CRYPTO_MECHANISM_INVALID;
3939 }
3940
3941 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3942 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3943 if (plaintext)
3944 plaintext->cd_length = 0;
3945 }
3946
3947 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
3948
3949 return (error);
3950 }
3951
3952 /* ARGSUSED */
3953 static int
dca_decrypt_update(crypto_ctx_t * ctx,crypto_data_t * ciphertext,crypto_data_t * plaintext,crypto_req_handle_t req)3954 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3955 crypto_data_t *plaintext, crypto_req_handle_t req)
3956 {
3957 int error = CRYPTO_FAILED;
3958 dca_t *softc;
3959
3960 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3961 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3962
3963 softc = DCA_SOFTC_FROM_CTX(ctx);
3964 DBG(softc, DENTRY, "dca_decrypt_update: started");
3965
3966 /* handle inplace ops */
3967 if (!plaintext) {
3968 dca_request_t *reqp = ctx->cc_provider_private;
3969 reqp->dr_flags |= DR_INPLACE;
3970 plaintext = ciphertext;
3971 }
3972
3973 /* check mechanism */
3974 switch (DCA_MECH_FROM_CTX(ctx)) {
3975 case DES_CBC_MECH_INFO_TYPE:
3976 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
3977 DR_DECRYPT);
3978 break;
3979 case DES3_CBC_MECH_INFO_TYPE:
3980 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
3981 DR_DECRYPT | DR_TRIPLE);
3982 break;
3983 default:
3984 /* Should never reach here */
3985 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
3986 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3987 error = CRYPTO_MECHANISM_INVALID;
3988 }
3989
3990 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
3991
3992 return (error);
3993 }
3994
3995 /* ARGSUSED */
3996 static int
dca_decrypt_final(crypto_ctx_t * ctx,crypto_data_t * plaintext,crypto_req_handle_t req)3997 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3998 crypto_req_handle_t req)
3999 {
4000 int error = CRYPTO_FAILED;
4001 dca_t *softc;
4002
4003 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4004 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4005
4006 softc = DCA_SOFTC_FROM_CTX(ctx);
4007 DBG(softc, DENTRY, "dca_decrypt_final: started");
4008
4009 /* check mechanism */
4010 switch (DCA_MECH_FROM_CTX(ctx)) {
4011 case DES_CBC_MECH_INFO_TYPE:
4012 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4013 break;
4014 case DES3_CBC_MECH_INFO_TYPE:
4015 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4016 break;
4017 default:
4018 /* Should never reach here */
4019 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4020 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4021 error = CRYPTO_MECHANISM_INVALID;
4022 }
4023
4024 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4025
4026 return (error);
4027 }
4028
4029 /* ARGSUSED */
4030 static int
dca_decrypt_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * ciphertext,crypto_data_t * plaintext,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4031 dca_decrypt_atomic(crypto_provider_handle_t provider,
4032 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4033 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4034 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4035 {
4036 int error = CRYPTO_FAILED;
4037 dca_t *softc = (dca_t *)provider;
4038
4039 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4040
4041 if (ctx_template != NULL)
4042 return (CRYPTO_ARGUMENTS_BAD);
4043
4044 /* handle inplace ops */
4045 if (!plaintext) {
4046 plaintext = ciphertext;
4047 }
4048
4049 /* check mechanism */
4050 switch (mechanism->cm_type) {
4051 case DES_CBC_MECH_INFO_TYPE:
4052 error = dca_3desatomic(provider, session_id, mechanism, key,
4053 ciphertext, plaintext, KM_SLEEP, req,
4054 DR_DECRYPT | DR_ATOMIC);
4055 break;
4056 case DES3_CBC_MECH_INFO_TYPE:
4057 error = dca_3desatomic(provider, session_id, mechanism, key,
4058 ciphertext, plaintext, KM_SLEEP, req,
4059 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4060 break;
4061 case RSA_PKCS_MECH_INFO_TYPE:
4062 case RSA_X_509_MECH_INFO_TYPE:
4063 error = dca_rsaatomic(provider, session_id, mechanism, key,
4064 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4065 break;
4066 default:
4067 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4068 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4069 error = CRYPTO_MECHANISM_INVALID;
4070 }
4071
4072 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4073 plaintext->cd_length = 0;
4074 }
4075
4076 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4077
4078 return (error);
4079 }
4080
4081 /*
4082 * Sign entry points.
4083 */
4084
4085 /* ARGSUSED */
4086 static int
dca_sign_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4087 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4088 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4089 crypto_req_handle_t req)
4090 {
4091 int error = CRYPTO_FAILED;
4092 dca_t *softc;
4093
4094 softc = DCA_SOFTC_FROM_CTX(ctx);
4095 DBG(softc, DENTRY, "dca_sign_init: started\n");
4096
4097 if (ctx_template != NULL)
4098 return (CRYPTO_ARGUMENTS_BAD);
4099
4100 /* check mechanism */
4101 switch (mechanism->cm_type) {
4102 case RSA_PKCS_MECH_INFO_TYPE:
4103 case RSA_X_509_MECH_INFO_TYPE:
4104 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4105 break;
4106 case DSA_MECH_INFO_TYPE:
4107 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4108 DCA_DSA_SIGN);
4109 break;
4110 default:
4111 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4112 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4113 error = CRYPTO_MECHANISM_INVALID;
4114 }
4115
4116 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4117
4118 if (error == CRYPTO_SUCCESS)
4119 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4120 &softc->dca_ctx_list_lock);
4121
4122 return (error);
4123 }
4124
4125 static int
dca_sign(crypto_ctx_t * ctx,crypto_data_t * data,crypto_data_t * signature,crypto_req_handle_t req)4126 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4127 crypto_data_t *signature, crypto_req_handle_t req)
4128 {
4129 int error = CRYPTO_FAILED;
4130 dca_t *softc;
4131
4132 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4133 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4134
4135 softc = DCA_SOFTC_FROM_CTX(ctx);
4136 DBG(softc, DENTRY, "dca_sign: started\n");
4137
4138 /* check mechanism */
4139 switch (DCA_MECH_FROM_CTX(ctx)) {
4140 case RSA_PKCS_MECH_INFO_TYPE:
4141 case RSA_X_509_MECH_INFO_TYPE:
4142 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4143 break;
4144 case DSA_MECH_INFO_TYPE:
4145 error = dca_dsa_sign(ctx, data, signature, req);
4146 break;
4147 default:
4148 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4149 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4150 error = CRYPTO_MECHANISM_INVALID;
4151 }
4152
4153 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4154
4155 return (error);
4156 }
4157
4158 /* ARGSUSED */
4159 static int
dca_sign_update(crypto_ctx_t * ctx,crypto_data_t * data,crypto_req_handle_t req)4160 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4161 crypto_req_handle_t req)
4162 {
4163 int error = CRYPTO_MECHANISM_INVALID;
4164 dca_t *softc;
4165
4166 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4167 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4168
4169 softc = DCA_SOFTC_FROM_CTX(ctx);
4170 DBG(softc, DENTRY, "dca_sign_update: started\n");
4171
4172 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4173 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4174
4175 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4176
4177 return (error);
4178 }
4179
4180 /* ARGSUSED */
4181 static int
dca_sign_final(crypto_ctx_t * ctx,crypto_data_t * signature,crypto_req_handle_t req)4182 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4183 crypto_req_handle_t req)
4184 {
4185 int error = CRYPTO_MECHANISM_INVALID;
4186 dca_t *softc;
4187
4188 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4189 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4190
4191 softc = DCA_SOFTC_FROM_CTX(ctx);
4192 DBG(softc, DENTRY, "dca_sign_final: started\n");
4193
4194 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4195 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4196
4197 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4198
4199 return (error);
4200 }
4201
4202 static int
dca_sign_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * data,crypto_data_t * signature,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4203 dca_sign_atomic(crypto_provider_handle_t provider,
4204 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4205 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4206 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4207 {
4208 int error = CRYPTO_FAILED;
4209 dca_t *softc = (dca_t *)provider;
4210
4211 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4212
4213 if (ctx_template != NULL)
4214 return (CRYPTO_ARGUMENTS_BAD);
4215
4216 /* check mechanism */
4217 switch (mechanism->cm_type) {
4218 case RSA_PKCS_MECH_INFO_TYPE:
4219 case RSA_X_509_MECH_INFO_TYPE:
4220 error = dca_rsaatomic(provider, session_id, mechanism, key,
4221 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4222 break;
4223 case DSA_MECH_INFO_TYPE:
4224 error = dca_dsaatomic(provider, session_id, mechanism, key,
4225 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4226 break;
4227 default:
4228 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4229 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4230 error = CRYPTO_MECHANISM_INVALID;
4231 }
4232
4233 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4234
4235 return (error);
4236 }
4237
4238 /* ARGSUSED */
4239 static int
dca_sign_recover_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4240 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4241 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4242 crypto_req_handle_t req)
4243 {
4244 int error = CRYPTO_FAILED;
4245 dca_t *softc;
4246
4247 softc = DCA_SOFTC_FROM_CTX(ctx);
4248 DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4249
4250 if (ctx_template != NULL)
4251 return (CRYPTO_ARGUMENTS_BAD);
4252
4253 /* check mechanism */
4254 switch (mechanism->cm_type) {
4255 case RSA_PKCS_MECH_INFO_TYPE:
4256 case RSA_X_509_MECH_INFO_TYPE:
4257 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4258 break;
4259 default:
4260 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4261 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4262 error = CRYPTO_MECHANISM_INVALID;
4263 }
4264
4265 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4266
4267 if (error == CRYPTO_SUCCESS)
4268 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4269 &softc->dca_ctx_list_lock);
4270
4271 return (error);
4272 }
4273
4274 static int
dca_sign_recover(crypto_ctx_t * ctx,crypto_data_t * data,crypto_data_t * signature,crypto_req_handle_t req)4275 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4276 crypto_data_t *signature, crypto_req_handle_t req)
4277 {
4278 int error = CRYPTO_FAILED;
4279 dca_t *softc;
4280
4281 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4282 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4283
4284 softc = DCA_SOFTC_FROM_CTX(ctx);
4285 DBG(softc, DENTRY, "dca_sign_recover: started\n");
4286
4287 /* check mechanism */
4288 switch (DCA_MECH_FROM_CTX(ctx)) {
4289 case RSA_PKCS_MECH_INFO_TYPE:
4290 case RSA_X_509_MECH_INFO_TYPE:
4291 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4292 break;
4293 default:
4294 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4295 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4296 error = CRYPTO_MECHANISM_INVALID;
4297 }
4298
4299 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4300
4301 return (error);
4302 }
4303
4304 static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * data,crypto_data_t * signature,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4305 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4306 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4307 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4308 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4309 {
4310 int error = CRYPTO_FAILED;
4311 dca_t *softc = (dca_t *)provider;
4312
4313 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4314
4315 if (ctx_template != NULL)
4316 return (CRYPTO_ARGUMENTS_BAD);
4317
4318 /* check mechanism */
4319 switch (mechanism->cm_type) {
4320 case RSA_PKCS_MECH_INFO_TYPE:
4321 case RSA_X_509_MECH_INFO_TYPE:
4322 error = dca_rsaatomic(provider, session_id, mechanism, key,
4323 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4324 break;
4325 default:
4326 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4327 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4328 error = CRYPTO_MECHANISM_INVALID;
4329 }
4330
4331 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4332
4333 return (error);
4334 }
4335
4336 /*
4337 * Verify entry points.
4338 */
4339
4340 /* ARGSUSED */
4341 static int
dca_verify_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4342 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4343 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4344 crypto_req_handle_t req)
4345 {
4346 int error = CRYPTO_FAILED;
4347 dca_t *softc;
4348
4349 softc = DCA_SOFTC_FROM_CTX(ctx);
4350 DBG(softc, DENTRY, "dca_verify_init: started\n");
4351
4352 if (ctx_template != NULL)
4353 return (CRYPTO_ARGUMENTS_BAD);
4354
4355 /* check mechanism */
4356 switch (mechanism->cm_type) {
4357 case RSA_PKCS_MECH_INFO_TYPE:
4358 case RSA_X_509_MECH_INFO_TYPE:
4359 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4360 break;
4361 case DSA_MECH_INFO_TYPE:
4362 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4363 DCA_DSA_VRFY);
4364 break;
4365 default:
4366 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4367 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4368 error = CRYPTO_MECHANISM_INVALID;
4369 }
4370
4371 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4372
4373 if (error == CRYPTO_SUCCESS)
4374 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4375 &softc->dca_ctx_list_lock);
4376
4377 return (error);
4378 }
4379
4380 static int
dca_verify(crypto_ctx_t * ctx,crypto_data_t * data,crypto_data_t * signature,crypto_req_handle_t req)4381 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4382 crypto_req_handle_t req)
4383 {
4384 int error = CRYPTO_FAILED;
4385 dca_t *softc;
4386
4387 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4388 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4389
4390 softc = DCA_SOFTC_FROM_CTX(ctx);
4391 DBG(softc, DENTRY, "dca_verify: started\n");
4392
4393 /* check mechanism */
4394 switch (DCA_MECH_FROM_CTX(ctx)) {
4395 case RSA_PKCS_MECH_INFO_TYPE:
4396 case RSA_X_509_MECH_INFO_TYPE:
4397 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4398 break;
4399 case DSA_MECH_INFO_TYPE:
4400 error = dca_dsa_verify(ctx, data, signature, req);
4401 break;
4402 default:
4403 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4404 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4405 error = CRYPTO_MECHANISM_INVALID;
4406 }
4407
4408 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4409
4410 return (error);
4411 }
4412
4413 /* ARGSUSED */
4414 static int
dca_verify_update(crypto_ctx_t * ctx,crypto_data_t * data,crypto_req_handle_t req)4415 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4416 crypto_req_handle_t req)
4417 {
4418 int error = CRYPTO_MECHANISM_INVALID;
4419 dca_t *softc;
4420
4421 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4422 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4423
4424 softc = DCA_SOFTC_FROM_CTX(ctx);
4425 DBG(softc, DENTRY, "dca_verify_update: started\n");
4426
4427 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4428 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4429
4430 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4431
4432 return (error);
4433 }
4434
4435 /* ARGSUSED */
4436 static int
dca_verify_final(crypto_ctx_t * ctx,crypto_data_t * signature,crypto_req_handle_t req)4437 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4438 crypto_req_handle_t req)
4439 {
4440 int error = CRYPTO_MECHANISM_INVALID;
4441 dca_t *softc;
4442
4443 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4444 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4445
4446 softc = DCA_SOFTC_FROM_CTX(ctx);
4447 DBG(softc, DENTRY, "dca_verify_final: started\n");
4448
4449 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4450 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4451
4452 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4453
4454 return (error);
4455 }
4456
4457 static int
dca_verify_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * data,crypto_data_t * signature,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4458 dca_verify_atomic(crypto_provider_handle_t provider,
4459 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4460 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4461 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4462 {
4463 int error = CRYPTO_FAILED;
4464 dca_t *softc = (dca_t *)provider;
4465
4466 DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4467
4468 if (ctx_template != NULL)
4469 return (CRYPTO_ARGUMENTS_BAD);
4470
4471 /* check mechanism */
4472 switch (mechanism->cm_type) {
4473 case RSA_PKCS_MECH_INFO_TYPE:
4474 case RSA_X_509_MECH_INFO_TYPE:
4475 error = dca_rsaatomic(provider, session_id, mechanism, key,
4476 signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4477 break;
4478 case DSA_MECH_INFO_TYPE:
4479 error = dca_dsaatomic(provider, session_id, mechanism, key,
4480 data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4481 break;
4482 default:
4483 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4484 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4485 error = CRYPTO_MECHANISM_INVALID;
4486 }
4487
4488 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4489
4490 return (error);
4491 }
4492
4493 /* ARGSUSED */
4494 static int
dca_verify_recover_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4495 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4496 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4497 crypto_req_handle_t req)
4498 {
4499 int error = CRYPTO_MECHANISM_INVALID;
4500 dca_t *softc;
4501
4502 softc = DCA_SOFTC_FROM_CTX(ctx);
4503 DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4504
4505 if (ctx_template != NULL)
4506 return (CRYPTO_ARGUMENTS_BAD);
4507
4508 /* check mechanism */
4509 switch (mechanism->cm_type) {
4510 case RSA_PKCS_MECH_INFO_TYPE:
4511 case RSA_X_509_MECH_INFO_TYPE:
4512 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4513 break;
4514 default:
4515 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4516 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4517 }
4518
4519 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4520
4521 if (error == CRYPTO_SUCCESS)
4522 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4523 &softc->dca_ctx_list_lock);
4524
4525 return (error);
4526 }
4527
4528 static int
dca_verify_recover(crypto_ctx_t * ctx,crypto_data_t * signature,crypto_data_t * data,crypto_req_handle_t req)4529 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4530 crypto_data_t *data, crypto_req_handle_t req)
4531 {
4532 int error = CRYPTO_MECHANISM_INVALID;
4533 dca_t *softc;
4534
4535 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4536 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4537
4538 softc = DCA_SOFTC_FROM_CTX(ctx);
4539 DBG(softc, DENTRY, "dca_verify_recover: started\n");
4540
4541 /* check mechanism */
4542 switch (DCA_MECH_FROM_CTX(ctx)) {
4543 case RSA_PKCS_MECH_INFO_TYPE:
4544 case RSA_X_509_MECH_INFO_TYPE:
4545 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4546 break;
4547 default:
4548 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4549 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4550 }
4551
4552 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4553
4554 return (error);
4555 }
4556
4557 static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,crypto_session_id_t session_id,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_data_t * data,crypto_data_t * signature,crypto_spi_ctx_template_t ctx_template,crypto_req_handle_t req)4558 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4559 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4560 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4561 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4562 {
4563 int error = CRYPTO_MECHANISM_INVALID;
4564 dca_t *softc = (dca_t *)provider;
4565
4566 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4567
4568 if (ctx_template != NULL)
4569 return (CRYPTO_ARGUMENTS_BAD);
4570
4571 /* check mechanism */
4572 switch (mechanism->cm_type) {
4573 case RSA_PKCS_MECH_INFO_TYPE:
4574 case RSA_X_509_MECH_INFO_TYPE:
4575 error = dca_rsaatomic(provider, session_id, mechanism, key,
4576 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4577 break;
4578 default:
4579 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4580 "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4581 error = CRYPTO_MECHANISM_INVALID;
4582 }
4583
4584 DBG(softc, DENTRY,
4585 "dca_verify_recover_atomic: done, err = 0x%x", error);
4586
4587 return (error);
4588 }
4589
4590 /*
4591 * Random number entry points.
4592 */
4593
4594 /* ARGSUSED */
4595 static int
dca_generate_random(crypto_provider_handle_t provider,crypto_session_id_t session_id,uchar_t * buf,size_t len,crypto_req_handle_t req)4596 dca_generate_random(crypto_provider_handle_t provider,
4597 crypto_session_id_t session_id,
4598 uchar_t *buf, size_t len, crypto_req_handle_t req)
4599 {
4600 int error = CRYPTO_FAILED;
4601 dca_t *softc = (dca_t *)provider;
4602
4603 DBG(softc, DENTRY, "dca_generate_random: started");
4604
4605 error = dca_rng(softc, buf, len, req);
4606
4607 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4608
4609 return (error);
4610 }
4611
4612 /*
4613 * Context management entry points.
4614 */
4615
4616 int
dca_free_context(crypto_ctx_t * ctx)4617 dca_free_context(crypto_ctx_t *ctx)
4618 {
4619 int error = CRYPTO_SUCCESS;
4620 dca_t *softc;
4621
4622 softc = DCA_SOFTC_FROM_CTX(ctx);
4623 DBG(softc, DENTRY, "dca_free_context: entered");
4624
4625 if (ctx->cc_provider_private == NULL)
4626 return (error);
4627
4628 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4629
4630 error = dca_free_context_low(ctx);
4631
4632 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4633
4634 return (error);
4635 }
4636
4637 static int
dca_free_context_low(crypto_ctx_t * ctx)4638 dca_free_context_low(crypto_ctx_t *ctx)
4639 {
4640 int error = CRYPTO_SUCCESS;
4641
4642 /* check mechanism */
4643 switch (DCA_MECH_FROM_CTX(ctx)) {
4644 case DES_CBC_MECH_INFO_TYPE:
4645 case DES3_CBC_MECH_INFO_TYPE:
4646 dca_3desctxfree(ctx);
4647 break;
4648 case RSA_PKCS_MECH_INFO_TYPE:
4649 case RSA_X_509_MECH_INFO_TYPE:
4650 dca_rsactxfree(ctx);
4651 break;
4652 case DSA_MECH_INFO_TYPE:
4653 dca_dsactxfree(ctx);
4654 break;
4655 default:
4656 /* Should never reach here */
4657 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4658 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4659 error = CRYPTO_MECHANISM_INVALID;
4660 }
4661
4662 return (error);
4663 }
4664
4665
4666 /* Free any unfreed private context. It is called in detach. */
4667 static void
dca_free_context_list(dca_t * dca)4668 dca_free_context_list(dca_t *dca)
4669 {
4670 dca_listnode_t *node;
4671 crypto_ctx_t ctx;
4672
4673 (void) memset(&ctx, 0, sizeof (ctx));
4674 ctx.cc_provider = dca;
4675
4676 while ((node = dca_delist2(&dca->dca_ctx_list,
4677 &dca->dca_ctx_list_lock)) != NULL) {
4678 ctx.cc_provider_private = node;
4679 (void) dca_free_context_low(&ctx);
4680 }
4681 }
4682
4683 static int
ext_info_sym(crypto_provider_handle_t prov,crypto_provider_ext_info_t * ext_info,crypto_req_handle_t cfreq)4684 ext_info_sym(crypto_provider_handle_t prov,
4685 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4686 {
4687 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4688 }
4689
4690 static int
ext_info_asym(crypto_provider_handle_t prov,crypto_provider_ext_info_t * ext_info,crypto_req_handle_t cfreq)4691 ext_info_asym(crypto_provider_handle_t prov,
4692 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4693 {
4694 int rv;
4695
4696 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4697 /* The asymmetric cipher slot supports random */
4698 ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4699
4700 return (rv);
4701 }
4702
4703 /* ARGSUSED */
4704 static int
ext_info_base(crypto_provider_handle_t prov,crypto_provider_ext_info_t * ext_info,crypto_req_handle_t cfreq,char * id)4705 ext_info_base(crypto_provider_handle_t prov,
4706 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4707 {
4708 dca_t *dca = (dca_t *)prov;
4709 int len;
4710
4711 /* Label */
4712 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4713 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4714 len = strlen((char *)ext_info->ei_label);
4715 (void) memset(ext_info->ei_label + len, ' ',
4716 CRYPTO_EXT_SIZE_LABEL - len);
4717
4718 /* Manufacturer ID */
4719 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4720 DCA_MANUFACTURER_ID);
4721 len = strlen((char *)ext_info->ei_manufacturerID);
4722 (void) memset(ext_info->ei_manufacturerID + len, ' ',
4723 CRYPTO_EXT_SIZE_MANUF - len);
4724
4725 /* Model */
4726 (void) sprintf((char *)ext_info->ei_model, dca->dca_model);
4727
4728 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4729
4730 len = strlen((char *)ext_info->ei_model);
4731 (void) memset(ext_info->ei_model + len, ' ',
4732 CRYPTO_EXT_SIZE_MODEL - len);
4733
4734 /* Serial Number. Blank for Deimos */
4735 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4736
4737 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4738
4739 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4740 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4741 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4742 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4743 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4744 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4745 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4746 ext_info->ei_hardware_version.cv_major = 0;
4747 ext_info->ei_hardware_version.cv_minor = 0;
4748 ext_info->ei_firmware_version.cv_major = 0;
4749 ext_info->ei_firmware_version.cv_minor = 0;
4750
4751 /* Time. No need to be supplied for token without a clock */
4752 ext_info->ei_time[0] = '\000';
4753
4754 return (CRYPTO_SUCCESS);
4755 }
4756
/*
 * Register this instance's Fault Management capabilities with the I/O
 * Fault Services framework.  The default capability mask (ereports,
 * access/DMA checking, error callback) may be overridden with the
 * "fm-capable" property in dca.conf.  Called from attach; note that it
 * also adjusts the driver-global register-access and DMA attributes,
 * so it must run before any handles are allocated with them.
 */
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	/* default: ereports + access/DMA checking + error callback */
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		/* enable flagged (checked) register and DMA accesses */
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
4806
4807
/*
 * Undo dca_fma_init(): tear down ereport state, unregister the error
 * callback, then unregister from I/O Fault Services — in that order,
 * mirroring (in reverse) the registration sequence.  Called from
 * detach; a no-op if no capabilities were registered.
 */
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}
4836
4837
4838 /*
4839 * The IO fault service error handling callback function
4840 */
4841 /*ARGSUSED*/
4842 static int
dca_fm_error_cb(dev_info_t * dip,ddi_fm_error_t * err,const void * impl_data)4843 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4844 {
4845 dca_t *dca = (dca_t *)impl_data;
4846
4847 pci_ereport_post(dip, err, NULL);
4848 if (err->fme_status == DDI_FM_FATAL) {
4849 dca_failure(dca, DDI_DATAPATH_FAULT,
4850 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4851 "fault PCI in FMA callback.");
4852 }
4853 return (err->fme_status);
4854 }
4855
4856
4857 static int
dca_check_acc_handle(dca_t * dca,ddi_acc_handle_t handle,dca_fma_eclass_t eclass_index)4858 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4859 dca_fma_eclass_t eclass_index)
4860 {
4861 ddi_fm_error_t de;
4862 int version = 0;
4863
4864 ddi_fm_acc_err_get(handle, &de, version);
4865 if (de.fme_status != DDI_FM_OK) {
4866 dca_failure(dca, DDI_DATAPATH_FAULT,
4867 eclass_index, fm_ena_increment(de.fme_ena),
4868 CRYPTO_DEVICE_ERROR, "");
4869 return (DDI_FAILURE);
4870 }
4871
4872 return (DDI_SUCCESS);
4873 }
4874
4875 int
dca_check_dma_handle(dca_t * dca,ddi_dma_handle_t handle,dca_fma_eclass_t eclass_index)4876 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4877 dca_fma_eclass_t eclass_index)
4878 {
4879 ddi_fm_error_t de;
4880 int version = 0;
4881
4882 ddi_fm_dma_err_get(handle, &de, version);
4883 if (de.fme_status != DDI_FM_OK) {
4884 dca_failure(dca, DDI_DATAPATH_FAULT,
4885 eclass_index, fm_ena_increment(de.fme_ena),
4886 CRYPTO_DEVICE_ERROR, "");
4887 return (DDI_FAILURE);
4888 }
4889 return (DDI_SUCCESS);
4890 }
4891
4892 static uint64_t
dca_ena(uint64_t ena)4893 dca_ena(uint64_t ena)
4894 {
4895 if (ena == 0)
4896 ena = fm_ena_generate(0, FM_ENA_FMT1);
4897 else
4898 ena = fm_ena_increment(ena);
4899 return (ena);
4900 }
4901
4902 static char *
dca_fma_eclass_string(char * model,dca_fma_eclass_t index)4903 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
4904 {
4905 if (strstr(model, "500"))
4906 return (dca_fma_eclass_sca500[index]);
4907 else
4908 return (dca_fma_eclass_sca1000[index]);
4909 }
4910