/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/ioccom.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/crypto/dca.h>

/*
 * Core Deimos driver.
 */

static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
static void dca_free_context_list(dca_t *dca);
static int dca_free_context_low(crypto_ctx_t *ctx);
static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int dca_suspend(dca_t *);
static int dca_resume(dca_t *);
static int dca_init(dca_t *);
static int dca_reset(dca_t *, int);
static int dca_initworklist(dca_t *, dca_worklist_t *);
static void dca_uninit(dca_t *);
static void dca_initq(dca_listnode_t *);
static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t *dca_dequeue(dca_listnode_t *);
static dca_listnode_t *dca_unqueue(dca_listnode_t *);
static dca_request_t *dca_newreq(dca_t *);
static dca_work_t *dca_getwork(dca_t *, int);
static void dca_freework(dca_work_t *);
static dca_work_t *dca_newwork(dca_t *);
static void dca_destroywork(dca_work_t *);
static void dca_schedule(dca_t *, int);
static void dca_reclaim(dca_t *, int);
static uint_t dca_intr(char *);
static void dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void dca_jobtimeout(void *);
static int dca_drain(dca_t *);
static void dca_undrain(dca_t *);
static void dca_rejectjobs(dca_t *);

#ifdef SCHEDDELAY
static void dca_schedtimeout(void *);
#endif
/*
 * We want these inlined for performance.
 */
#ifndef DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif

/*
 * Device operations.
 */
static struct dev_ops devops = {
    DEVO_REV,                   /* devo_rev */
    0,                          /* devo_refcnt */
    nodev,                      /* devo_getinfo */
    nulldev,                    /* devo_identify */
    nulldev,                    /* devo_probe */
    dca_attach,                 /* devo_attach */
    dca_detach,                 /* devo_detach */
    nodev,                      /* devo_reset */
    NULL,                       /* devo_cb_ops */
    NULL,                       /* devo_bus_ops */
    ddi_power,                  /* devo_power */
    ddi_quiesce_not_supported,  /* devo_quiesce */
};

#define IDENT       "PCI Crypto Accelerator"
#define IDENT_SYM   "Crypto Accel Sym 2.0"
#define IDENT_ASYM  "Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define IDENT3      "PCI Crypto Accelerator Mod 2.0"

#define VENDOR      "Sun Microsystems, Inc."

#define STALETIME   (30 * SECOND)

#define crypto_prov_notify crypto_provider_notification
        /* A 28 char function name doesn't leave much line space */

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
    &mod_driverops,     /* drv_modops */
    IDENT,              /* drv_linkinfo */
    &devops,            /* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
    &mod_cryptoops,
    IDENT3
};

static struct modlinkage modlinkage = {
    MODREV_1,           /* ml_rev */
    &modldrv,           /* ml_linkage */
    &modlcrypto,
    NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
    /* DES-CBC */
    {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
    /* 3DES-CBC */
    {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
    /* DSA */
    {SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
        CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
        DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS},

    /* RSA */
    {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS},
    {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
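
/*
 * For illustration: the symmetric table above declares key sizes in
 * bytes (CRYPTO_KEYSIZE_UNIT_IN_BYTES), while the asymmetric table
 * scales its byte limits by 8 and declares them in bits, so a
 * 2048-bit RSA modulus is advertised as 2048 rather than 256.
 */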

static void dca_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t dca_control_ops = {
    dca_provider_status
};

static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t dca_cipher_ops = {
    dca_encrypt_init,
    dca_encrypt,
    dca_encrypt_update,
    dca_encrypt_final,
    dca_encrypt_atomic,
    dca_decrypt_init,
    dca_decrypt,
    dca_decrypt_update,
    dca_decrypt_final,
    dca_decrypt_atomic
};

static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_sign_ops_t dca_sign_ops = {
    dca_sign_init,
    dca_sign,
    dca_sign_update,
    dca_sign_final,
    dca_sign_atomic,
    dca_sign_recover_init,
    dca_sign_recover,
    dca_sign_recover_atomic
};

static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_verify_ops_t dca_verify_ops = {
    dca_verify_init,
    dca_verify,
    dca_verify_update,
    dca_verify_final,
    dca_verify_atomic,
    dca_verify_recover_init,
    dca_verify_recover,
    dca_verify_recover_atomic
};

static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t dca_random_number_ops = {
    NULL,
    dca_generate_random
};

static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
    ext_info_sym,   /* ext_info */
    NULL,           /* init_token */
    NULL,           /* init_pin */
    NULL            /* set_pin */
};

static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
    ext_info_asym,  /* ext_info */
    NULL,           /* init_token */
    NULL,           /* init_pin */
    NULL            /* set_pin */
};

int dca_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t dca_ctx_ops = {
    NULL,
    dca_free_context
};

/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
    &dca_control_ops,
    NULL,                       /* digest_ops */
    &dca_cipher_ops,
    NULL,                       /* mac_ops */
    NULL,                       /* sign_ops */
    NULL,                       /* verify_ops */
    NULL,                       /* dual_ops */
    NULL,                       /* cipher_mac_ops */
    NULL,                       /* random_number_ops */
    NULL,                       /* session_ops */
    NULL,                       /* object_ops */
    NULL,                       /* key_ops */
    &dca_provmanage_ops_1,      /* management_ops */
    &dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
    &dca_control_ops,
    NULL,                       /* digest_ops */
    &dca_cipher_ops,
    NULL,                       /* mac_ops */
    &dca_sign_ops,
    &dca_verify_ops,
    NULL,                       /* dual_ops */
    NULL,                       /* cipher_mac_ops */
    &dca_random_number_ops,
    NULL,                       /* session_ops */
    NULL,                       /* object_ops */
    NULL,                       /* key_ops */
    &dca_provmanage_ops_2,      /* management_ops */
    &dca_ctx_ops
};

/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
    CRYPTO_SPI_VERSION_1,
    NULL,                       /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL,                       /* pi_provider_dev */
    NULL,                       /* pi_provider_handle */
    &dca_crypto_ops1,
    sizeof (dca_mech_info_tab1) / sizeof (crypto_mech_info_t),
    dca_mech_info_tab1,
    0,                          /* pi_logical_provider_count */
    NULL                        /* pi_logical_providers */
};
/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
    CRYPTO_SPI_VERSION_1,
    NULL,                       /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL,                       /* pi_provider_dev */
    NULL,                       /* pi_provider_handle */
    &dca_crypto_ops2,
    sizeof (dca_mech_info_tab2) / sizeof (crypto_mech_info_t),
    dca_mech_info_tab2,
    0,                          /* pi_logical_provider_count */
    NULL                        /* pi_logical_providers */
};

/* Convenience macros */
/* Retrieve the softc and instance number from a SPI crypto context */
#define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \
    (softc) = (dca_t *)(ctx)->cc_provider; \
    (instance) = ddi_get_instance((softc)->dca_dip); \
}

#define DCA_MECH_FROM_CTX(ctx) \
    (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);


static dca_device_t dca_devices[] = {
    /* Broadcom vanilla variants */
    { 0x14e4, 0x5820, "Broadcom 5820" },
    { 0x14e4, 0x5821, "Broadcom 5821" },
    { 0x14e4, 0x5822, "Broadcom 5822" },
    { 0x14e4, 0x5825, "Broadcom 5825" },
    /* Sun-specific OEM'd variants */
    { 0x108e, 0x5454, "SCA" },
    { 0x108e, 0x5455, "SCA 1000" },
    { 0x108e, 0x5457, "SCA 500" },
    /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
    { 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};
#endif

static struct ddi_dma_attr dca_dmaattr = {
    DMA_ATTR_V0,            /* dma_attr_version */
    0x0,                    /* dma_attr_addr_lo */
    0xffffffffUL,           /* dma_attr_addr_hi */
    0x00ffffffUL,           /* dma_attr_count_max */
    0x40,                   /* dma_attr_align */
    0x40,                   /* dma_attr_burstsizes */
    0x1,                    /* dma_attr_minxfer */
    0x00ffffffUL,           /* dma_attr_maxxfer */
    0xffffffffUL,           /* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
    512,                    /* dma_attr_sgllen */
#else
    1,                      /* dma_attr_sgllen */
#endif
    1,                      /* dma_attr_granular */
    DDI_DMA_FLAGERR         /* dma_attr_flags */
};
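
/*
 * For illustration: with dma_attr_sgllen set to 1 (sparc), a bind in
 * dca_bindchains_one() always yields a single cookie and hence a
 * one-entry descriptor chain; with 512 (x86), a fragmented buffer may
 * return many cookies, each becoming one descriptor in the chain.
 */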

static void *dca_state = NULL;
int dca_mindma = 2500;

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
    "sca1000.hw.device",
    "sca1000.hw.timeout",
    "sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
    "sca500.hw.device",
    "sca500.hw.timeout",
    "sca500.none"
};

/*
 * DDI entry points.
 */
int
_init(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _init");

    if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
        /* this should *never* happen! */
        return (rv);
    }

    if ((rv = mod_install(&modlinkage)) != 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
        return (rv);
    }

    return (0);
}

int
_fini(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _fini");

    if ((rv = mod_remove(&modlinkage)) == 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
    }
    return (rv);
}

int
_info(struct modinfo *modinfop)
{
    DBG(NULL, DMOD, "dca: in _info");

    return (mod_info(&modlinkage, modinfop));
}

int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    ddi_acc_handle_t pci;
    int instance;
    ddi_iblock_cookie_t ibc;
    int intr_added = 0;
    dca_t *dca;
    ushort_t venid;
    ushort_t devid;
    ushort_t revid;
    ushort_t subsysid;
    ushort_t subvenid;
    int i;
    int ret;
    char ID[64];
    static char *unknowndev = "Unknown device";

#if DEBUG
    /* these are only used for debugging */
    ushort_t pcicomm;
    ushort_t pcistat;
    uchar_t cachelinesz;
    uchar_t mingnt;
    uchar_t maxlat;
    uchar_t lattmr;
#endif

    instance = ddi_get_instance(dip);

    DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

    switch (cmd) {
    case DDI_RESUME:
        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
            dca_diperror(dip, "no soft state in resume");
            return (DDI_FAILURE);
        }
        /* assumption: we won't be DDI_DETACHed until we return */
        return (dca_resume(dca));
    case DDI_ATTACH:
        break;
    default:
        return (DDI_FAILURE);
    }

    if (ddi_slaveonly(dip) == DDI_SUCCESS) {
        dca_diperror(dip, "slot does not support PCI bus-master");
        return (DDI_FAILURE);
    }

    if (ddi_intr_hilevel(dip, 0) != 0) {
        dca_diperror(dip, "hilevel interrupts not supported");
        return (DDI_FAILURE);
    }

    if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /* common PCI attributes */
    venid = pci_config_get16(pci, PCI_VENID);
    devid = pci_config_get16(pci, PCI_DEVID);
    revid = pci_config_get8(pci, PCI_REVID);
    subvenid = pci_config_get16(pci, PCI_SUBVENID);
    subsysid = pci_config_get16(pci, PCI_SUBSYSID);

    /*
     * Broadcom-specific timings.
     * We disable these timers/counters since they can cause
     * spurious failures when the bus is just a little bit slow,
     * or busy.
     */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
    /* read registers (for debugging) */
    pcicomm = pci_config_get16(pci, PCI_COMM);
    pcistat = pci_config_get16(pci, PCI_STATUS);
    cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
    mingnt = pci_config_get8(pci, PCI_MINGNT);
    maxlat = pci_config_get8(pci, PCI_MAXLAT);
    lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

    pci_config_teardown(&pci);

    if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to get iblock cookie");
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to allocate soft state");
        return (DDI_FAILURE);
    }

    dca = ddi_get_soft_state(dca_state, instance);
    ASSERT(dca != NULL);
    dca->dca_dip = dip;
    WORKLIST(dca, MCR1)->dwl_prov = NULL;
    WORKLIST(dca, MCR2)->dwl_prov = NULL;
    /* figure pagesize */
    dca->dca_pagesize = ddi_ptob(dip, 1);

    /*
     * Search for the device in our supported devices table. This
     * is here for two reasons. First, we want to ensure that
     * only Sun-qualified (and presumably Sun-labeled) devices can
     * be used with this driver. Second, some devices have
     * specific differences. E.g. the 5821 has support for a
     * special mode of RC4, deeper queues, power management, and
     * other changes. Also, the export versions of some of these
     * chips don't support RC4 or 3DES, so we catch that here.
     *
     * Note that we only look at the upper nibble of the device
     * id, which is used to distinguish export vs. domestic
     * versions of the chip. (The lower nibble is used for
     * stepping information.)
     */
    for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
        /*
         * Try to match the subsystem information first.
         */
        if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
            subsysid && (subsysid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
            break;
        }
        /*
         * Failing that, try the generic vendor and device id.
         * Even if we find a match, we keep searching anyway,
         * since we would prefer to find a match based on the
         * subsystem ids.
         */
        if ((venid == dca_devices[i].dd_vendor_id) &&
            (devid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
        }
    }
    /* try and handle an unrecognized device */
    if (dca->dca_model == NULL) {
        dca->dca_model = unknowndev;
        dca_error(dca, "device not recognized, not supported");
        DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
            i, venid, devid, revid);
    }

    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
        dca->dca_model) != DDI_SUCCESS) {
        dca_error(dca, "unable to create description property");
        return (DDI_FAILURE);
    }

    DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
        pcicomm, pcistat, cachelinesz);
    DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
        mingnt, maxlat, lattmr);

    /*
     * initialize locks, etc.
     */
    (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

    /* use RNGSHA1 by default */
    if (ddi_getprop(DDI_DEV_T_ANY, dip,
        DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
        dca->dca_flags |= DCA_RNGSHA1;
    }

    /* initialize FMA */
    dca_fma_init(dca);

    /* initialize some key data structures */
    if (dca_init(dca) != DDI_SUCCESS) {
        goto failed;
    }

    /* initialize kstats */
    dca_ksinit(dca);

    /* setup access to registers */
    if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
        0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
        dca_error(dca, "unable to map registers");
        goto failed;
    }

    DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
    DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
    DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
    DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
    DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

    /* reset the chip */
    if (dca_reset(dca, 0) < 0) {
        goto failed;
    }

    /* initialize the chip */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* add the interrupt */
    if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
        (void *)dca) != DDI_SUCCESS) {
        DBG(dca, DWARN, "ddi_add_intr failed");
        goto failed;
    } else {
        intr_added = 1;
    }

    /* enable interrupts on the device */
    /*
     * XXX: Note, 5820A1 errata indicates that this may clobber
     * bits 24 and 23, which affect the speed of the RNG. Since
     * we always want to run in full-speed mode, this should be
     * harmless.
     */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 - increase the DMA read size */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* register MCR1 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
    dca_prov_info1.pi_provider_description = ID;
    dca_prov_info1.pi_provider_dev.pd_hw = dip;
    dca_prov_info1.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info1,
        &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR1", ret);
        goto failed;
    }

    /* register MCR2 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
    dca_prov_info2.pi_provider_description = ID;
    dca_prov_info2.pi_provider_dev.pd_hw = dip;
    dca_prov_info2.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info2,
        &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR2", ret);
        goto failed;
    }

    crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
        CRYPTO_PROVIDER_READY);
    crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
        CRYPTO_PROVIDER_READY);

    /* Initialize the local random number pool for this instance */
    if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
        goto failed;
    }

    mutex_enter(&dca->dca_intrlock);
    dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
        drv_usectohz(SECOND));
    mutex_exit(&dca->dca_intrlock);

    ddi_set_driver_private(dip, (caddr_t)dca);

    ddi_report_dev(dip);

    if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
        ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
    }

    return (DDI_SUCCESS);

failed:
    /* unregister from the crypto framework */
    if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR1)->dwl_prov);
    }
    if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR2)->dwl_prov);
    }
    if (intr_added) {
        CLRBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
        /* unregister intr handler */
        ddi_remove_intr(dip, 0, dca->dca_icookie);
    }
    if (dca->dca_regs_handle) {
        ddi_regs_map_free(&dca->dca_regs_handle);
    }
    if (dca->dca_intrstats) {
        kstat_delete(dca->dca_intrstats);
    }
    if (dca->dca_ksp) {
        kstat_delete(dca->dca_ksp);
    }
    dca_uninit(dca);

    /* finalize FMA */
    dca_fma_fini(dca);

    mutex_destroy(&dca->dca_intrlock);
    ddi_soft_state_free(dca_state, instance);
    return (DDI_FAILURE);
}
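
/*
 * For illustration: the provider description built above for instance 0
 * would be "dca/0 Crypto Accel Sym 2.0" (26 characters), within the
 * 32-character limit noted at the sprintf() calls.
 */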
detach"); 912 return (DDI_FAILURE); 913 } 914 /* assumption: we won't be DDI_DETACHed until we return */ 915 return (dca_suspend(dca)); 916 917 case DDI_DETACH: 918 break; 919 default: 920 return (DDI_FAILURE); 921 } 922 923 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) { 924 dca_diperror(dip, "no soft state in detach"); 925 return (DDI_FAILURE); 926 } 927 928 /* 929 * Unregister from kCF. 930 * This needs to be done at the beginning of detach. 931 */ 932 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) { 933 if (crypto_unregister_provider( 934 WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) { 935 dca_error(dca, "unable to unregister MCR1 from kcf"); 936 return (DDI_FAILURE); 937 } 938 } 939 940 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) { 941 if (crypto_unregister_provider( 942 WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) { 943 dca_error(dca, "unable to unregister MCR2 from kcf"); 944 return (DDI_FAILURE); 945 } 946 } 947 948 /* 949 * Cleanup the private context list. Once the 950 * crypto_unregister_provider returns, it is safe to do so. 951 */ 952 dca_free_context_list(dca); 953 954 /* Cleanup the local random number pool */ 955 dca_random_fini(dca); 956 957 /* send any jobs in the waitq back to kCF */ 958 dca_rejectjobs(dca); 959 960 /* untimeout the timeouts */ 961 mutex_enter(&dca->dca_intrlock); 962 tid = dca->dca_jobtid; 963 dca->dca_jobtid = 0; 964 mutex_exit(&dca->dca_intrlock); 965 if (tid) { 966 (void) untimeout(tid); 967 } 968 969 /* disable device interrupts */ 970 CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 971 972 /* unregister interrupt handlers */ 973 ddi_remove_intr(dip, 0, dca->dca_icookie); 974 975 /* release our regs handle */ 976 ddi_regs_map_free(&dca->dca_regs_handle); 977 978 /* toss out kstats */ 979 if (dca->dca_intrstats) { 980 kstat_delete(dca->dca_intrstats); 981 } 982 if (dca->dca_ksp) { 983 kstat_delete(dca->dca_ksp); 984 } 985 986 mutex_destroy(&dca->dca_intrlock); 987 dca_uninit(dca); 988 989 /* finalize FMA */ 990 dca_fma_fini(dca); 991 992 ddi_soft_state_free(dca_state, instance); 993 994 return (DDI_SUCCESS); 995 } 996 997 int 998 dca_resume(dca_t *dca) 999 { 1000 ddi_acc_handle_t pci; 1001 1002 if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) { 1003 dca_error(dca, "unable to setup PCI config handle"); 1004 return (DDI_FAILURE); 1005 } 1006 1007 /* 1008 * Reprogram registers in PCI configuration space. 1009 */ 1010 1011 /* Broadcom-specific timers -- we disable them. */ 1012 pci_config_put8(pci, PCI_TRDYTO, 0); 1013 pci_config_put8(pci, PCI_RETRIES, 0); 1014 1015 /* initialize PCI access settings */ 1016 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE | 1017 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE); 1018 1019 /* set up our PCI latency timer */ 1020 pci_config_put8(pci, PCI_LATTMR, 0x40); 1021 1022 pci_config_teardown(&pci); 1023 1024 if (dca_reset(dca, 0) < 0) { 1025 dca_error(dca, "unable to reset device during resume"); 1026 return (DDI_FAILURE); 1027 } 1028 1029 /* 1030 * Now restore the card-specific CSRs. 

int
dca_resume(dca_t *dca)
{
    ddi_acc_handle_t pci;

    if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
        dca_error(dca, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /*
     * Reprogram registers in PCI configuration space.
     */

    /* Broadcom-specific timers -- we disable them. */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

    pci_config_teardown(&pci);

    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during resume");
        return (DDI_FAILURE);
    }

    /*
     * Now restore the card-specific CSRs.
     */

    /* restore endianness settings */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* restore interrupt enables */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 set 256 byte read size to improve performance */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* resume scheduling jobs on the device */
    dca_undrain(dca);

    return (DDI_SUCCESS);
}

int
dca_suspend(dca_t *dca)
{
    if ((dca_drain(dca)) != 0) {
        return (DDI_FAILURE);
    }
    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during suspend");
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
int
dca_reset(dca_t *dca, int failreset)
{
    int i;

    if (dca->dca_regs_handle == NULL) {
        return (-1);
    }

    PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
    if (!failreset) {
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
            return (-1);
    }

    /* now wait for a reset */
    for (i = 1; i < 100; i++) {
        uint32_t dmactl;
        drv_usecwait(100);
        dmactl = GETCSR(dca, CSR_DMACTL);
        if (!failreset) {
            if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (-1);
        }
        if ((dmactl & DMACTL_RESET) == 0) {
            DBG(dca, DCHATTY, "reset in %d usec", i * 100);
            return (0);
        }
    }
    if (!failreset) {
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "timeout waiting for reset after %d usec", i * 100);
    }
    return (-1);
}
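
/*
 * For illustration: the polling loop in dca_reset() above waits 100
 * usec per iteration for up to 99 iterations, so a device that fails
 * to clear DMACTL_RESET within roughly 10 msec is treated as a reset
 * timeout.
 */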

int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
    int i;
    int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

    /*
     * Set up work queue.
     */
    mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
        dca->dca_icookie);
    mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

    mutex_enter(&wlp->dwl_lock);

    dca_initq(&wlp->dwl_freereqs);
    dca_initq(&wlp->dwl_waitq);
    dca_initq(&wlp->dwl_freework);
    dca_initq(&wlp->dwl_runq);

    for (i = 0; i < MAXWORK; i++) {
        dca_work_t *workp;

        if ((workp = dca_newwork(dca)) == NULL) {
            dca_error(dca, "unable to allocate work");
            mutex_exit(&wlp->dwl_lock);
            return (DDI_FAILURE);
        }
        workp->dw_wlp = wlp;
        dca_freework(workp);
    }
    mutex_exit(&wlp->dwl_lock);

    for (i = 0; i < reqprealloc; i++) {
        dca_request_t *reqp;

        if ((reqp = dca_newreq(dca)) == NULL) {
            dca_error(dca, "unable to allocate request");
            return (DDI_FAILURE);
        }
        reqp->dr_dca = dca;
        reqp->dr_wlp = wlp;
        dca_freereq(reqp);
    }
    return (DDI_SUCCESS);
}

int
dca_init(dca_t *dca)
{
    dca_worklist_t *wlp;

    /* Initialize the private context list and the corresponding lock. */
    mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
    dca_initq(&dca->dca_ctx_list);

    /*
     * MCR1 algorithms.
     */
    wlp = WORKLIST(dca, MCR1);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_lowater", MCR1LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_hiwater", MCR1HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR1;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    /*
     * MCR2 algorithms.
     */
    wlp = WORKLIST(dca, MCR2);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_lowater", MCR2LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_hiwater", MCR2HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR2;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}
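
/*
 * For illustration: each worklist above preallocates dwl_hiwater +
 * (MAXWORK * MAXREQSPERMCR) requests in dca_initworklist(), so under
 * normal load dca_getreq() can be satisfied from dwl_freereqs without
 * falling back to a fresh dca_newreq() allocation.
 */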

/*
 * Uninitialize worklists. This routine should only be called when no
 * active jobs (hence DMA mappings) exist. One way to ensure this is
 * to unregister from kCF before calling this routine. (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
    int mcr;

    mutex_destroy(&dca->dca_ctx_list_lock);

    for (mcr = MCR1; mcr <= MCR2; mcr++) {
        dca_worklist_t *wlp = WORKLIST(dca, mcr);
        dca_work_t *workp;
        dca_request_t *reqp;

        if (dca->dca_regs_handle == NULL) {
            continue;
        }

        mutex_enter(&wlp->dwl_lock);
        while ((workp = dca_getwork(dca, mcr)) != NULL) {
            dca_destroywork(workp);
        }
        mutex_exit(&wlp->dwl_lock);
        while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
            dca_destroyreq(reqp);
        }

        mutex_destroy(&wlp->dwl_lock);
        mutex_destroy(&wlp->dwl_freereqslock);
        mutex_destroy(&wlp->dwl_freelock);
        cv_destroy(&wlp->dwl_cv);
        wlp->dwl_prov = NULL;
    }
}

static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
    if (!q || !node)
        return;

    mutex_enter(lock);
    node->dl_next2 = q;
    node->dl_prev2 = q->dl_prev2;
    node->dl_next2->dl_prev2 = node;
    node->dl_prev2->dl_next2 = node;
    mutex_exit(lock);
}

static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
    if (!node)
        return;

    mutex_enter(lock);
    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);
}

static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
    dca_listnode_t *node;

    mutex_enter(lock);
    if ((node = q->dl_next2) == q) {
        mutex_exit(lock);
        return (NULL);
    }

    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);

    return (node);
}

void
dca_initq(dca_listnode_t *q)
{
    q->dl_next = q;
    q->dl_prev = q;
    q->dl_next2 = q;
    q->dl_prev2 = q;
}

void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
    /*
     * Enqueue submits at the "tail" of the list, i.e. just
     * behind the sentinel.
     */
    node->dl_next = q;
    node->dl_prev = q->dl_prev;
    node->dl_next->dl_prev = node;
    node->dl_prev->dl_next = node;
}

void
dca_rmqueue(dca_listnode_t *node)
{
    node->dl_next->dl_prev = node->dl_prev;
    node->dl_prev->dl_next = node->dl_next;
    node->dl_next = NULL;
    node->dl_prev = NULL;
}

dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
    dca_listnode_t *node;
    /*
     * Dequeue takes from the "head" of the list, i.e. just after
     * the sentinel.
     */
    if ((node = q->dl_next) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}
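
/*
 * For illustration: after dca_initq(&q), enqueueing A then B yields the
 * circular list q <-> A <-> B <-> q. dca_dequeue() then returns A
 * (FIFO) while dca_unqueue() below returns B (LIFO); an empty queue is
 * one whose dl_next still points at the sentinel itself.
 */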

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;
    /*
     * unqueue takes from the "tail" of the list, i.e. just before
     * the sentinel.
     */
    if ((node = q->dl_prev) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}

dca_listnode_t *
dca_peekqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;

    if ((node = q->dl_next) == q) {
        return (NULL);
    } else {
        return (node);
    }
}

/*
 * Interrupt service routine.
 */
uint_t
dca_intr(char *arg)
{
    dca_t *dca = (dca_t *)arg;
    uint32_t status;

    mutex_enter(&dca->dca_intrlock);
    status = GETCSR(dca, CSR_DMASTAT);
    PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        mutex_exit(&dca->dca_intrlock);
        return ((uint_t)DDI_FAILURE);
    }

    DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

    if ((status & DMASTAT_INTERRUPTS) == 0) {
        /* increment spurious interrupt kstat */
        if (dca->dca_intrstats) {
            KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
        }
        mutex_exit(&dca->dca_intrlock);
        return (DDI_INTR_UNCLAIMED);
    }

    if (dca->dca_intrstats) {
        KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
    }
    if (status & DMASTAT_MCR1INT) {
        DBG(dca, DINTR, "MCR1 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
        dca_schedule(dca, MCR1);
        dca_reclaim(dca, MCR1);
        mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
    }

    if (status & DMASTAT_MCR2INT) {
        DBG(dca, DINTR, "MCR2 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
        dca_schedule(dca, MCR2);
        dca_reclaim(dca, MCR2);
        mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
    }

    if (status & DMASTAT_ERRINT) {
        uint32_t erraddr;
        erraddr = GETCSR(dca, CSR_DMAEA);
        mutex_exit(&dca->dca_intrlock);

        /*
         * bit 1 of the error address indicates failure during
         * read if set, during write otherwise.
         */
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "DMA master access error %s address 0x%x",
            erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
        return (DDI_INTR_CLAIMED);
    }

    mutex_exit(&dca->dca_intrlock);

    return (DDI_INTR_CLAIMED);
}

/*
 * Reverse a string of bytes from s1 into s2. The reversal happens
 * from the tail of s1. If len1 < len2, then null bytes will be
 * padded to the end of s2. If len2 < len1, then (presumably null)
 * bytes will be dropped from the start of s1.
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
    caddr_t src, dst;

    if (len1 == 0) {
        if (len2) {
            bzero(s2, len2);
        }
        return;
    }
    src = (caddr_t)s1 + len1 - 1;
    dst = s2;
    while ((src >= (caddr_t)s1) && (len2)) {
        *dst++ = *src--;
        len2--;
    }
    while (len2 > 0) {
        *dst++ = 0;
        len2--;
    }
}
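
/*
 * For illustration: dca_reverse(src, dst, 3, 4) with src = { 0x01,
 * 0x02, 0x03 } stores dst = { 0x03, 0x02, 0x01, 0x00 }; the source is
 * copied back-to-front and the leftover destination byte is padded
 * with a trailing null, per the conversion described above.
 */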

uint16_t
dca_padfull(int num)
{
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    if (num <= 1536) {
        return (BITS2BYTES(1536));
    }
    if (num <= 2048) {
        return (BITS2BYTES(2048));
    }
    return (0);
}

uint16_t
dca_padhalf(int num)
{
    if (num <= 256) {
        return (BITS2BYTES(256));
    }
    if (num <= 384) {
        return (BITS2BYTES(384));
    }
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    return (0);
}
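
/*
 * For illustration (assuming BITS2BYTES(x) is x / 8): dca_padfull(1000)
 * falls through to the 1024-bit bucket and returns BITS2BYTES(1024),
 * i.e. 128 bytes; anything above 2048 bits returns 0 to flag an
 * unsupported operand size.
 */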

dca_work_t *
dca_newwork(dca_t *dca)
{
    dca_work_t *workp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;

    workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA handle");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
        ROUNDUP(MCR_SIZE, dca->dca_pagesize),
        &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
        &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
        workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
        DDI_DMA_SLEEP, NULL, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "unable to map MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    workp->dw_mcr_paddr = c.dmac_address;
    return (workp);
}

void
dca_destroywork(dca_work_t *workp)
{
    if (workp->dw_mcr_paddr) {
        (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
    }
    if (workp->dw_mcr_acch) {
        ddi_dma_mem_free(&workp->dw_mcr_acch);
    }
    if (workp->dw_mcr_dmah) {
        ddi_dma_free_handle(&workp->dw_mcr_dmah);
    }
    kmem_free(workp, sizeof (dca_work_t));
}

dca_request_t *
dca_newreq(dca_t *dca)
{
    dca_request_t *reqp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;
    int n_chain = 0;

    size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

    reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

    reqp->dr_dca = dca;

    /*
     * Setup the DMA region for the context and descriptors.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
        NULL, &reqp->dr_ctx_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* for driver hardening, allocate in whole pages */
    rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
        ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
        &reqp->dr_ctx_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
        reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
        DDI_DMA_SLEEP, 0, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "failed binding request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ctx_paddr = c.dmac_address;

    reqp->dr_dma_size = size;

    /*
     * Set up the dma for our scratch/shared buffers.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating ibuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating obuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_in DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_out DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /*
     * for driver hardening, allocate in whole pages.
     */
    size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
    /*
     * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
     * may fail on x86 platforms if a physically contiguous memory chunk
     * cannot be found. From initial testing, we did not see the
     * performance degradation that was seen on sparc.
     */
    if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
        dca_error(dca, "unable to alloc request ibuf memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
    if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
        dca_error(dca, "unable to alloc request obuf memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
#else
    /*
     * We could kmem_alloc for sparc too. However, it gives worse
     * performance when transferring more than one page of data. For
     * example, using 4 threads and 12032-byte data and 3DES on a
     * 900 MHz sparc system, kmem_alloc uses 80% CPU and
     * ddi_dma_mem_alloc uses 50% CPU for the same throughput.
     */
    rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
        &size, &reqp->dr_ibuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
        &size, &reqp->dr_obuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
#endif

    /* Skip the used portion in the context page */
    reqp->dr_offset = CTX_MAXLENGTH;
    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
        DDI_DMA_WRITE | DDI_DMA_STREAMING,
        &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
    /* Skip the space used by the input buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
        DDI_DMA_READ | DDI_DMA_STREAMING,
        &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
    /* Skip the space used by the output buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
        reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
    return (reqp);
}
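
/*
 * For illustration: the request's context page is laid out as the
 * context itself (CTX_MAXLENGTH bytes) followed by the preallocated
 * ibuf and obuf descriptor chains, which is why dr_offset starts at
 * CTX_MAXLENGTH above and advances by DESC_SIZE * n_chain after each
 * dca_bindchains_one() call.
 */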

void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
    dca_t *dca = reqp->dr_dca;
    size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

    /*
     * Clean up DMA for the context structure.
     */
    if (reqp->dr_ctx_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
    }

    if (reqp->dr_ctx_acch) {
        ddi_dma_mem_free(&reqp->dr_ctx_acch);
    }

    if (reqp->dr_ctx_dmah) {
        ddi_dma_free_handle(&reqp->dr_ctx_dmah);
    }

    /*
     * Clean up DMA for the scratch buffer.
     */
#if defined(i386) || defined(__i386)
    if (reqp->dr_ibuf_dmah) {
        (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
        ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_dmah) {
        (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
        ddi_dma_free_handle(&reqp->dr_obuf_dmah);
    }

    kmem_free(reqp->dr_ibuf_kaddr, size);
    kmem_free(reqp->dr_obuf_kaddr, size);
#else
    if (reqp->dr_ibuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
    }

    if (reqp->dr_ibuf_acch) {
        ddi_dma_mem_free(&reqp->dr_ibuf_acch);
    }
    if (reqp->dr_obuf_acch) {
        ddi_dma_mem_free(&reqp->dr_obuf_acch);
    }

    if (reqp->dr_ibuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_obuf_dmah);
    }
#endif
    /*
     * These two DMA handles should have been unbound in
     * dca_unbindchains() already.
     */
    if (reqp->dr_chain_in_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
    }
    if (reqp->dr_chain_out_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
    }

    kmem_free(reqp, sizeof (dca_request_t));
}

dca_work_t *
dca_getwork(dca_t *dca, int mcr)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);
    dca_work_t *workp;

    mutex_enter(&wlp->dwl_freelock);
    workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
    mutex_exit(&wlp->dwl_freelock);
    if (workp) {
        int nreqs;
        bzero(workp->dw_mcr_kaddr, 8);

        /* clear out old requests */
        for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
            workp->dw_reqs[nreqs] = NULL;
        }
    }
    return (workp);
}

void
dca_freework(dca_work_t *workp)
{
    mutex_enter(&workp->dw_wlp->dwl_freelock);
    dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
    mutex_exit(&workp->dw_wlp->dwl_freelock);
}

dca_request_t *
dca_getreq(dca_t *dca, int mcr, int tryhard)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);
    dca_request_t *reqp;

    mutex_enter(&wlp->dwl_freereqslock);
    reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
    mutex_exit(&wlp->dwl_freereqslock);
    if (reqp) {
        reqp->dr_flags = 0;
        reqp->dr_callback = NULL;
    } else if (tryhard) {
        /*
         * failed to get a free one, try an allocation, the hard way.
         * XXX: Kstat desired here.
         */
        if ((reqp = dca_newreq(dca)) != NULL) {
            reqp->dr_wlp = wlp;
            reqp->dr_dca = dca;
            reqp->dr_flags = 0;
            reqp->dr_callback = NULL;
        }
    }
    return (reqp);
}

void
dca_freereq(dca_request_t *reqp)
{
    reqp->dr_kcf_req = NULL;
    if (!(reqp->dr_flags & DR_NOCACHE)) {
        mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
        dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
            (dca_listnode_t *)reqp);
        mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
    }
}

/*
 * Binds user buffers to DMA handles dynamically. On sparc, a user buffer
 * is mapped to a single physical address. On x86, a user buffer is mapped
 * to multiple physical addresses. These physical addresses are chained
 * using the method specified in the Broadcom BCM5820 specification.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
    int rv;
    caddr_t kaddr;
    uint_t flags;
    int n_chain = 0;

    if (reqp->dr_flags & DR_INPLACE) {
        flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
    } else {
        flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
    }

    /* first the input */
    if (incnt) {
        if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            return (DDI_FAILURE);
        }
        if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
            kaddr, reqp->dr_chain_in_dmah, flags,
            &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (rv);
        }

        /*
         * The offset and length are altered by the calling routine
         * reqp->dr_in->cd_offset += incnt;
         * reqp->dr_in->cd_length -= incnt;
         */
        /* Save the first one in the chain for MCR */
        reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
        reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
        reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
    } else {
        reqp->dr_in_paddr = NULL;
        reqp->dr_in_next = 0;
        reqp->dr_in_len = 0;
    }

    if (reqp->dr_flags & DR_INPLACE) {
        reqp->dr_out_paddr = reqp->dr_in_paddr;
        reqp->dr_out_len = reqp->dr_in_len;
        reqp->dr_out_next = reqp->dr_in_next;
        return (DDI_SUCCESS);
    }

    /* then the output */
    if (outcnt) {
        flags = DDI_DMA_READ | DDI_DMA_STREAMING;
        if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }
        rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
            n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
            flags, &reqp->dr_chain_out_head, &n_chain);
        if (rv != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }

        /* Save the first one in the chain for MCR */
        reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
        reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
        reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
    } else {
        reqp->dr_out_paddr = NULL;
        reqp->dr_out_next = 0;
        reqp->dr_out_len = 0;
    }

    return (DDI_SUCCESS);
}

/*
 * Unbind the user buffers from the DMA handles.
 */
int
dca_unbindchains(dca_request_t *reqp)
{
    int rv = DDI_SUCCESS;
    int rv1 = DDI_SUCCESS;

    /* Clear the input chain */
    if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
        (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
        reqp->dr_chain_in_head.dc_buffer_paddr = 0;
    }

    if (reqp->dr_flags & DR_INPLACE) {
        return (rv);
    }

    /* Clear the output chain */
    if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
        (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
        reqp->dr_chain_out_head.dc_buffer_paddr = 0;
    }

    return ((rv != DDI_SUCCESS)? rv : rv1);
}
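
/*
 * For illustration: for DR_INPLACE requests dca_bindchains() binds only
 * the input chain (read/write) and aliases the output fields to it, so
 * dca_unbindchains() above correspondingly skips the output chain.
 */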
2006 */ 2007 static int 2008 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset, 2009 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags, 2010 dca_chain_t *head, int *n_chain) 2011 { 2012 ddi_dma_cookie_t c; 2013 uint_t nc; 2014 int rv; 2015 caddr_t chain_kaddr_pre; 2016 caddr_t chain_kaddr; 2017 uint32_t chain_paddr; 2018 int i; 2019 2020 /* Advance past the context structure to the starting address */ 2021 chain_paddr = reqp->dr_ctx_paddr + dr_offset; 2022 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset; 2023 2024 /* 2025 * Bind the kernel address to the DMA handle. On x86, the actual 2026 * buffer is mapped into multiple physical addresses. On Sparc, 2027 * the actual buffer is mapped into a single address. 2028 */ 2029 rv = ddi_dma_addr_bind_handle(handle, 2030 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc); 2031 if (rv != DDI_DMA_MAPPED) { 2032 return (DDI_FAILURE); 2033 } 2034 2035 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV); 2036 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle, 2037 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) { 2038 reqp->destroy = TRUE; 2039 return (rv); 2040 } 2041 2042 *n_chain = nc; 2043 2044 /* Setup the data buffer chain for DMA transfer */ 2045 chain_kaddr_pre = NULL; 2046 head->dc_buffer_paddr = 0; 2047 head->dc_next_paddr = 0; 2048 head->dc_buffer_length = 0; 2049 for (i = 0; i < nc; i++) { 2050 /* PIO */ 2051 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address); 2052 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0); 2053 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size); 2054 2055 /* Remember the head of the chain */ 2056 if (head->dc_buffer_paddr == 0) { 2057 head->dc_buffer_paddr = c.dmac_address; 2058 head->dc_buffer_length = c.dmac_size; 2059 } 2060 2061 /* Link to the previous one if one exists */ 2062 if (chain_kaddr_pre) { 2063 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 2064 chain_paddr); 2065 if (head->dc_next_paddr == 0) 2066 head->dc_next_paddr = chain_paddr; 2067 } 2068 chain_kaddr_pre = chain_kaddr; 2069 2070 /* Maintain pointers */ 2071 chain_paddr += DESC_SIZE; 2072 chain_kaddr += DESC_SIZE; 2073 2074 /* Retrieve the next cookie if there is one */ 2075 if (i < nc-1) 2076 ddi_dma_nextcookie(handle, &c); 2077 } 2078 2079 /* Set the next pointer in the last entry to NULL */ 2080 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0); 2081 2082 return (DDI_SUCCESS); 2083 } 2084 2085 /* 2086 * Schedule some work. 
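 *
 * For reference, the MCR assembled below looks roughly like this (per
 * the MCR_* offsets and the PUTMCR* stores in dca_schedule(); widths
 * shown for exposition only):
 *
 *	MCR_FLAGS (16 bits)	-- written as 0 on submission; the chip
 *				   sets MCRFLAG_FINISHED on completion
 *	MCR_COUNT (16 bits)	-- number of request entries that follow
 *
 * followed, starting at MCR_CTXADDR, by one 32-byte entry per request:
 * context paddr (32), input buffer paddr (32), next input descriptor
 * (32), input length (16), reserved (16), reserved (16), packet length
 * (16), output buffer paddr (32), next output descriptor (32), output
 * length (16), reserved (16).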
2087 */ 2088 int 2089 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched) 2090 { 2091 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2092 2093 mutex_enter(&wlp->dwl_lock); 2094 2095 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p", 2096 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr, 2097 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr); 2098 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x", 2099 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr); 2100 /* sync out the entire context and descriptor chains */ 2101 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV); 2102 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah, 2103 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2104 reqp->destroy = TRUE; 2105 mutex_exit(&wlp->dwl_lock); 2106 return (CRYPTO_DEVICE_ERROR); 2107 } 2108 2109 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp); 2110 wlp->dwl_count++; 2111 wlp->dwl_lastsubmit = ddi_get_lbolt(); 2112 reqp->dr_wlp = wlp; 2113 2114 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) { 2115 /* we are fully loaded now, let kCF know */ 2116 2117 wlp->dwl_flowctl++; 2118 wlp->dwl_busy = 1; 2119 2120 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY); 2121 } 2122 2123 if (dosched) { 2124 #ifdef SCHEDDELAY 2125 /* possibly wait for more work to arrive */ 2126 if (wlp->dwl_count >= wlp->dwl_reqspermcr) { 2127 dca_schedule(dca, mcr); 2128 } else if (!wlp->dwl_schedtid) { 2129 /* wait 1 msec for more work before doing it */ 2130 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2131 (void *)wlp, drv_usectohz(MSEC)); 2132 } 2133 #else 2134 dca_schedule(dca, mcr); 2135 #endif 2136 } 2137 mutex_exit(&wlp->dwl_lock); 2138 2139 return (CRYPTO_QUEUED); 2140 } 2141 2142 void 2143 dca_schedule(dca_t *dca, int mcr) 2144 { 2145 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2146 int csr; 2147 int full; 2148 uint32_t status; 2149 2150 ASSERT(mutex_owned(&wlp->dwl_lock)); 2151 /* 2152 * If the card is draining or has an outstanding failure, 2153 * don't schedule any more work on it right now. 2154 */ 2155 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) { 2156 return; 2157 } 2158 2159 if (mcr == MCR2) { 2160 csr = CSR_MCR2; 2161 full = DMASTAT_MCR2FULL; 2162 } else { 2163 csr = CSR_MCR1; 2164 full = DMASTAT_MCR1FULL; 2165 } 2166 2167 for (;;) { 2168 dca_work_t *workp; 2169 uint32_t offset; 2170 int nreqs; 2171 2172 status = GETCSR(dca, CSR_DMASTAT); 2173 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 2174 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 2175 return; 2176 2177 if ((status & full) != 0) 2178 break; 2179 2180 #ifdef SCHEDDELAY 2181 /* if there isn't enough to do, don't bother now */ 2182 if ((wlp->dwl_count < wlp->dwl_reqspermcr) && 2183 (ddi_get_lbolt() < (wlp->dwl_lastsubmit + 2184 drv_usectohz(MSEC)))) { 2185 /* wait a bit longer... */ 2186 if (wlp->dwl_schedtid == 0) { 2187 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2188 (void *)wlp, drv_usectohz(MSEC)); 2189 } 2190 return; 2191 } 2192 #endif 2193 2194 /* grab a work structure */ 2195 workp = dca_getwork(dca, mcr); 2196 2197 if (workp == NULL) { 2198 /* 2199 * There must be work ready to be reclaimed in this 2200 * case, since the chip can hold fewer MCRs outstanding 2201 * than the total number of work structures allocated.
2202 */ 2203 dca_reclaim(dca, mcr); 2204 continue; 2205 } 2206 2207 nreqs = 0; 2208 offset = MCR_CTXADDR; 2209 2210 while (nreqs < wlp->dwl_reqspermcr) { 2211 dca_request_t *reqp; 2212 2213 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq); 2214 if (reqp == NULL) { 2215 /* nothing left to process */ 2216 break; 2217 } 2218 /* 2219 * Update flow control. 2220 */ 2221 wlp->dwl_count--; 2222 if ((wlp->dwl_count == wlp->dwl_lowater) && 2223 (wlp->dwl_busy)) { 2224 wlp->dwl_busy = 0; 2225 crypto_prov_notify(wlp->dwl_prov, 2226 CRYPTO_PROVIDER_READY); 2227 } 2228 2229 /* 2230 * Context address. 2231 */ 2232 PUTMCR32(workp, offset, reqp->dr_ctx_paddr); 2233 offset += 4; 2234 2235 /* 2236 * Input chain. 2237 */ 2238 /* input buffer address */ 2239 PUTMCR32(workp, offset, reqp->dr_in_paddr); 2240 offset += 4; 2241 /* next input buffer entry */ 2242 PUTMCR32(workp, offset, reqp->dr_in_next); 2243 offset += 4; 2244 /* input buffer length */ 2245 PUTMCR16(workp, offset, reqp->dr_in_len); 2246 offset += 2; 2247 /* zero the reserved field */ 2248 PUTMCR16(workp, offset, 0); 2249 offset += 2; 2250 2251 /* 2252 * Overall length. 2253 */ 2254 /* reserved field */ 2255 PUTMCR16(workp, offset, 0); 2256 offset += 2; 2257 /* total packet length */ 2258 PUTMCR16(workp, offset, reqp->dr_pkt_length); 2259 offset += 2; 2260 2261 /* 2262 * Output chain. 2263 */ 2264 /* output buffer address */ 2265 PUTMCR32(workp, offset, reqp->dr_out_paddr); 2266 offset += 4; 2267 /* next output buffer entry */ 2268 PUTMCR32(workp, offset, reqp->dr_out_next); 2269 offset += 4; 2270 /* output buffer length */ 2271 PUTMCR16(workp, offset, reqp->dr_out_len); 2272 offset += 2; 2273 /* zero the reserved field */ 2274 PUTMCR16(workp, offset, 0); 2275 offset += 2; 2276 2277 /* 2278 * Note submission. 2279 */ 2280 workp->dw_reqs[nreqs] = reqp; 2281 nreqs++; 2282 } 2283 2284 if (nreqs == 0) { 2285 /* nothing in the queue! */ 2286 dca_freework(workp); 2287 return; 2288 } 2289 2290 wlp->dwl_submit++; 2291 2292 PUTMCR16(workp, MCR_FLAGS, 0); 2293 PUTMCR16(workp, MCR_COUNT, nreqs); 2294 2295 DBG(dca, DCHATTY, 2296 "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d", 2297 workp->dw_mcr_paddr, workp->dw_mcr_kaddr, 2298 nreqs, mcr); 2299 2300 workp->dw_lbolt = ddi_get_lbolt(); 2301 /* Make sure MCR is synced out to device. */ 2302 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0, 2303 DDI_DMA_SYNC_FORDEV); 2304 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah, 2305 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2306 dca_destroywork(workp); 2307 return; 2308 } 2309 2310 PUTCSR(dca, csr, workp->dw_mcr_paddr); 2311 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 2312 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2313 dca_destroywork(workp); 2314 return; 2315 } else { 2316 dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp); 2317 } 2318 2319 DBG(dca, DCHATTY, "posted"); 2320 } 2321 } 2322 2323 /* 2324 * Reclaim completed work, called in interrupt context. 2325 */ 2326 void 2327 dca_reclaim(dca_t *dca, int mcr) 2328 { 2329 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2330 dca_work_t *workp; 2331 ushort_t flags; 2332 int nreclaimed = 0; 2333 int i; 2334 2335 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr); 2336 ASSERT(mutex_owned(&wlp->dwl_lock)); 2337 /* 2338 * For each MCR on the submitted queue (runq), we check to see if 2339 * it has been processed. If so, then we note each individual 2340 * job in the MCR, and do the completion processing for 2341 * each such job.
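 *
 * In outline (this just summarizes the loop below; it adds no new
 * logic): peek at the head of the runq; sync and test MCR_FLAGS for
 * MCRFLAG_FINISHED; if finished, dequeue the MCR, update statistics
 * while still holding dwl_lock, then drop the lock and run dca_done()
 * on every request in the MCR before freeing it. The loop stops at the
 * first unfinished MCR.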
2342 */ 2343 for (;;) { 2344 2345 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2346 if (workp == NULL) { 2347 break; 2348 } 2349 2350 /* only sync the MCR flags, since that's all we need */ 2351 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4, 2352 DDI_DMA_SYNC_FORKERNEL); 2353 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah, 2354 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2355 dca_rmqueue((dca_listnode_t *)workp); 2356 dca_destroywork(workp); 2357 return; 2358 } 2359 2360 flags = GETMCR16(workp, MCR_FLAGS); 2361 if ((flags & MCRFLAG_FINISHED) == 0) { 2362 /* chip is still working on it */ 2363 DBG(dca, DRECLAIM, 2364 "chip still working on it (MCR%d)", mcr); 2365 break; 2366 } 2367 2368 /* it's really for us, so remove it from the queue */ 2369 dca_rmqueue((dca_listnode_t *)workp); 2370 2371 /* if we were draining, signal on the cv */ 2372 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2373 cv_signal(&wlp->dwl_cv); 2374 } 2375 2376 /* update statistics, done under the lock */ 2377 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2378 dca_request_t *reqp = workp->dw_reqs[i]; 2379 if (reqp == NULL) { 2380 continue; 2381 } 2382 if (reqp->dr_byte_stat >= 0) { 2383 dca->dca_stats[reqp->dr_byte_stat] += 2384 reqp->dr_pkt_length; 2385 } 2386 if (reqp->dr_job_stat >= 0) { 2387 dca->dca_stats[reqp->dr_job_stat]++; 2388 } 2389 } 2390 mutex_exit(&wlp->dwl_lock); 2391 2392 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2393 dca_request_t *reqp = workp->dw_reqs[i]; 2394 2395 if (reqp == NULL) { 2396 continue; 2397 } 2398 2399 /* Do the callback. */ 2400 workp->dw_reqs[i] = NULL; 2401 dca_done(reqp, CRYPTO_SUCCESS); 2402 2403 nreclaimed++; 2404 } 2405 2406 /* now we can release the work */ 2407 dca_freework(workp); 2408 2409 mutex_enter(&wlp->dwl_lock); 2410 } 2411 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed); 2412 } 2413 2414 int 2415 dca_length(crypto_data_t *cdata) 2416 { 2417 return (cdata->cd_length); 2418 } 2419 2420 /* 2421 * This is the callback function called from the interrupt when a kCF job 2422 * completes. It does some driver-specific things, and then calls the 2423 * kCF-provided callback. Finally, it cleans up the state for the work 2424 * request and drops the reference count to allow for DR. 2425 */ 2426 void 2427 dca_done(dca_request_t *reqp, int err) 2428 { 2429 uint64_t ena = 0; 2430 2431 /* unbind any chains we were using */ 2432 if (dca_unbindchains(reqp) != DDI_SUCCESS) { 2433 /* DMA failure */ 2434 ena = dca_ena(ena); 2435 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT, 2436 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR, 2437 "fault on buffer DMA handle"); 2438 if (err == CRYPTO_SUCCESS) { 2439 err = CRYPTO_DEVICE_ERROR; 2440 } 2441 } 2442 2443 if (reqp->dr_callback != NULL) { 2444 reqp->dr_callback(reqp, err); 2445 } else { 2446 dca_freereq(reqp); 2447 } 2448 } 2449 2450 /* 2451 * Call this when a failure is detected. It will reset the chip, 2452 * log a message, alert kCF, and mark jobs in the runq as failed. 2453 */ 2454 /* ARGSUSED */ 2455 void 2456 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index, 2457 uint64_t ena, int errno, char *mess, ...)
2458 { 2459 va_list ap; 2460 char buf[256]; 2461 int mcr; 2462 char *eclass; 2463 int have_mutex; 2464 2465 va_start(ap, mess); 2466 (void) vsprintf(buf, mess, ap); 2467 va_end(ap); 2468 2469 eclass = dca_fma_eclass_string(dca->dca_model, index); 2470 2471 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) && 2472 index != DCA_FM_ECLASS_NONE) { 2473 ddi_fm_ereport_post(dca->dca_dip, eclass, ena, 2474 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 2475 FM_EREPORT_VERS0, NULL); 2476 2477 /* Report the impact of the failure to the DDI. */ 2478 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST); 2479 } else { 2480 /* Just log the error string to the message log */ 2481 dca_error(dca, buf); 2482 } 2483 2484 /* 2485 * Indicate a failure (keeps schedule from running). 2486 */ 2487 dca->dca_flags |= DCA_FAILED; 2488 2489 /* 2490 * Reset the chip. This should also have as a side effect, the 2491 * disabling of all interrupts from the device. 2492 */ 2493 (void) dca_reset(dca, 1); 2494 2495 /* 2496 * Report the failure to kCF. 2497 */ 2498 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2499 if (WORKLIST(dca, mcr)->dwl_prov) { 2500 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov, 2501 CRYPTO_PROVIDER_FAILED); 2502 } 2503 } 2504 2505 /* 2506 * Return jobs not sent to hardware back to kCF. 2507 */ 2508 dca_rejectjobs(dca); 2509 2510 /* 2511 * From this point on, no new work should be arriving, and the 2512 * chip should not be doing any active DMA. 2513 */ 2514 2515 /* 2516 * Now find all the work submitted to the device and fail 2517 * them. 2518 */ 2519 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2520 dca_worklist_t *wlp; 2521 int i; 2522 2523 wlp = WORKLIST(dca, mcr); 2524 2525 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2526 continue; 2527 } 2528 for (;;) { 2529 dca_work_t *workp; 2530 2531 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2532 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq); 2533 if (workp == NULL) { 2534 if (have_mutex) 2535 mutex_exit(&wlp->dwl_lock); 2536 break; 2537 } 2538 mutex_exit(&wlp->dwl_lock); 2539 2540 /* 2541 * Free up requests 2542 */ 2543 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2544 dca_request_t *reqp = workp->dw_reqs[i]; 2545 if (reqp) { 2546 dca_done(reqp, errno); 2547 workp->dw_reqs[i] = NULL; 2548 } 2549 } 2550 2551 mutex_enter(&wlp->dwl_lock); 2552 /* 2553 * If waiting to drain, signal on the waiter. 2554 */ 2555 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2556 cv_signal(&wlp->dwl_cv); 2557 } 2558 2559 /* 2560 * Return the work and request structures to 2561 * the free pool. 2562 */ 2563 dca_freework(workp); 2564 if (have_mutex) 2565 mutex_exit(&wlp->dwl_lock); 2566 } 2567 } 2568 2569 } 2570 2571 #ifdef SCHEDDELAY 2572 /* 2573 * Reschedule worklist as needed. 2574 */ 2575 void 2576 dca_schedtimeout(void *arg) 2577 { 2578 dca_worklist_t *wlp = (dca_worklist_t *)arg; 2579 mutex_enter(&wlp->dwl_lock); 2580 wlp->dwl_schedtid = 0; 2581 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr); 2582 mutex_exit(&wlp->dwl_lock); 2583 } 2584 #endif 2585 2586 /* 2587 * Check for stalled jobs. 
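 *
 * This runs off a one-second timeout (dca->dca_jobtid) that re-arms
 * itself at the bottom of the routine. Any MCR that has been sitting
 * on a runq for more than STALETIME is taken as evidence of a hung
 * chip and funneled into dca_failure(), which resets the device.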
2588 */ 2589 void 2590 dca_jobtimeout(void *arg) 2591 { 2592 int mcr; 2593 dca_t *dca = (dca_t *)arg; 2594 int hung = 0; 2595 2596 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2597 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2598 dca_work_t *workp; 2599 clock_t when; 2600 2601 mutex_enter(&wlp->dwl_lock); 2602 when = ddi_get_lbolt(); 2603 2604 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2605 if (workp == NULL) { 2606 /* nothing sitting in the queue */ 2607 mutex_exit(&wlp->dwl_lock); 2608 continue; 2609 } 2610 2611 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) { 2612 /* request has been queued for less than STALETIME */ 2613 mutex_exit(&wlp->dwl_lock); 2614 continue; 2615 } 2616 2617 /* job has been sitting around for over STALETIME, badness */ 2618 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp, 2619 mcr); 2620 2621 /* put it back in the queue, until we reset the chip */ 2622 hung++; 2623 mutex_exit(&wlp->dwl_lock); 2624 } 2625 2626 if (hung) { 2627 dca_failure(dca, DDI_DEVICE_FAULT, 2628 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR, 2629 "timeout processing job"); 2630 } 2631 2632 /* reschedule ourself */ 2633 mutex_enter(&dca->dca_intrlock); 2634 if (dca->dca_jobtid == 0) { 2635 /* timeout has been canceled, prior to DR */ 2636 mutex_exit(&dca->dca_intrlock); 2637 return; 2638 } 2639 2640 /* check again in 1 second */ 2641 dca->dca_jobtid = timeout(dca_jobtimeout, arg, 2642 drv_usectohz(SECOND)); 2643 mutex_exit(&dca->dca_intrlock); 2644 } 2645 2646 /* 2647 * This returns all jobs back to kCF. It assumes that processing 2648 * on the worklist has halted. 2649 */ 2650 void 2651 dca_rejectjobs(dca_t *dca) 2652 { 2653 int mcr; 2654 int have_mutex; 2655 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2656 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2657 dca_request_t *reqp; 2658 2659 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2660 continue; 2661 } 2662 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2663 for (;;) { 2664 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq); 2665 if (reqp == NULL) { 2666 break; 2667 } 2668 /* update flow control */ 2669 wlp->dwl_count--; 2670 if ((wlp->dwl_count == wlp->dwl_lowater) && 2671 (wlp->dwl_busy)) { 2672 wlp->dwl_busy = 0; 2673 crypto_prov_notify(wlp->dwl_prov, 2674 CRYPTO_PROVIDER_READY); 2675 } 2676 mutex_exit(&wlp->dwl_lock); 2677 2678 (void) dca_unbindchains(reqp); 2679 reqp->dr_callback(reqp, EAGAIN); 2680 mutex_enter(&wlp->dwl_lock); 2681 } 2682 if (have_mutex) 2683 mutex_exit(&wlp->dwl_lock); 2684 } 2685 } 2686 2687 int 2688 dca_drain(dca_t *dca) 2689 { 2690 int mcr; 2691 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2692 #ifdef SCHEDDELAY 2693 timeout_id_t tid; 2694 #endif 2695 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2696 2697 mutex_enter(&wlp->dwl_lock); 2698 wlp->dwl_drain = 1; 2699 2700 /* give it up to STALETIME to drain from the chip */ 2701 if (!QEMPTY(&wlp->dwl_runq)) { 2702 (void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock, 2703 ddi_get_lbolt() + drv_usectohz(STALETIME)); 2704 2705 if (!QEMPTY(&wlp->dwl_runq)) { 2706 dca_error(dca, "unable to drain device"); 2707 mutex_exit(&wlp->dwl_lock); 2708 dca_undrain(dca); 2709 return (EBUSY); 2710 } 2711 } 2712 2713 #ifdef SCHEDDELAY 2714 tid = wlp->dwl_schedtid; 2715 mutex_exit(&wlp->dwl_lock); 2716 2717 /* 2718 * untimeout outside the lock -- this is safe because we 2719 * have set the drain flag, so dca_schedule() will not 2720 * reschedule another timeout 2721 */ 2722 if (tid) { 2723 untimeout(tid); 2724 } 2725 #else 2726 mutex_exit(&wlp->dwl_lock); 2727 #endif
2728 } 2729 return (0); 2730 } 2731 2732 void 2733 dca_undrain(dca_t *dca) 2734 { 2735 int mcr; 2736 2737 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2738 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2739 mutex_enter(&wlp->dwl_lock); 2740 wlp->dwl_drain = 0; 2741 dca_schedule(dca, mcr); 2742 mutex_exit(&wlp->dwl_lock); 2743 } 2744 } 2745 2746 /* 2747 * Duplicate the crypto_data_t structure, but point to the original 2748 * buffers. 2749 */ 2750 int 2751 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput) 2752 { 2753 ninput->cd_format = input->cd_format; 2754 ninput->cd_offset = input->cd_offset; 2755 ninput->cd_length = input->cd_length; 2756 ninput->cd_miscdata = input->cd_miscdata; 2757 2758 switch (input->cd_format) { 2759 case CRYPTO_DATA_RAW: 2760 ninput->cd_raw.iov_base = input->cd_raw.iov_base; 2761 ninput->cd_raw.iov_len = input->cd_raw.iov_len; 2762 break; 2763 2764 case CRYPTO_DATA_UIO: 2765 ninput->cd_uio = input->cd_uio; 2766 break; 2767 2768 case CRYPTO_DATA_MBLK: 2769 ninput->cd_mp = input->cd_mp; 2770 break; 2771 2772 default: 2773 DBG(NULL, DWARN, 2774 "dca_dupcrypto: unrecognised crypto data format"); 2775 return (CRYPTO_FAILED); 2776 } 2777 2778 return (CRYPTO_SUCCESS); 2779 } 2780 2781 /* 2782 * Performs validation checks on the input and output data structures. 2783 */ 2784 int 2785 dca_verifyio(crypto_data_t *input, crypto_data_t *output) 2786 { 2787 int rv = CRYPTO_SUCCESS; 2788 2789 switch (input->cd_format) { 2790 case CRYPTO_DATA_RAW: 2791 break; 2792 2793 case CRYPTO_DATA_UIO: 2794 /* we support only kernel buffer */ 2795 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) { 2796 DBG(NULL, DWARN, "non kernel input uio buffer"); 2797 rv = CRYPTO_ARGUMENTS_BAD; 2798 } 2799 break; 2800 2801 case CRYPTO_DATA_MBLK: 2802 break; 2803 2804 default: 2805 DBG(NULL, DWARN, "unrecognised input crypto data format"); 2806 rv = CRYPTO_ARGUMENTS_BAD; 2807 } 2808 2809 switch (output->cd_format) { 2810 case CRYPTO_DATA_RAW: 2811 break; 2812 2813 case CRYPTO_DATA_UIO: 2814 /* we support only kernel buffer */ 2815 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) { 2816 DBG(NULL, DWARN, "non kernel output uio buffer"); 2817 rv = CRYPTO_ARGUMENTS_BAD; 2818 } 2819 break; 2820 2821 case CRYPTO_DATA_MBLK: 2822 break; 2823 2824 default: 2825 DBG(NULL, DWARN, "unrecognised output crypto data format"); 2826 rv = CRYPTO_ARGUMENTS_BAD; 2827 } 2828 2829 return (rv); 2830 } 2831 2832 /* 2833 * data: source crypto_data_t struct 2834 * off: offset into the source before commencing copy 2835 * count: the amount of data to copy 2836 * dest: destination buffer 2837 */ 2838 int 2839 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest) 2840 { 2841 int rv = CRYPTO_SUCCESS; 2842 uio_t *uiop; 2843 uint_t vec_idx; 2844 size_t cur_len; 2845 mblk_t *mp; 2846 2847 if (count == 0) { 2848 /* We don't want anything so we're done. */ 2849 return (rv); 2850 } 2851 2852 /* 2853 * Sanity check that we haven't specified a length greater than the 2854 * offset adjusted size of the buffer. 2855 */ 2856 if (count > (data->cd_length - off)) { 2857 return (CRYPTO_DATA_LEN_RANGE); 2858 } 2859 2860 /* Add the internal crypto_data offset to the requested offset. */ 2861 off += data->cd_offset; 2862 2863 switch (data->cd_format) { 2864 case CRYPTO_DATA_RAW: 2865 bcopy(data->cd_raw.iov_base + off, dest, count); 2866 break; 2867 2868 case CRYPTO_DATA_UIO: 2869 /* 2870 * Jump to the first iovec containing data to be 2871 * processed. 
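 * For example (illustrative values): with two iovecs of length 8 and
 * 16 and off == 10, the skip loop below exits with vec_idx == 1 and
 * off == 2, so copying starts two bytes into the second iovec.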
2872 */ 2873 uiop = data->cd_uio; 2874 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 2875 off >= uiop->uio_iov[vec_idx].iov_len; 2876 off -= uiop->uio_iov[vec_idx++].iov_len) 2877 ; 2878 if (vec_idx == uiop->uio_iovcnt) { 2879 /* 2880 * The caller specified an offset that is larger than 2881 * the total size of the buffers it provided. 2882 */ 2883 return (CRYPTO_DATA_LEN_RANGE); 2884 } 2885 2886 /* 2887 * Now process the iovecs. 2888 */ 2889 while (vec_idx < uiop->uio_iovcnt && count > 0) { 2890 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 2891 off, count); 2892 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 2893 cur_len); 2894 count -= cur_len; 2895 dest += cur_len; 2896 vec_idx++; 2897 off = 0; 2898 } 2899 2900 if (vec_idx == uiop->uio_iovcnt && count > 0) { 2901 /* 2902 * The end of the specified iovecs was reached but 2903 * the length requested could not be processed 2904 * (requested to digest more data than it provided). 2905 */ 2906 return (CRYPTO_DATA_LEN_RANGE); 2907 } 2908 break; 2909 2910 case CRYPTO_DATA_MBLK: 2911 /* 2912 * Jump to the first mblk_t containing data to be processed. 2913 */ 2914 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp); 2915 off -= MBLKL(mp), mp = mp->b_cont) 2916 ; 2917 if (mp == NULL) { 2918 /* 2919 * The caller specified an offset that is larger than 2920 * the total size of the buffers it provided. 2921 */ 2922 return (CRYPTO_DATA_LEN_RANGE); 2923 } 2924 2925 /* 2926 * Now do the processing on the mblk chain. 2927 */ 2928 while (mp != NULL && count > 0) { 2929 cur_len = min(MBLKL(mp) - off, count); 2930 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 2931 count -= cur_len; 2932 dest += cur_len; 2933 mp = mp->b_cont; 2934 off = 0; 2935 } 2936 2937 if (mp == NULL && count > 0) { 2938 /* 2939 * The end of the mblk was reached but the length 2940 * requested could not be processed (requested to 2941 * digest more data than it provided). 2942 */ 2943 return (CRYPTO_DATA_LEN_RANGE); 2944 } 2945 break; 2946 2947 default: 2948 DBG(NULL, DWARN, "unrecognised crypto data format"); 2949 rv = CRYPTO_ARGUMENTS_BAD; 2950 } 2951 return (rv); 2952 } 2953 2954 2955 /* 2956 * Performs the input, output or hard scatter/gather checks on the specified 2957 * crypto_data_t struct. Returns true if the data is scatter/gather in nature, 2958 * i.e. it fails the test. 2959 */ 2960 int 2961 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val) 2962 { 2963 uio_t *uiop; 2964 mblk_t *mp; 2965 int rv = FALSE; 2966 2967 switch (val) { 2968 case DCA_SG_CONTIG: 2969 /* 2970 * Check for a contiguous data buffer. 2971 */ 2972 switch (data->cd_format) { 2973 case CRYPTO_DATA_RAW: 2974 /* Contiguous in nature */ 2975 break; 2976 2977 case CRYPTO_DATA_UIO: 2978 if (data->cd_uio->uio_iovcnt > 1) 2979 rv = TRUE; 2980 break; 2981 2982 case CRYPTO_DATA_MBLK: 2983 mp = data->cd_mp; 2984 if (mp->b_cont != NULL) 2985 rv = TRUE; 2986 break; 2987 2988 default: 2989 DBG(NULL, DWARN, "unrecognised crypto data format"); 2990 } 2991 break; 2992 2993 case DCA_SG_WALIGN: 2994 /* 2995 * Check for a contiguous data buffer that is 32-bit word 2996 * aligned and is of word multiples in size.
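 * For example (illustrative values): a 13-byte raw buffer fails, since
 * 13 is not a multiple of 4, as does a 16-byte buffer starting at an
 * odd address; a 16-byte buffer at a 4-byte-aligned address passes.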
2997 */ 2998 switch (data->cd_format) { 2999 case CRYPTO_DATA_RAW: 3000 if ((data->cd_raw.iov_len % sizeof (uint32_t)) || 3001 ((uintptr_t)data->cd_raw.iov_base % 3002 sizeof (uint32_t))) { 3003 rv = TRUE; 3004 } 3005 break; 3006 3007 case CRYPTO_DATA_UIO: 3008 uiop = data->cd_uio; 3009 if (uiop->uio_iovcnt > 1) { 3010 return (TRUE); 3011 } 3012 /* So there is only one iovec */ 3013 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) || 3014 ((uintptr_t)uiop->uio_iov[0].iov_base % 3015 sizeof (uint32_t))) { 3016 rv = TRUE; 3017 } 3018 break; 3019 3020 case CRYPTO_DATA_MBLK: 3021 mp = data->cd_mp; 3022 if (mp->b_cont != NULL) { 3023 return (TRUE); 3024 } 3025 /* So there is only one mblk in the chain */ 3026 if ((MBLKL(mp) % sizeof (uint32_t)) || 3027 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) { 3028 rv = TRUE; 3029 } 3030 break; 3031 3032 default: 3033 DBG(NULL, DWARN, "unrecognised crypto data format"); 3034 } 3035 break; 3036 3037 case DCA_SG_PALIGN: 3038 /* 3039 * Check that the data buffer is page aligned and is of 3040 * page multiples in size. 3041 */ 3042 switch (data->cd_format) { 3043 case CRYPTO_DATA_RAW: 3044 if ((data->cd_length % dca->dca_pagesize) || 3045 ((uintptr_t)data->cd_raw.iov_base % 3046 dca->dca_pagesize)) { 3047 rv = TRUE; 3048 } 3049 break; 3050 3051 case CRYPTO_DATA_UIO: 3052 uiop = data->cd_uio; 3053 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) || 3054 ((uintptr_t)uiop->uio_iov[0].iov_base % 3055 dca->dca_pagesize)) { 3056 rv = TRUE; 3057 } 3058 break; 3059 3060 case CRYPTO_DATA_MBLK: 3061 mp = data->cd_mp; 3062 if ((MBLKL(mp) % dca->dca_pagesize) || 3063 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) { 3064 rv = TRUE; 3065 } 3066 break; 3067 3068 default: 3069 DBG(NULL, DWARN, "unrecognised crypto data format"); 3070 } 3071 break; 3072 3073 default: 3074 DBG(NULL, DWARN, "unrecognised scatter/gather param type"); 3075 } 3076 3077 return (rv); 3078 } 3079 3080 /* 3081 * Increments the cd_offset and decrements the cd_length as the data is 3082 * gathered from the crypto_data_t struct. 3083 * The data is reverse-copied into the dest buffer if the flag is true. 3084 */ 3085 int 3086 dca_gather(crypto_data_t *in, char *dest, int count, int reverse) 3087 { 3088 int rv = CRYPTO_SUCCESS; 3089 uint_t vec_idx; 3090 uio_t *uiop; 3091 off_t off = in->cd_offset; 3092 size_t cur_len; 3093 mblk_t *mp; 3094 3095 switch (in->cd_format) { 3096 case CRYPTO_DATA_RAW: 3097 if (count > in->cd_length) { 3098 /* 3099 * The caller specified a length greater than the 3100 * size of the buffer. 3101 */ 3102 return (CRYPTO_DATA_LEN_RANGE); 3103 } 3104 if (reverse) 3105 dca_reverse(in->cd_raw.iov_base + off, dest, count, 3106 count); 3107 else 3108 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3109 in->cd_offset += count; 3110 in->cd_length -= count; 3111 break; 3112 3113 case CRYPTO_DATA_UIO: 3114 /* 3115 * Jump to the first iovec containing data to be processed. 3116 */ 3117 uiop = in->cd_uio; 3118 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3119 off >= uiop->uio_iov[vec_idx].iov_len; 3120 off -= uiop->uio_iov[vec_idx++].iov_len) 3121 ; 3122 if (vec_idx == uiop->uio_iovcnt) { 3123 /* 3124 * The caller specified an offset that is larger than 3125 * the total size of the buffers it provided. 3126 */ 3127 return (CRYPTO_DATA_LEN_RANGE); 3128 } 3129 3130 /* 3131 * Now process the iovecs. 
3132 */ 3133 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3134 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3135 off, count); 3136 count -= cur_len; 3137 if (reverse) { 3138 /* Fill the dest buffer from the end */ 3139 dca_reverse(uiop->uio_iov[vec_idx].iov_base + 3140 off, dest+count, cur_len, cur_len); 3141 } else { 3142 bcopy(uiop->uio_iov[vec_idx].iov_base + off, 3143 dest, cur_len); 3144 dest += cur_len; 3145 } 3146 in->cd_offset += cur_len; 3147 in->cd_length -= cur_len; 3148 vec_idx++; 3149 off = 0; 3150 } 3151 3152 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3153 /* 3154 * The end of the specified iovecs was reached but 3155 * the length requested could not be processed 3156 * (requested to digest more data than it provided). 3157 */ 3158 return (CRYPTO_DATA_LEN_RANGE); 3159 } 3160 break; 3161 3162 case CRYPTO_DATA_MBLK: 3163 /* 3164 * Jump to the first mblk_t containing data to be processed. 3165 */ 3166 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3167 off -= MBLKL(mp), mp = mp->b_cont) 3168 ; 3169 if (mp == NULL) { 3170 /* 3171 * The caller specified an offset that is larger than 3172 * the total size of the buffers it provided. 3173 */ 3174 return (CRYPTO_DATA_LEN_RANGE); 3175 } 3176 3177 /* 3178 * Now do the processing on the mblk chain. 3179 */ 3180 while (mp != NULL && count > 0) { 3181 cur_len = min(MBLKL(mp) - off, count); 3182 count -= cur_len; 3183 if (reverse) { 3184 /* Fill the dest buffer from the end */ 3185 dca_reverse((char *)(mp->b_rptr + off), 3186 dest+count, cur_len, cur_len); 3187 } else { 3188 bcopy((char *)(mp->b_rptr + off), dest, 3189 cur_len); 3190 dest += cur_len; 3191 } 3192 in->cd_offset += cur_len; 3193 in->cd_length -= cur_len; 3194 mp = mp->b_cont; 3195 off = 0; 3196 } 3197 3198 if (mp == NULL && count > 0) { 3199 /* 3200 * The end of the mblk was reached but the length 3201 * requested could not be processed (requested to 3202 * digest more data than it provided). 3203 */ 3204 return (CRYPTO_DATA_LEN_RANGE); 3205 } 3206 break; 3207 3208 default: 3209 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format"); 3210 rv = CRYPTO_ARGUMENTS_BAD; 3211 } 3212 return (rv); 3213 } 3214 3215 /* 3216 * Increments the cd_offset and decrements the cd_length as the data is 3217 * gathered from the crypto_data_t struct. 3218 */ 3219 int 3220 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest, 3221 int count) 3222 { 3223 int rv = CRYPTO_SUCCESS; 3224 caddr_t baddr; 3225 uint_t vec_idx; 3226 uio_t *uiop; 3227 off_t off = in->cd_offset; 3228 size_t cur_len; 3229 mblk_t *mp; 3230 3231 /* Process the residual first */ 3232 if (*residlen > 0) { 3233 uint_t num = min(count, *residlen); 3234 bcopy(resid, dest, num); 3235 *residlen -= num; 3236 if (*residlen > 0) { 3237 /* 3238 * Requested amount 'count' is less than what's in 3239 * the residual, so shuffle any remaining resid to 3240 * the front. 3241 */ 3242 baddr = resid + num; 3243 bcopy(baddr, resid, *residlen); 3244 } 3245 dest += num; 3246 count -= num; 3247 } 3248 3249 /* Now process what's in the crypto_data_t structs */ 3250 switch (in->cd_format) { 3251 case CRYPTO_DATA_RAW: 3252 if (count > in->cd_length) { 3253 /* 3254 * The caller specified a length greater than the 3255 * size of the buffer.
3256 */ 3257 return (CRYPTO_DATA_LEN_RANGE); 3258 } 3259 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3260 in->cd_offset += count; 3261 in->cd_length -= count; 3262 break; 3263 3264 case CRYPTO_DATA_UIO: 3265 /* 3266 * Jump to the first iovec containing data to be processed. 3267 */ 3268 uiop = in->cd_uio; 3269 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3270 off >= uiop->uio_iov[vec_idx].iov_len; 3271 off -= uiop->uio_iov[vec_idx++].iov_len) 3272 ; 3273 if (vec_idx == uiop->uio_iovcnt) { 3274 /* 3275 * The caller specified an offset that is larger than 3276 * the total size of the buffers it provided. 3277 */ 3278 return (CRYPTO_DATA_LEN_RANGE); 3279 } 3280 3281 /* 3282 * Now process the iovecs. 3283 */ 3284 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3285 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3286 off, count); 3287 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 3288 cur_len); 3289 count -= cur_len; 3290 dest += cur_len; 3291 in->cd_offset += cur_len; 3292 in->cd_length -= cur_len; 3293 vec_idx++; 3294 off = 0; 3295 } 3296 3297 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3298 /* 3299 * The end of the specified iovecs was reached but 3300 * the length requested could not be processed 3301 * (requested to digest more data than it provided). 3302 */ 3303 return (CRYPTO_DATA_LEN_RANGE); 3304 } 3305 break; 3306 3307 case CRYPTO_DATA_MBLK: 3308 /* 3309 * Jump to the first mblk_t containing data to be processed. 3310 */ 3311 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3312 off -= MBLKL(mp), mp = mp->b_cont) 3313 ; 3314 if (mp == NULL) { 3315 /* 3316 * The caller specified an offset that is larger than 3317 * the total size of the buffers it provided. 3318 */ 3319 return (CRYPTO_DATA_LEN_RANGE); 3320 } 3321 3322 /* 3323 * Now do the processing on the mblk chain. 3324 */ 3325 while (mp != NULL && count > 0) { 3326 cur_len = min(MBLKL(mp) - off, count); 3327 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 3328 count -= cur_len; 3329 dest += cur_len; 3330 in->cd_offset += cur_len; 3331 in->cd_length -= cur_len; 3332 mp = mp->b_cont; 3333 off = 0; 3334 } 3335 3336 if (mp == NULL && count > 0) { 3337 /* 3338 * The end of the mblk was reached but the length 3339 * requested could not be processed (requested to 3340 * digest more data than it provided). 3341 */ 3342 return (CRYPTO_DATA_LEN_RANGE); 3343 } 3344 break; 3345 3346 default: 3347 DBG(NULL, DWARN, 3348 "dca_resid_gather: unrecognised crypto data format"); 3349 rv = CRYPTO_ARGUMENTS_BAD; 3350 } 3351 return (rv); 3352 } 3353 3354 /* 3355 * Appends the data to the crypto_data_t struct, increasing cd_length. 3356 * cd_offset is left unchanged. 3357 * Data is reverse-copied if the flag is TRUE. 3358 */ 3359 int 3360 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse) 3361 { 3362 int rv = CRYPTO_SUCCESS; 3363 off_t offset = out->cd_offset + out->cd_length; 3364 uint_t vec_idx; 3365 uio_t *uiop; 3366 size_t cur_len; 3367 mblk_t *mp; 3368 3369 switch (out->cd_format) { 3370 case CRYPTO_DATA_RAW: 3371 if (out->cd_raw.iov_len - offset < count) { 3372 /* Trying to write out more than space available. */ 3373 return (CRYPTO_DATA_LEN_RANGE); 3374 } 3375 if (reverse) 3376 dca_reverse((void*) src, out->cd_raw.iov_base + offset, 3377 count, count); 3378 else 3379 bcopy(src, out->cd_raw.iov_base + offset, count); 3380 out->cd_length += count; 3381 break; 3382 3383 case CRYPTO_DATA_UIO: 3384 /* 3385 * Jump to the first iovec that can be written to.
3386 */ 3387 uiop = out->cd_uio; 3388 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3389 offset >= uiop->uio_iov[vec_idx].iov_len; 3390 offset -= uiop->uio_iov[vec_idx++].iov_len) 3391 ; 3392 if (vec_idx == uiop->uio_iovcnt) { 3393 /* 3394 * The caller specified an offset that is larger than 3395 * the total size of the buffers it provided. 3396 */ 3397 return (CRYPTO_DATA_LEN_RANGE); 3398 } 3399 3400 /* 3401 * Now process the iovecs. 3402 */ 3403 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3404 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3405 offset, count); 3406 count -= cur_len; 3407 if (reverse) { 3408 dca_reverse((void*) (src+count), 3409 uiop->uio_iov[vec_idx].iov_base + 3410 offset, cur_len, cur_len); 3411 } else { 3412 bcopy(src, uiop->uio_iov[vec_idx].iov_base + 3413 offset, cur_len); 3414 src += cur_len; 3415 } 3416 out->cd_length += cur_len; 3417 vec_idx++; 3418 offset = 0; 3419 } 3420 3421 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3422 /* 3423 * The end of the specified iovecs was reached but 3424 * the length requested could not be processed 3425 * (requested to write more data than space provided). 3426 */ 3427 return (CRYPTO_DATA_LEN_RANGE); 3428 } 3429 break; 3430 3431 case CRYPTO_DATA_MBLK: 3432 /* 3433 * Jump to the first mblk_t that can be written to. 3434 */ 3435 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp); 3436 offset -= MBLKL(mp), mp = mp->b_cont) 3437 ; 3438 if (mp == NULL) { 3439 /* 3440 * The caller specified an offset that is larger than 3441 * the total size of the buffers it provided. 3442 */ 3443 return (CRYPTO_DATA_LEN_RANGE); 3444 } 3445 3446 /* 3447 * Now do the processing on the mblk chain. 3448 */ 3449 while (mp != NULL && count > 0) { 3450 cur_len = min(MBLKL(mp) - offset, count); 3451 count -= cur_len; 3452 if (reverse) { 3453 dca_reverse((void*) (src+count), 3454 (char *)(mp->b_rptr + offset), cur_len, 3455 cur_len); 3456 } else { 3457 bcopy(src, (char *)(mp->b_rptr + offset), 3458 cur_len); 3459 src += cur_len; 3460 } 3461 out->cd_length += cur_len; 3462 mp = mp->b_cont; 3463 offset = 0; 3464 } 3465 3466 if (mp == NULL && count > 0) { 3467 /* 3468 * The end of the mblk was reached but the length 3469 * requested could not be processed (requested to 3470 * write more data than space provided). 3471 */ 3472 return (CRYPTO_DATA_LEN_RANGE); 3473 } 3474 break; 3475 3476 default: 3477 DBG(NULL, DWARN, "unrecognised crypto data format"); 3478 rv = CRYPTO_ARGUMENTS_BAD; 3479 } 3480 return (rv); 3481 } 3482 3483 /* 3484 * Compare two byte arrays in reverse order. 3485 * Return 0 if they are identical, 1 otherwise. 3486 */ 3487 int 3488 dca_bcmp_reverse(const void *s1, const void *s2, size_t n) 3489 { 3490 int i; 3491 caddr_t src, dst; 3492 3493 if (!n) 3494 return (0); 3495 3496 src = ((caddr_t)s1) + n - 1; 3497 dst = (caddr_t)s2; 3498 for (i = 0; i < n; i++) { 3499 if (*src != *dst) 3500 return (1); 3501 src--; 3502 dst++; 3503 } 3504 3505 return (0); 3506 } 3507 3508 3509 /* 3510 * This calculates the size of a bignum in bits, specifically not counting 3511 * leading zero bits. This size calculation must be done *before* any 3512 * endian reversal takes place (i.e. the numbers are in absolute big-endian 3513 * order.)
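 *
 * For example, the two-byte bignum { 0x00, 0x05 } has a bit length of
 * 3: the leading zero byte is skipped (i == 1) and 0x05 contributes
 * three significant bits, giving 8 * (2 - 1 - 1) + 3.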
3514 */ 3515 int 3516 dca_bitlen(unsigned char *bignum, int bytelen) 3517 { 3518 unsigned char msbyte; 3519 int i, j; 3520 3521 for (i = 0; i < bytelen - 1; i++) { 3522 if (bignum[i] != 0) { 3523 break; 3524 } 3525 } 3526 msbyte = bignum[i]; 3527 for (j = 8; j > 1; j--) { 3528 if (msbyte & 0x80) { 3529 break; 3530 } 3531 msbyte <<= 1; 3532 } 3533 return ((8 * (bytelen - i - 1)) + j); 3534 } 3535 3536 /* 3537 * This compares two bignums (in big-endian order). It ignores leading 3538 * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc. 3539 */ 3540 int 3541 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len) 3542 { 3543 while ((n1len > 1) && (*n1 == 0)) { 3544 n1len--; 3545 n1++; 3546 } 3547 while ((n2len > 1) && (*n2 == 0)) { 3548 n2len--; 3549 n2++; 3550 } 3551 if (n1len != n2len) { 3552 return (n1len - n2len); 3553 } 3554 while ((n1len > 1) && (*n1 == *n2)) { 3555 n1++; 3556 n2++; 3557 n1len--; 3558 } 3559 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2)); 3560 } 3561 3562 /* 3563 * Return array of key attributes. 3564 */ 3565 crypto_object_attribute_t * 3566 dca_get_key_attr(crypto_key_t *key) 3567 { 3568 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) || 3569 (key->ck_count == 0)) { 3570 return (NULL); 3571 } 3572 3573 return (key->ck_attrs); 3574 } 3575 3576 /* 3577 * If the attribute type exists, valp points to its 32-bit value. 3578 */ 3579 int 3580 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum, 3581 uint64_t atype, uint32_t *valp) 3582 { 3583 crypto_object_attribute_t *bap; 3584 3585 bap = dca_find_attribute(attrp, atnum, atype); 3586 if (bap == NULL) { 3587 return (CRYPTO_ATTRIBUTE_TYPE_INVALID); 3588 } 3589 3590 *valp = *bap->oa_value; 3591 3592 return (CRYPTO_SUCCESS); 3593 } 3594 3595 /* 3596 * If the attribute type exists, data contains the start address of the value, 3597 * and numelems contains its length. 3598 */ 3599 int 3600 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum, 3601 uint64_t atype, void **data, unsigned int *numelems) 3602 { 3603 crypto_object_attribute_t *bap; 3604 3605 bap = dca_find_attribute(attrp, atnum, atype); 3606 if (bap == NULL) { 3607 return (CRYPTO_ATTRIBUTE_TYPE_INVALID); 3608 } 3609 3610 *data = bap->oa_value; 3611 *numelems = bap->oa_value_len; 3612 3613 return (CRYPTO_SUCCESS); 3614 } 3615 3616 /* 3617 * Finds the entry of the specified attribute type. If it is not found, 3618 * dca_find_attribute returns NULL. 3619 */ 3620 crypto_object_attribute_t * 3621 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum, 3622 uint64_t atype) 3623 { 3624 while (atnum) { 3625 if (attrp->oa_type == atype) 3626 return (attrp); 3627 atnum--; 3628 attrp++; 3629 } 3630 return (NULL); 3631 } 3632 3633 /* 3634 * Return the address of the first data buffer. If the data format is 3635 * unrecognised return NULL.
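 * Note that for UIO and MBLK data only the first segment is examined;
 * callers are expected to have rejected genuinely scattered data
 * beforehand (see dca_sgcheck()).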
3636 */ 3637 caddr_t 3638 dca_bufdaddr(crypto_data_t *data) 3639 { 3640 switch (data->cd_format) { 3641 case CRYPTO_DATA_RAW: 3642 return (data->cd_raw.iov_base + data->cd_offset); 3643 case CRYPTO_DATA_UIO: 3644 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset); 3645 case CRYPTO_DATA_MBLK: 3646 return ((char *)data->cd_mp->b_rptr + data->cd_offset); 3647 default: 3648 DBG(NULL, DWARN, 3649 "dca_bufdaddr: unrecognised crypto data format"); 3650 return (NULL); 3651 } 3652 } 3653 3654 static caddr_t 3655 dca_bufdaddr_out(crypto_data_t *data) 3656 { 3657 size_t offset = data->cd_offset + data->cd_length; 3658 3659 switch (data->cd_format) { 3660 case CRYPTO_DATA_RAW: 3661 return (data->cd_raw.iov_base + offset); 3662 case CRYPTO_DATA_UIO: 3663 return (data->cd_uio->uio_iov[0].iov_base + offset); 3664 case CRYPTO_DATA_MBLK: 3665 return ((char *)data->cd_mp->b_rptr + offset); 3666 default: 3667 DBG(NULL, DWARN, 3668 "dca_bufdaddr_out: unrecognised crypto data format"); 3669 return (NULL); 3670 } 3671 } 3672 3673 /* 3674 * Control entry points. 3675 */ 3676 3677 /* ARGSUSED */ 3678 static void 3679 dca_provider_status(crypto_provider_handle_t provider, uint_t *status) 3680 { 3681 *status = CRYPTO_PROVIDER_READY; 3682 } 3683 3684 /* 3685 * Cipher (encrypt/decrypt) entry points. 3686 */ 3687 3688 /* ARGSUSED */ 3689 static int 3690 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3691 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3692 crypto_req_handle_t req) 3693 { 3694 int error = CRYPTO_FAILED; 3695 dca_t *softc; 3696 /* LINTED E_FUNC_SET_NOT_USED */ 3697 int instance; 3698 3699 /* extract softc and instance number from context */ 3700 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3701 DBG(softc, DENTRY, "dca_encrypt_init: started"); 3702 3703 /* check mechanism */ 3704 switch (mechanism->cm_type) { 3705 case DES_CBC_MECH_INFO_TYPE: 3706 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3707 DR_ENCRYPT); 3708 break; 3709 case DES3_CBC_MECH_INFO_TYPE: 3710 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3711 DR_ENCRYPT | DR_TRIPLE); 3712 break; 3713 case RSA_PKCS_MECH_INFO_TYPE: 3714 case RSA_X_509_MECH_INFO_TYPE: 3715 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3716 break; 3717 default: 3718 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type " 3719 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3720 error = CRYPTO_MECHANISM_INVALID; 3721 } 3722 3723 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error); 3724 3725 if (error == CRYPTO_SUCCESS) 3726 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3727 &softc->dca_ctx_list_lock); 3728 3729 return (error); 3730 } 3731 3732 /* ARGSUSED */ 3733 static int 3734 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3735 crypto_data_t *ciphertext, crypto_req_handle_t req) 3736 { 3737 int error = CRYPTO_FAILED; 3738 dca_t *softc; 3739 /* LINTED E_FUNC_SET_NOT_USED */ 3740 int instance; 3741 3742 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3743 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3744 3745 /* extract softc and instance number from context */ 3746 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3747 DBG(softc, DENTRY, "dca_encrypt: started"); 3748 3749 /* handle inplace ops */ 3750 if (!ciphertext) { 3751 dca_request_t *reqp = ctx->cc_provider_private; 3752 reqp->dr_flags |= DR_INPLACE; 3753 ciphertext = plaintext; 3754 } 3755 3756 /* check mechanism */ 3757 switch (DCA_MECH_FROM_CTX(ctx)) { 3758 case DES_CBC_MECH_INFO_TYPE: 3759 error = 
dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT); 3760 break; 3761 case DES3_CBC_MECH_INFO_TYPE: 3762 error = dca_3des(ctx, plaintext, ciphertext, req, 3763 DR_ENCRYPT | DR_TRIPLE); 3764 break; 3765 case RSA_PKCS_MECH_INFO_TYPE: 3766 case RSA_X_509_MECH_INFO_TYPE: 3767 error = dca_rsastart(ctx, plaintext, ciphertext, req, 3768 DCA_RSA_ENC); 3769 break; 3770 default: 3771 /* Should never reach here */ 3772 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type " 3773 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3774 error = CRYPTO_MECHANISM_INVALID; 3775 } 3776 3777 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 3778 (error != CRYPTO_BUFFER_TOO_SMALL)) { 3779 ciphertext->cd_length = 0; 3780 } 3781 3782 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error); 3783 3784 return (error); 3785 } 3786 3787 /* ARGSUSED */ 3788 static int 3789 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3790 crypto_data_t *ciphertext, crypto_req_handle_t req) 3791 { 3792 int error = CRYPTO_FAILED; 3793 dca_t *softc; 3794 /* LINTED E_FUNC_SET_NOT_USED */ 3795 int instance; 3796 3797 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3798 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3799 3800 /* extract softc and instance number from context */ 3801 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3802 DBG(softc, DENTRY, "dca_encrypt_update: started"); 3803 3804 /* handle inplace ops */ 3805 if (!ciphertext) { 3806 dca_request_t *reqp = ctx->cc_provider_private; 3807 reqp->dr_flags |= DR_INPLACE; 3808 ciphertext = plaintext; 3809 } 3810 3811 /* check mechanism */ 3812 switch (DCA_MECH_FROM_CTX(ctx)) { 3813 case DES_CBC_MECH_INFO_TYPE: 3814 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3815 DR_ENCRYPT); 3816 break; 3817 case DES3_CBC_MECH_INFO_TYPE: 3818 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3819 DR_ENCRYPT | DR_TRIPLE); 3820 break; 3821 default: 3822 /* Should never reach here */ 3823 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type " 3824 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3825 error = CRYPTO_MECHANISM_INVALID; 3826 } 3827 3828 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error); 3829 3830 return (error); 3831 } 3832 3833 /* ARGSUSED */ 3834 static int 3835 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3836 crypto_req_handle_t req) 3837 { 3838 int error = CRYPTO_FAILED; 3839 dca_t *softc; 3840 /* LINTED E_FUNC_SET_NOT_USED */ 3841 int instance; 3842 3843 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3844 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3845 3846 /* extract softc and instance number from context */ 3847 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3848 DBG(softc, DENTRY, "dca_encrypt_final: started"); 3849 3850 /* check mechanism */ 3851 switch (DCA_MECH_FROM_CTX(ctx)) { 3852 case DES_CBC_MECH_INFO_TYPE: 3853 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT); 3854 break; 3855 case DES3_CBC_MECH_INFO_TYPE: 3856 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE); 3857 break; 3858 default: 3859 /* Should never reach here */ 3860 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type " 3861 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3862 error = CRYPTO_MECHANISM_INVALID; 3863 } 3864 3865 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error); 3866 3867 return (error); 3868 } 3869 3870 /* ARGSUSED */ 3871 static int 3872 dca_encrypt_atomic(crypto_provider_handle_t provider, 3873 crypto_session_id_t session_id, 
crypto_mechanism_t *mechanism, 3874 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext, 3875 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 3876 { 3877 int error = CRYPTO_FAILED; 3878 dca_t *softc = (dca_t *)provider; 3879 3880 DBG(softc, DENTRY, "dca_encrypt_atomic: started"); 3881 3882 if (ctx_template != NULL) 3883 return (CRYPTO_ARGUMENTS_BAD); 3884 3885 /* handle inplace ops */ 3886 if (!ciphertext) { 3887 ciphertext = plaintext; 3888 } 3889 3890 /* check mechanism */ 3891 switch (mechanism->cm_type) { 3892 case DES_CBC_MECH_INFO_TYPE: 3893 error = dca_3desatomic(provider, session_id, mechanism, key, 3894 plaintext, ciphertext, KM_SLEEP, req, 3895 DR_ENCRYPT | DR_ATOMIC); 3896 break; 3897 case DES3_CBC_MECH_INFO_TYPE: 3898 error = dca_3desatomic(provider, session_id, mechanism, key, 3899 plaintext, ciphertext, KM_SLEEP, req, 3900 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC); 3901 break; 3902 case RSA_PKCS_MECH_INFO_TYPE: 3903 case RSA_X_509_MECH_INFO_TYPE: 3904 error = dca_rsaatomic(provider, session_id, mechanism, key, 3905 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC); 3906 break; 3907 default: 3908 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type " 3909 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3910 error = CRYPTO_MECHANISM_INVALID; 3911 } 3912 3913 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 3914 ciphertext->cd_length = 0; 3915 } 3916 3917 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error); 3918 3919 return (error); 3920 } 3921 3922 /* ARGSUSED */ 3923 static int 3924 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3925 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3926 crypto_req_handle_t req) 3927 { 3928 int error = CRYPTO_FAILED; 3929 dca_t *softc; 3930 /* LINTED E_FUNC_SET_NOT_USED */ 3931 int instance; 3932 3933 /* extract softc and instance number from context */ 3934 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3935 DBG(softc, DENTRY, "dca_decrypt_init: started"); 3936 3937 /* check mechanism */ 3938 switch (mechanism->cm_type) { 3939 case DES_CBC_MECH_INFO_TYPE: 3940 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3941 DR_DECRYPT); 3942 break; 3943 case DES3_CBC_MECH_INFO_TYPE: 3944 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3945 DR_DECRYPT | DR_TRIPLE); 3946 break; 3947 case RSA_PKCS_MECH_INFO_TYPE: 3948 case RSA_X_509_MECH_INFO_TYPE: 3949 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3950 break; 3951 default: 3952 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type " 3953 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3954 error = CRYPTO_MECHANISM_INVALID; 3955 } 3956 3957 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error); 3958 3959 if (error == CRYPTO_SUCCESS) 3960 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3961 &softc->dca_ctx_list_lock); 3962 3963 return (error); 3964 } 3965 3966 /* ARGSUSED */ 3967 static int 3968 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3969 crypto_data_t *plaintext, crypto_req_handle_t req) 3970 { 3971 int error = CRYPTO_FAILED; 3972 dca_t *softc; 3973 /* LINTED E_FUNC_SET_NOT_USED */ 3974 int instance; 3975 3976 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3977 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3978 3979 /* extract softc and instance number from context */ 3980 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3981 DBG(softc, DENTRY, "dca_decrypt: started"); 3982 3983 /* handle inplace ops */ 3984 if (!plaintext) { 3985 dca_request_t 
*reqp = ctx->cc_provider_private; 3986 reqp->dr_flags |= DR_INPLACE; 3987 plaintext = ciphertext; 3988 } 3989 3990 /* check mechanism */ 3991 switch (DCA_MECH_FROM_CTX(ctx)) { 3992 case DES_CBC_MECH_INFO_TYPE: 3993 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT); 3994 break; 3995 case DES3_CBC_MECH_INFO_TYPE: 3996 error = dca_3des(ctx, ciphertext, plaintext, req, 3997 DR_DECRYPT | DR_TRIPLE); 3998 break; 3999 case RSA_PKCS_MECH_INFO_TYPE: 4000 case RSA_X_509_MECH_INFO_TYPE: 4001 error = dca_rsastart(ctx, ciphertext, plaintext, req, 4002 DCA_RSA_DEC); 4003 break; 4004 default: 4005 /* Should never reach here */ 4006 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type " 4007 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4008 error = CRYPTO_MECHANISM_INVALID; 4009 } 4010 4011 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 4012 (error != CRYPTO_BUFFER_TOO_SMALL)) { 4013 if (plaintext) 4014 plaintext->cd_length = 0; 4015 } 4016 4017 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error); 4018 4019 return (error); 4020 } 4021 4022 /* ARGSUSED */ 4023 static int 4024 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 4025 crypto_data_t *plaintext, crypto_req_handle_t req) 4026 { 4027 int error = CRYPTO_FAILED; 4028 dca_t *softc; 4029 /* LINTED E_FUNC_SET_NOT_USED */ 4030 int instance; 4031 4032 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4033 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4034 4035 /* extract softc and instance number from context */ 4036 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4037 DBG(softc, DENTRY, "dca_decrypt_update: started"); 4038 4039 /* handle inplace ops */ 4040 if (!plaintext) { 4041 dca_request_t *reqp = ctx->cc_provider_private; 4042 reqp->dr_flags |= DR_INPLACE; 4043 plaintext = ciphertext; 4044 } 4045 4046 /* check mechanism */ 4047 switch (DCA_MECH_FROM_CTX(ctx)) { 4048 case DES_CBC_MECH_INFO_TYPE: 4049 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 4050 DR_DECRYPT); 4051 break; 4052 case DES3_CBC_MECH_INFO_TYPE: 4053 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 4054 DR_DECRYPT | DR_TRIPLE); 4055 break; 4056 default: 4057 /* Should never reach here */ 4058 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type " 4059 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4060 error = CRYPTO_MECHANISM_INVALID; 4061 } 4062 4063 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error); 4064 4065 return (error); 4066 } 4067 4068 /* ARGSUSED */ 4069 static int 4070 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext, 4071 crypto_req_handle_t req) 4072 { 4073 int error = CRYPTO_FAILED; 4074 dca_t *softc; 4075 /* LINTED E_FUNC_SET_NOT_USED */ 4076 int instance; 4077 4078 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4079 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4080 4081 /* extract softc and instance number from context */ 4082 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4083 DBG(softc, DENTRY, "dca_decrypt_final: started"); 4084 4085 /* check mechanism */ 4086 switch (DCA_MECH_FROM_CTX(ctx)) { 4087 case DES_CBC_MECH_INFO_TYPE: 4088 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT); 4089 break; 4090 case DES3_CBC_MECH_INFO_TYPE: 4091 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE); 4092 break; 4093 default: 4094 /* Should never reach here */ 4095 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type " 4096 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4097 error = CRYPTO_MECHANISM_INVALID; 4098 } 4099 4100 
DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error); 4101 4102 return (error); 4103 } 4104 4105 /* ARGSUSED */ 4106 static int 4107 dca_decrypt_atomic(crypto_provider_handle_t provider, 4108 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4109 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext, 4110 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4111 { 4112 int error = CRYPTO_FAILED; 4113 dca_t *softc = (dca_t *)provider; 4114 4115 DBG(softc, DENTRY, "dca_decrypt_atomic: started"); 4116 4117 if (ctx_template != NULL) 4118 return (CRYPTO_ARGUMENTS_BAD); 4119 4120 /* handle inplace ops */ 4121 if (!plaintext) { 4122 plaintext = ciphertext; 4123 } 4124 4125 /* check mechanism */ 4126 switch (mechanism->cm_type) { 4127 case DES_CBC_MECH_INFO_TYPE: 4128 error = dca_3desatomic(provider, session_id, mechanism, key, 4129 ciphertext, plaintext, KM_SLEEP, req, 4130 DR_DECRYPT | DR_ATOMIC); 4131 break; 4132 case DES3_CBC_MECH_INFO_TYPE: 4133 error = dca_3desatomic(provider, session_id, mechanism, key, 4134 ciphertext, plaintext, KM_SLEEP, req, 4135 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC); 4136 break; 4137 case RSA_PKCS_MECH_INFO_TYPE: 4138 case RSA_X_509_MECH_INFO_TYPE: 4139 error = dca_rsaatomic(provider, session_id, mechanism, key, 4140 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC); 4141 break; 4142 default: 4143 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type " 4144 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4145 error = CRYPTO_MECHANISM_INVALID; 4146 } 4147 4148 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 4149 plaintext->cd_length = 0; 4150 } 4151 4152 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error); 4153 4154 return (error); 4155 } 4156 4157 /* 4158 * Sign entry points. 
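 *
 * Only single-part RSA and DSA signing is supported here; the update
 * and final entry points below exist to satisfy the SPI and always
 * fail with CRYPTO_MECHANISM_INVALID.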
/* ARGSUSED */
static int
dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_sign(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_update: started\n");

	cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_final: started\n");

	cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);

	return (error);
}
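/*
 * The RSA and DSA mechanisms offered by this provider are single-part
 * only, so dca_sign_update() and dca_sign_final() above never have a
 * valid mechanism to dispatch on; they warn and fail with
 * CRYPTO_MECHANISM_INVALID unconditionally.
 */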
static int
dca_sign_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_sign_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);

	return (error);
}
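/*
 * Sign-with-recovery is offered for the RSA mechanisms only: the data
 * is embedded in the signature by the private-key operation
 * (DCA_RSA_SIGNR) so that a later verify-recover can extract it.
 */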
static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Verify entry points.
 */

/* ARGSUSED */
static int
dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_verify(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);

	return (error);
}
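/*
 * Argument order differs by mechanism in dca_verify() above: RSA
 * verification runs the public-key operation on the signature (hence
 * dca_rsastart(ctx, signature, data, ...)), while DSA verification
 * checks the signature against the data directly.
 */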
/* ARGSUSED */
static int
dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_update: started\n");

	cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_final: started\n");

	cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);

	return (error);
}
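/*
 * Verify-with-recovery, like sign-with-recovery, is RSA-only; the
 * recovered data is produced by running the public-key operation on
 * the signature (DCA_RSA_VRFYR).
 */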
/* ARGSUSED */
static int
dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
	}

	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_data_t *data, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
	}

	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY,
	    "dca_verify_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Random number entry points.
 */
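/*
 * Random numbers are exposed through the asymmetric provider slot
 * (ext_info_asym() sets CRYPTO_EXTF_RNG); the work itself is queued
 * by dca_rng().
 */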
/* ARGSUSED */
static int
dca_generate_random(crypto_provider_handle_t provider,
    crypto_session_id_t session_id,
    uchar_t *buf, size_t len, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_generate_random: started");

	error = dca_rng(softc, buf, len, req);

	DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);

	return (error);
}

/*
 * Context management entry points.
 */

int
dca_free_context(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_free_context: entered");

	if (ctx->cc_provider_private == NULL)
		return (error);

	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);

	error = dca_free_context_low(ctx);

	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);

	return (error);
}

static int
dca_free_context_low(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
	case DES3_CBC_MECH_INFO_TYPE:
		dca_3desctxfree(ctx);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		dca_rsactxfree(ctx);
		break;
	case DSA_MECH_INFO_TYPE:
		dca_dsactxfree(ctx);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	return (error);
}
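/*
 * dca_free_context_low() dispatches on the mechanism type alone, so a
 * caller only needs a crypto_ctx_t whose cc_provider_private points at
 * the private context; dca_free_context_list() below exploits this by
 * building a throwaway ctx on the stack for each leftover node.
 */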
/* Free any unfreed private contexts.  Called during detach. */
static void
dca_free_context_list(dca_t *dca)
{
	dca_listnode_t *node;
	crypto_ctx_t ctx;

	(void) memset(&ctx, 0, sizeof (ctx));
	ctx.cc_provider = dca;

	while ((node = dca_delist2(&dca->dca_ctx_list,
	    &dca->dca_ctx_list_lock)) != NULL) {
		ctx.cc_provider_private = node;
		(void) dca_free_context_low(&ctx);
	}
}

static int
ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
}

static int
ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	int rv;

	rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
	/* The asymmetric cipher slot supports random */
	ext_info->ei_flags |= CRYPTO_EXTF_RNG;

	return (rv);
}

/* ARGSUSED */
static int
ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
{
	dca_t *dca = (dca_t *)prov;
	int len;

	/* Label */
	(void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
	    ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
	len = strlen((char *)ext_info->ei_label);
	(void) memset(ext_info->ei_label + len, ' ',
	    CRYPTO_EXT_SIZE_LABEL - len);

	/* Manufacturer ID */
	(void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
	    DCA_MANUFACTURER_ID);
	len = strlen((char *)ext_info->ei_manufacturerID);
	(void) memset(ext_info->ei_manufacturerID + len, ' ',
	    CRYPTO_EXT_SIZE_MANUF - len);

	/* Model (use "%s" so the model string is never parsed as a format) */
	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);

	DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);

	len = strlen((char *)ext_info->ei_model);
	(void) memset(ext_info->ei_model + len, ' ',
	    CRYPTO_EXT_SIZE_MODEL - len);

	/* Serial Number.  Blank for Deimos */
	(void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);

	ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;

	ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_hardware_version.cv_major = 0;
	ext_info->ei_hardware_version.cv_minor = 0;
	ext_info->ei_firmware_version.cv_major = 0;
	ext_info->ei_firmware_version.cv_minor = 0;

	/* Time.  No need to be supplied for token without a clock */
	ext_info->ei_time[0] = '\000';

	return (CRYPTO_SUCCESS);
}
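/*
 * ext_info_base() fills each fixed-width kCF/PKCS#11-style field by
 * writing the string and then space-padding the remainder rather than
 * NUL-terminating it.  A sketch of that pattern as a hypothetical
 * helper (not part of this driver; DCA_EXAMPLES is never defined):
 */
#ifdef DCA_EXAMPLES
static void
dca_example_pad_field(uchar_t *field, size_t size, const char *str)
{
	size_t len = strlen(str);

	if (len > size)
		len = size;
	bcopy(str, field, len);
	/* pad the tail with spaces; the field carries no terminator */
	(void) memset(field + len, ' ', size - len);
}
#endif	/* DCA_EXAMPLES */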
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_devattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_devattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
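/*
 * dca_fma_fini() below must mirror dca_fma_init() exactly: ereport
 * resources, the error callback, and the ddi_fm_init() registration
 * are each torn down only if the corresponding capability was
 * negotiated at init time.
 */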
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}


/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	dca_t *dca = (dca_t *)impl_data;

	pci_ereport_post(dip, err, NULL);
	if (err->fme_status == DDI_FM_FATAL) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "fault PCI in FMA callback.");
	}
	return (err->fme_status);
}


static int
dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_acc_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

int
dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_dma_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static uint64_t
dca_ena(uint64_t ena)
{
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	else
		ena = fm_ena_increment(ena);
	return (ena);
}

static char *
dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
{
	if (strstr(model, "500"))
		return (dca_fma_eclass_sca500[index]);
	else
		return (dca_fma_eclass_sca1000[index]);
}
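/*
 * How the pieces above combine when reporting a fault: an ENA taken
 * from a handle error is incremented, while dca_ena(0) generates a
 * fresh one.  Illustrative call only (mirrors dca_fm_error_cb();
 * DCA_EXAMPLES is never defined in real builds):
 */
#ifdef DCA_EXAMPLES
static void
dca_example_report_fault(dca_t *dca)
{
	dca_failure(dca, DDI_DATAPATH_FAULT, DCA_FM_ECLASS_NONE,
	    dca_ena(0), CRYPTO_DEVICE_ERROR, "example datapath fault");
}
#endif	/* DCA_EXAMPLES */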