/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/ioccom.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/crypto/dca.h>

/*
 * Core Deimos driver.
 */

static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
static void dca_free_context_list(dca_t *dca);
static int dca_free_context_low(crypto_ctx_t *ctx);
static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int dca_suspend(dca_t *);
static int dca_resume(dca_t *);
static int dca_init(dca_t *);
static int dca_reset(dca_t *, int);
static int dca_initworklist(dca_t *, dca_worklist_t *);
static void dca_uninit(dca_t *);
static void dca_initq(dca_listnode_t *);
static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t *dca_dequeue(dca_listnode_t *);
static dca_listnode_t *dca_unqueue(dca_listnode_t *);
static dca_request_t *dca_newreq(dca_t *);
static dca_work_t *dca_getwork(dca_t *, int);
static void dca_freework(dca_work_t *);
static dca_work_t *dca_newwork(dca_t *);
static void dca_destroywork(dca_work_t *);
static void dca_schedule(dca_t *, int);
static void dca_reclaim(dca_t *, int);
static uint_t dca_intr(char *);
static void dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void dca_jobtimeout(void *);
static int dca_drain(dca_t *);
static void dca_undrain(dca_t *);
static void dca_rejectjobs(dca_t *);

#ifdef	SCHEDDELAY
static void dca_schedtimeout(void *);
#endif

/*
 * We want these inlined for performance.
 */
#ifndef	DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif

/*
 * Device operations.
 */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

#define	IDENT		"PCI Crypto Accelerator"
#define	IDENT_SYM	"Crypto Accel Sym 2.0"
#define	IDENT_ASYM	"Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define	IDENT3	"PCI Crypto Accelerator Mod 2.0"

#define	VENDOR	"Sun Microsystems, Inc."

#define	STALETIME	(30 * SECOND)

#define	crypto_prov_notify	crypto_provider_notification
	/* A 28 char function name doesn't leave much line space */

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	IDENT,			/* drv_linkinfo */
	&devops,		/* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	IDENT3
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev */
	&modldrv,		/* ml_linkage */
	&modlcrypto,
	NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};

static void dca_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t dca_control_ops = {
	dca_provider_status
};

static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};

static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};

static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};

static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,
	dca_generate_random
};

static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

int dca_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,
	dca_free_context
};

/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};

/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1) / sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};

/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2) / sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};

/* Convenience macros */
/* Retrieve the softc and instance number from a SPI crypto context */
#define	DCA_SOFTC_FROM_CTX(ctx, softc, instance)	{	\
	(softc) = (dca_t *)(ctx)->cc_provider;			\
	(instance) = ddi_get_instance((softc)->dca_dip);	\
}

#define	DCA_MECH_FROM_CTX(ctx) \
	(((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);


static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{ 0x14e4, 0x5820, "Broadcom 5820" },
	{ 0x14e4, 0x5821, "Broadcom 5821" },
	{ 0x14e4, 0x5822, "Broadcom 5822" },
	{ 0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun-specific OEM'd variants */
	{ 0x108e, 0x5454, "SCA" },
	{ 0x108e, 0x5455, "SCA 1000" },
	{ 0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW.  Assume both here. */
	{ 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
#endif

static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

static void *dca_state = NULL;
int dca_mindma = 2500;

/*
 * FMA eclass string definitions.  Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};
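/*
 * For reference, a sketch of the enum these arrays must track (the
 * authoritative definition lives in <sys/crypto/dca.h>; the middle
 * member name below is inferred from the ".hw.timeout" strings and is
 * not copied from the header):
 *
 *	typedef enum dca_fma_eclass {
 *		DCA_FM_ECLASS_HW_DEVICE,	maps to "*.hw.device"
 *		DCA_FM_ECLASS_HW_TIMEOUT,	maps to "*.hw.timeout"
 *		DCA_FM_ECLASS_NONE		maps to "*.none"
 *	} dca_fma_eclass_t;
 *
 * Reordering either string array without reordering the enum would make
 * dca_fma_eclass_string() return the wrong error class string.
 */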
/*
 * DDI entry points.
 */
int
_init(void)
{
	int rv;

	DBG(NULL, DMOD, "dca: in _init");

	if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&dca_state);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int rv;

	DBG(NULL, DMOD, "dca: in _fini");

	if ((rv = mod_remove(&modlinkage)) == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&dca_state);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	DBG(NULL, DMOD, "dca: in _info");

	return (mod_info(&modlinkage, modinfop));
}

int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int instance;
	ddi_iblock_cookie_t ibc;
	int intr_added = 0;
	dca_t *dca;
	ushort_t venid;
	ushort_t devid;
	ushort_t revid;
	ushort_t subsysid;
	ushort_t subvenid;
	int i;
	int ret;
	char ID[64];
	static char *unknowndev = "Unknown device";

#if DEBUG
	/* these are only used for debugging */
	ushort_t pcicomm;
	ushort_t pcistat;
	uchar_t cachelinesz;
	uchar_t mingnt;
	uchar_t maxlat;
	uchar_t lattmr;
#endif

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

	switch (cmd) {
	case DDI_RESUME:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in resume");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_resume(dca));
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		dca_diperror(dip, "slot does not support PCI bus-master");
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0) != 0) {
		dca_diperror(dip, "hilevel interrupts not supported");
		return (DDI_FAILURE);
	}

	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/* common PCI attributes */
	venid = pci_config_get16(pci, PCI_VENID);
	devid = pci_config_get16(pci, PCI_DEVID);
	revid = pci_config_get8(pci, PCI_REVID);
	subvenid = pci_config_get16(pci, PCI_SUBVENID);
	subsysid = pci_config_get16(pci, PCI_SUBSYSID);

	/*
	 * Broadcom-specific timings.
	 * We disable these timers/counters since they can cause
	 * spurious failures when the bus is just a little bit slow,
	 * or busy.
	 */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
	/* read registers (for debugging) */
	pcicomm = pci_config_get16(pci, PCI_COMM);
	pcistat = pci_config_get16(pci, PCI_STATUS);
	cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
	mingnt = pci_config_get8(pci, PCI_MINGNT);
	maxlat = pci_config_get8(pci, PCI_MAXLAT);
	lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

	pci_config_teardown(&pci);

	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to get iblock cookie");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}

	dca = ddi_get_soft_state(dca_state, instance);
	ASSERT(dca != NULL);
	dca->dca_dip = dip;
	WORKLIST(dca, MCR1)->dwl_prov = NULL;
	WORKLIST(dca, MCR2)->dwl_prov = NULL;
	/* figure pagesize */
	dca->dca_pagesize = ddi_ptob(dip, 1);

	/*
	 * Search for the device in our supported devices table.  This
	 * is here for two reasons.  First, we want to ensure that
	 * only Sun-qualified (and presumably Sun-labeled) devices can
	 * be used with this driver.  Second, some devices have
	 * specific differences.  E.g. the 5821 has support for a
	 * special mode of RC4, deeper queues, power management, and
	 * other changes.  Also, the export versions of some of these
	 * chips don't support RC4 or 3DES, so we catch that here.
	 *
	 * Note that we only look at the upper nibble of the device
	 * id, which is used to distinguish export vs. domestic
	 * versions of the chip.  (The lower nibble is used for
	 * stepping information.)
	 */
	for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
		/*
		 * Try to match the subsystem information first.
		 */
		if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
		    subsysid && (subsysid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
			break;
		}
		/*
		 * Failing that, try the generic vendor and device id.
		 * Even if we find a match, we keep searching anyway,
		 * since we would prefer to find a match based on the
		 * subsystem ids.
		 */
		if ((venid == dca_devices[i].dd_vendor_id) &&
		    (devid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
		}
	}
	/* try to handle an unrecognized device */
	if (dca->dca_model == NULL) {
		dca->dca_model = unknowndev;
		dca_error(dca, "device not recognized, not supported");
		DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
		    i, venid, devid, revid);
	}

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
	    dca->dca_model) != DDI_SUCCESS) {
		dca_error(dca, "unable to create description property");
		return (DDI_FAILURE);
	}

	DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
	    pcicomm, pcistat, cachelinesz);
	DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
	    mingnt, maxlat, lattmr);

	/*
	 * Initialize locks, etc.
	 */
	(void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

	/* use RNGSHA1 by default */
	if (ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
		dca->dca_flags |= DCA_RNGSHA1;
	}

	/* initialize FMA */
	dca_fma_init(dca);

	/* initialize some key data structures */
	if (dca_init(dca) != DDI_SUCCESS) {
		goto failed;
	}

	/* initialize kstats */
	dca_ksinit(dca);

	/* setup access to registers */
	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
	    0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
		dca_error(dca, "unable to map registers");
		goto failed;
	}

	DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
	DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
	DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
	DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
	DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

	/* reset the chip */
	if (dca_reset(dca, 0) < 0) {
		goto failed;
	}

	/* initialize the chip */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* add the interrupt */
	if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
	    (void *)dca) != DDI_SUCCESS) {
		DBG(dca, DWARN, "ddi_add_intr failed");
		goto failed;
	} else {
		intr_added = 1;
	}

	/* enable interrupts on the device */
	/*
	 * XXX: Note, 5820A1 errata indicates that this may clobber
	 * bits 24 and 23, which affect the speed of the RNG.  Since
	 * we always want to run in full-speed mode, this should be
	 * harmless.
	 */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 - increase the DMA read size */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* register MCR1 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
	dca_prov_info1.pi_provider_description = ID;
	dca_prov_info1.pi_provider_dev.pd_hw = dip;
	dca_prov_info1.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info1,
	    &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR1", ret);
		goto failed;
	}

	/* register MCR2 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
	dca_prov_info2.pi_provider_description = ID;
	dca_prov_info2.pi_provider_dev.pd_hw = dip;
	dca_prov_info2.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info2,
	    &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR2", ret);
		goto failed;
	}

	crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
	    CRYPTO_PROVIDER_READY);
	crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
	    CRYPTO_PROVIDER_READY);

	/* Initialize the local random number pool for this instance */
	if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
		goto failed;
	}

	mutex_enter(&dca->dca_intrlock);
	dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);

	ddi_set_driver_private(dip, (caddr_t)dca);

	ddi_report_dev(dip);

	if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
	}

	return (DDI_SUCCESS);

failed:
	/* unregister from the crypto framework */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov);
	}
	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov);
	}
	if (intr_added) {
		CLRBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
		/* unregister intr handler */
		ddi_remove_intr(dip, 0, dca->dca_icookie);
	}
	if (dca->dca_regs_handle) {
		ddi_regs_map_free(&dca->dca_regs_handle);
	}
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	mutex_destroy(&dca->dca_intrlock);
	ddi_soft_state_free(dca_state, instance);
	return (DDI_FAILURE);
}

int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	dca_t *dca;
	timeout_id_t tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list.  Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/* untimeout the timeouts */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}

int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */
	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}

int
dca_suspend(dca_t *dca)
{
	if ((dca_drain(dca)) != 0) {
		return (DDI_FAILURE);
	}
	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during suspend");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}

	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		if ((dmactl & DMACTL_RESET) == 0) {
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}
	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}

int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
	int i;
	int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

	/*
	 * Set up work queue.
	 */
	mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
	    dca->dca_icookie);
	mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

	mutex_enter(&wlp->dwl_lock);

	dca_initq(&wlp->dwl_freereqs);
	dca_initq(&wlp->dwl_waitq);
	dca_initq(&wlp->dwl_freework);
	dca_initq(&wlp->dwl_runq);

	for (i = 0; i < MAXWORK; i++) {
		dca_work_t *workp;

		if ((workp = dca_newwork(dca)) == NULL) {
			dca_error(dca, "unable to allocate work");
			mutex_exit(&wlp->dwl_lock);
			return (DDI_FAILURE);
		}
		workp->dw_wlp = wlp;
		dca_freework(workp);
	}
	mutex_exit(&wlp->dwl_lock);

	for (i = 0; i < reqprealloc; i++) {
		dca_request_t *reqp;

		if ((reqp = dca_newreq(dca)) == NULL) {
			dca_error(dca, "unable to allocate request");
			return (DDI_FAILURE);
		}
		reqp->dr_dca = dca;
		reqp->dr_wlp = wlp;
		dca_freereq(reqp);
	}
	return (DDI_SUCCESS);
}

int
dca_init(dca_t *dca)
{
	dca_worklist_t *wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
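/*
 * The water-mark and queue-depth properties read above can be tuned in
 * dca.conf without recompiling.  A purely illustrative stanza (the
 * numeric values below are hypothetical, not the shipped defaults):
 *
 *	mcr1_lowater=8; mcr1_hiwater=5000; mcr1_maxreqs=4;
 *	mcr2_lowater=8; mcr2_hiwater=5000; mcr2_maxreqs=4;
 *
 * Note that the *_maxreqs values are clamped to MAXREQSPERMCR by the
 * min() calls above, no matter what the property says.
 */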
/*
 * Uninitialize worklists.  This routine should only be called when no
 * active jobs (hence DMA mappings) exist.  One way to ensure this is
 * to unregister from kCF before calling this routine.  (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
	int mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t *wlp = WORKLIST(dca, mcr);
		dca_work_t *workp;
		dca_request_t *reqp;

		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		mutex_destroy(&wlp->dwl_freelock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = NULL;
	}
}

static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
	if (!q || !node)
		return;

	mutex_enter(lock);
	node->dl_next2 = q;
	node->dl_prev2 = q->dl_prev2;
	node->dl_next2->dl_prev2 = node;
	node->dl_prev2->dl_next2 = node;
	mutex_exit(lock);
}

static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
	if (!node)
		return;

	mutex_enter(lock);
	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);
}

static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
	dca_listnode_t *node;

	mutex_enter(lock);
	if ((node = q->dl_next2) == q) {
		mutex_exit(lock);
		return (NULL);
	}

	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);

	return (node);
}

void
dca_initq(dca_listnode_t *q)
{
	q->dl_next = q;
	q->dl_prev = q;
	q->dl_next2 = q;
	q->dl_prev2 = q;
}

void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
	/*
	 * Enqueue submits at the "tail" of the list, i.e. just
	 * behind the sentinel.
	 */
	node->dl_next = q;
	node->dl_prev = q->dl_prev;
	node->dl_next->dl_prev = node;
	node->dl_prev->dl_next = node;
}

void
dca_rmqueue(dca_listnode_t *node)
{
	node->dl_next->dl_prev = node->dl_prev;
	node->dl_prev->dl_next = node->dl_next;
	node->dl_next = NULL;
	node->dl_prev = NULL;
}

dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * Dequeue takes from the "head" of the list, i.e. just after
	 * the sentinel.
	 */
	if ((node = q->dl_next) == q) {
		/* queue is empty */
		return (NULL);
	}
	dca_rmqueue(node);
	return (node);
}

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * unqueue takes from the "tail" of the list, i.e. just before
	 * the sentinel.
	 */
	if ((node = q->dl_prev) == q) {
		/* queue is empty */
		return (NULL);
	}
	dca_rmqueue(node);
	return (node);
}

dca_listnode_t *
dca_peekqueue(dca_listnode_t *q)
{
	dca_listnode_t *node;

	if ((node = q->dl_next) == q) {
		return (NULL);
	} else {
		return (node);
	}
}
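#ifdef	DCA_QUEUE_EXAMPLE
/*
 * Illustrative sketch only -- DCA_QUEUE_EXAMPLE is never defined, so
 * this is not compiled into the driver.  It shows how the sentinel
 * based queue primitives above compose: a queue head initialized by
 * dca_initq() is its own sentinel, dca_enqueue() inserts at the tail,
 * dca_dequeue() removes from the head (FIFO), and dca_unqueue()
 * removes from the tail (LIFO).
 */
static void
dca_queue_example(void)
{
	dca_listnode_t q, a, b;

	dca_initq(&q);				/* empty: q.dl_next == &q */
	dca_enqueue(&q, &a);
	dca_enqueue(&q, &b);

	ASSERT(dca_peekqueue(&q) == &a);	/* head is oldest entry */
	ASSERT(dca_dequeue(&q) == &a);		/* FIFO removal */
	ASSERT(dca_unqueue(&q) == &b);		/* LIFO removal */
	ASSERT(dca_dequeue(&q) == NULL);	/* empty again */
}
#endif	/* DCA_QUEUE_EXAMPLE */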
/*
 * Interrupt service routine.
 */
uint_t
dca_intr(char *arg)
{
	dca_t *dca = (dca_t *)arg;
	uint32_t status;

	mutex_enter(&dca->dca_intrlock);
	status = GETCSR(dca, CSR_DMASTAT);
	PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		mutex_exit(&dca->dca_intrlock);
		return ((uint_t)DDI_FAILURE);
	}

	DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

	if ((status & DMASTAT_INTERRUPTS) == 0) {
		/* increment spurious interrupt kstat */
		if (dca->dca_intrstats) {
			KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
		}
		mutex_exit(&dca->dca_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	if (dca->dca_intrstats) {
		KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
	}
	if (status & DMASTAT_MCR1INT) {
		DBG(dca, DINTR, "MCR1 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
		dca_schedule(dca, MCR1);
		dca_reclaim(dca, MCR1);
		mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
	}

	if (status & DMASTAT_MCR2INT) {
		DBG(dca, DINTR, "MCR2 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
		dca_schedule(dca, MCR2);
		dca_reclaim(dca, MCR2);
		mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
	}

	if (status & DMASTAT_ERRINT) {
		uint32_t erraddr;
		erraddr = GETCSR(dca, CSR_DMAEA);
		mutex_exit(&dca->dca_intrlock);

		/*
		 * The low bit (bit 0) of the error address indicates
		 * failure during read if set, during write otherwise.
		 */
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "DMA master access error %s address 0x%x",
		    erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
		return (DDI_INTR_CLAIMED);
	}

	mutex_exit(&dca->dca_intrlock);

	return (DDI_INTR_CLAIMED);
}

/*
 * Reverse a string of bytes from s1 into s2.  The reversal happens
 * from the tail of s1.  If len1 < len2, then null bytes will be
 * padded to the end of s2.  If len2 < len1, then (presumably null)
 * bytes will be dropped from the start of s1.
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
	caddr_t src, dst;

	if (len1 == 0) {
		if (len2) {
			bzero(s2, len2);
		}
		return;
	}
	src = (caddr_t)s1 + len1 - 1;
	dst = s2;
	while ((src >= (caddr_t)s1) && (len2)) {
		*dst++ = *src--;
		len2--;
	}
	while (len2 > 0) {
		*dst++ = 0;
		len2--;
	}
}
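/*
 * Worked example of the padding/truncation rules above (illustrative
 * only).  Take the big-endian value 0x010203:
 *
 *	uchar_t be3[] = { 0x01, 0x02, 0x03 };
 *	uchar_t be5[] = { 0x00, 0x00, 0x01, 0x02, 0x03 };
 *	uchar_t dev[5];
 *
 *	dca_reverse(be3, dev, 3, 5);	dev = { 03 02 01 00 00 }
 *					two nulls padded at the tail
 *					(the MSB side of the device)
 *	dca_reverse(be5, dev, 5, 3);	dev = { 03 02 01 }
 *					two null MSBs dropped from the
 *					head of the source
 */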
uint16_t
dca_padfull(int num)
{
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	if (num <= 1536) {
		return (BITS2BYTES(1536));
	}
	if (num <= 2048) {
		return (BITS2BYTES(2048));
	}
	return (0);
}

uint16_t
dca_padhalf(int num)
{
	if (num <= 256) {
		return (BITS2BYTES(256));
	}
	if (num <= 384) {
		return (BITS2BYTES(384));
	}
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	return (0);
}
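/*
 * Example of the rounding above (illustrative; this assumes
 * BITS2BYTES(b) is simply b/8):
 *
 *	dca_padfull(1021) == BITS2BYTES(1024) == 128 bytes
 *	dca_padhalf(509)  == BITS2BYTES(512)  == 64 bytes
 *	dca_padfull(3000) == 0		(larger than 2048 bits)
 *
 * A return value of 0 signals an operand size the chip does not
 * support.
 */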
dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t *workp;
	size_t size;
	ddi_dma_cookie_t c;
	unsigned nc;
	int rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}

void
dca_destroywork(dca_work_t *workp)
{
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}

dca_request_t *
dca_newreq(dca_t *dca)
{
	dca_request_t *reqp;
	size_t size;
	ddi_dma_cookie_t c;
	unsigned nc;
	int rv;
	int n_chain = 0;

	size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

	reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

	reqp->dr_dca = dca;

	/*
	 * Setup the DMA region for the context and descriptors.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
	    NULL, &reqp->dr_ctx_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/* for driver hardening, allocate in whole pages */
	rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
	    ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
	    &reqp->dr_ctx_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
	    reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
	    DDI_DMA_SLEEP, 0, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "failed binding request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ctx_paddr = c.dmac_address;

	reqp->dr_dma_size = size;

	/*
	 * Set up the dma for our scratch/shared buffers.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating ibuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating obuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_in DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_out DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/*
	 * for driver hardening, allocate in whole pages.
	 */
	size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
	/*
	 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
	 * may fail on x86 platform if a physically contiguous memory chunk
	 * cannot be found.  From initial testing, we did not see performance
	 * degradation as seen on Sparc.
	 */
	if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request ibuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
	if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request obuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#else
	/*
	 * We could kmem_alloc for Sparc too.  However, it gives worse
	 * performance when transferring more than one page data.  For example,
	 * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
	 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
	 * the same throughput.
	 */
	rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
	    &size, &reqp->dr_ibuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
	    &size, &reqp->dr_obuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#endif

	/* Skip the used portion in the context page */
	reqp->dr_offset = CTX_MAXLENGTH;
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
	/* Skip the space used by the input buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
	    DDI_DMA_READ | DDI_DMA_STREAMING,
	    &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
	/* Skip the space used by the output buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
	    reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
	return (reqp);
}

void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
	dca_t *dca = reqp->dr_dca;
	size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

	/*
	 * Clean up DMA for the context structure.
	 */
	if (reqp->dr_ctx_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
	}

	if (reqp->dr_ctx_acch) {
		ddi_dma_mem_free(&reqp->dr_ctx_acch);
	}

	if (reqp->dr_ctx_dmah) {
		ddi_dma_free_handle(&reqp->dr_ctx_dmah);
	}

	/*
	 * Clean up DMA for the scratch buffer.
	 */
#if defined(i386) || defined(__i386)
	if (reqp->dr_ibuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}

	kmem_free(reqp->dr_ibuf_kaddr, size);
	kmem_free(reqp->dr_obuf_kaddr, size);
#else
	if (reqp->dr_ibuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
	}

	if (reqp->dr_ibuf_acch) {
		ddi_dma_mem_free(&reqp->dr_ibuf_acch);
	}
	if (reqp->dr_obuf_acch) {
		ddi_dma_mem_free(&reqp->dr_obuf_acch);
	}

	if (reqp->dr_ibuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}
#endif
	/*
	 * These two DMA handles should have been unbound in
	 * dca_unbindchains() already.
	 */
	if (reqp->dr_chain_in_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
	}
	if (reqp->dr_chain_out_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
	}

	kmem_free(reqp, sizeof (dca_request_t));
}

dca_work_t *
dca_getwork(dca_t *dca, int mcr)
{
	dca_worklist_t *wlp = WORKLIST(dca, mcr);
	dca_work_t *workp;

	mutex_enter(&wlp->dwl_freelock);
	workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
	mutex_exit(&wlp->dwl_freelock);
	if (workp) {
		int nreqs;
		bzero(workp->dw_mcr_kaddr, 8);

		/* clear out old requests */
		for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
			workp->dw_reqs[nreqs] = NULL;
		}
	}
	return (workp);
}

void
dca_freework(dca_work_t *workp)
{
	mutex_enter(&workp->dw_wlp->dwl_freelock);
	dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
	mutex_exit(&workp->dw_wlp->dwl_freelock);
}

dca_request_t *
dca_getreq(dca_t *dca, int mcr, int tryhard)
{
	dca_worklist_t *wlp = WORKLIST(dca, mcr);
	dca_request_t *reqp;

	mutex_enter(&wlp->dwl_freereqslock);
	reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
	mutex_exit(&wlp->dwl_freereqslock);
	if (reqp) {
		reqp->dr_flags = 0;
		reqp->dr_callback = NULL;
	} else if (tryhard) {
		/*
		 * failed to get a free one, try an allocation, the hard way.
		 * XXX: Kstat desired here.
		 */
		if ((reqp = dca_newreq(dca)) != NULL) {
			reqp->dr_wlp = wlp;
			reqp->dr_dca = dca;
			reqp->dr_flags = 0;
			reqp->dr_callback = NULL;
		}
	}
	return (reqp);
}

void
dca_freereq(dca_request_t *reqp)
{
	reqp->dr_kcf_req = NULL;
	if (!(reqp->dr_flags & DR_NOCACHE)) {
		mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
		dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
		    (dca_listnode_t *)reqp);
		mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
	}
}
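/*
 * Illustrative request life cycle tying the helpers above together
 * (a sketch only; locking, error paths, and context setup are omitted,
 * and the cipher/sign entry points elsewhere in the driver are the
 * authoritative users of these routines):
 *
 *	reqp = dca_getreq(dca, MCR1, 1);	  from cache, or dca_newreq()
 *	reqp->dr_in = ...; reqp->dr_out = ...;	  crypto_data_t buffers
 *	rv = dca_bindchains(reqp, incnt, outcnt); DMA-map the buffers
 *	rv = dca_start(dca, reqp, MCR1, 1);	  enqueue and schedule
 *	... completion arrives via dca_intr()/dca_reclaim() ...
 *	(void) dca_unbindchains(reqp);		  then dca_freereq(reqp)
 */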
/*
 * Binds user buffers to DMA handles dynamically.  On Sparc, a user buffer
 * is mapped to a single physical address.  On x86, a user buffer is mapped
 * to multiple physical addresses.  These physical addresses are chained
 * using the method specified in the Broadcom BCM5820 specification.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
	int rv;
	caddr_t kaddr;
	uint_t flags;
	int n_chain = 0;

	if (reqp->dr_flags & DR_INPLACE) {
		flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
	} else {
		flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
	}

	/* first the input */
	if (incnt) {
		if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			return (DDI_FAILURE);
		}
		if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
		    kaddr, reqp->dr_chain_in_dmah, flags,
		    &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (rv);
		}

		/*
		 * The offset and length are altered by the calling routine
		 * reqp->dr_in->cd_offset += incnt;
		 * reqp->dr_in->cd_length -= incnt;
		 */
		/* Save the first one in the chain for MCR */
		reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
		reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
		reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
	} else {
		reqp->dr_in_paddr = NULL;
		reqp->dr_in_next = 0;
		reqp->dr_in_len = 0;
	}

	if (reqp->dr_flags & DR_INPLACE) {
		reqp->dr_out_paddr = reqp->dr_in_paddr;
		reqp->dr_out_len = reqp->dr_in_len;
		reqp->dr_out_next = reqp->dr_in_next;
		return (DDI_SUCCESS);
	}

	/* then the output */
	if (outcnt) {
		flags = DDI_DMA_READ | DDI_DMA_STREAMING;
		if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}
		rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
		    n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
		    flags, &reqp->dr_chain_out_head, &n_chain);
		if (rv != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}

		/* Save the first one in the chain for MCR */
		reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
		reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
		reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
	} else {
		reqp->dr_out_paddr = NULL;
		reqp->dr_out_next = 0;
		reqp->dr_out_len = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * Unbind the user buffers from the DMA handles.
 */
int
dca_unbindchains(dca_request_t *reqp)
{
	int rv = DDI_SUCCESS;
	int rv1 = DDI_SUCCESS;

	/* Clear the input chain */
	if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
		reqp->dr_chain_in_head.dc_buffer_paddr = 0;
	}

	if (reqp->dr_flags & DR_INPLACE) {
		return (rv);
	}

	/* Clear the output chain */
	if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
		reqp->dr_chain_out_head.dc_buffer_paddr = 0;
	}

	return ((rv != DDI_SUCCESS) ? rv : rv1);
}

/*
 * Build either an input chain or an output chain.  It is a single-item
 * chain for Sparc, and possibly a multiple-item chain for x86.
 */
static int
dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain)
{
	ddi_dma_cookie_t	c;
	uint_t			nc;
	int			rv;
	caddr_t			chain_kaddr_pre;
	caddr_t			chain_kaddr;
	uint32_t		chain_paddr;
	int			i;

	/* Advance past the context structure to the starting address */
	chain_paddr = reqp->dr_ctx_paddr + dr_offset;
	chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;

	/*
	 * Bind the kernel address to the DMA handle. On x86, the actual
	 * buffer is mapped into multiple physical addresses. On SPARC,
	 * the actual buffer is mapped into a single address.
	 */
	rv = ddi_dma_addr_bind_handle(handle,
	    NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		return (DDI_FAILURE);
	}

	(void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
	if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
	    DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
		reqp->destroy = TRUE;
		return (rv);
	}

	*n_chain = nc;

	/* Set up the data buffer chain for DMA transfer */
	chain_kaddr_pre = NULL;
	head->dc_buffer_paddr = 0;
	head->dc_next_paddr = 0;
	head->dc_buffer_length = 0;
	for (i = 0; i < nc; i++) {
		/* PIO */
		PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
		PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
		PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);

		/* Remember the head of the chain */
		if (head->dc_buffer_paddr == 0) {
			head->dc_buffer_paddr = c.dmac_address;
			head->dc_buffer_length = c.dmac_size;
		}

		/* Link to the previous one if one exists */
		if (chain_kaddr_pre) {
			PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
			    chain_paddr);
			if (head->dc_next_paddr == 0)
				head->dc_next_paddr = chain_paddr;
		}
		chain_kaddr_pre = chain_kaddr;

		/* Maintain pointers */
		chain_paddr += DESC_SIZE;
		chain_kaddr += DESC_SIZE;

		/* Retrieve the next cookie if there is one */
		if (i < nc-1)
			ddi_dma_nextcookie(handle, &c);
	}

	/* Set the next pointer in the last entry to NULL */
	PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);

	return (DDI_SUCCESS);
}
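/*
 * Sketch of the MCR image dca_schedule() builds below (field offsets are
 * the MCR_* constants from dca.h; this summarizes the PUTMCR16/32 stores
 * in the code and is not a normative layout):
 *
 *	MCR_FLAGS	16 bits, written as 0; the chip sets
 *			MCRFLAG_FINISHED here on completion
 *	MCR_COUNT	16 bits, number of requests in this MCR
 *	then, starting at MCR_CTXADDR, per request:
 *		32-bit context physical address
 *		32-bit input buffer address, 32-bit next input descriptor,
 *		16-bit input length, 16-bit reserved (0)
 *		16-bit reserved (0), 16-bit total packet length
 *		32-bit output buffer address, 32-bit next output descriptor,
 *		16-bit output length, 16-bit reserved (0)
 */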
/*
 * Schedule some work.
 */
int
dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);

	mutex_enter(&wlp->dwl_lock);

	DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
	    reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
	    reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
	DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
	    reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
	/* sync out the entire context and descriptor chains */
	(void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
	if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		reqp->destroy = TRUE;
		mutex_exit(&wlp->dwl_lock);
		return (CRYPTO_DEVICE_ERROR);
	}

	dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
	wlp->dwl_count++;
	wlp->dwl_lastsubmit = ddi_get_lbolt();
	reqp->dr_wlp = wlp;

	if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
		/* we are fully loaded now, let kCF know */

		wlp->dwl_flowctl++;
		wlp->dwl_busy = 1;

		crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
	}

	if (dosched) {
#ifdef	SCHEDDELAY
		/* possibly wait for more work to arrive */
		if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
			dca_schedule(dca, mcr);
		} else if (!wlp->dwl_schedtid) {
			/* wait 1 msec for more work before doing it */
			wlp->dwl_schedtid = timeout(dca_schedtimeout,
			    (void *)wlp, drv_usectohz(MSEC));
		}
#else
		dca_schedule(dca, mcr);
#endif
	}
	mutex_exit(&wlp->dwl_lock);

	return (CRYPTO_QUEUED);
}

void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;
	int		full;
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * since the chip can hold fewer jobs outstanding
			 * than there are in total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}

/*
 * Reclaim completed work, called in interrupt context.
 */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_work_t	*workp;
	ushort_t	flags;
	int		nreclaimed = 0;
	int		i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR on the submitted queue (runq), we check to see if
	 * it has been processed. If so, then we note each individual
	 * job in the MCR, and do the completion processing for each
	 * such job.
	 */
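	/*
	 * Locking sketch: the statistics below are updated with dwl_lock
	 * held, but the lock is dropped around the dca_done() callbacks,
	 * presumably so that completion handlers may submit new work
	 * without deadlocking on the worklist lock.
	 */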
	for (;;) {

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			break;
		}

		/* only sync the MCR flags, since that's all we need */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_rmqueue((dca_listnode_t *)workp);
			dca_destroywork(workp);
			return;
		}

		flags = GETMCR16(workp, MCR_FLAGS);
		if ((flags & MCRFLAG_FINISHED) == 0) {
			/* chip is still working on it */
			DBG(dca, DRECLAIM,
			    "chip still working on it (MCR%d)", mcr);
			break;
		}

		/* it's really for us, so remove it from the queue */
		dca_rmqueue((dca_listnode_t *)workp);

		/* if we were draining, signal on the cv */
		if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
			cv_signal(&wlp->dwl_cv);
		}

		/* update statistics, done under the lock */
		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];
			if (reqp == NULL) {
				continue;
			}
			if (reqp->dr_byte_stat >= 0) {
				dca->dca_stats[reqp->dr_byte_stat] +=
				    reqp->dr_pkt_length;
			}
			if (reqp->dr_job_stat >= 0) {
				dca->dca_stats[reqp->dr_job_stat]++;
			}
		}
		mutex_exit(&wlp->dwl_lock);

		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];

			if (reqp == NULL) {
				continue;
			}

			/* Do the callback. */
			workp->dw_reqs[i] = NULL;
			dca_done(reqp, CRYPTO_SUCCESS);

			nreclaimed++;
		}

		/* now we can release the work */
		dca_freework(workp);

		mutex_enter(&wlp->dwl_lock);
	}
	DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
}

int
dca_length(crypto_data_t *cdata)
{
	return (cdata->cd_length);
}

/*
 * This is the callback function called from the interrupt when a kCF job
 * completes. It does some driver-specific things, and then calls the
 * kCF-provided callback. Finally, it cleans up the state for the work
 * request and drops the reference count to allow for DR.
 */
void
dca_done(dca_request_t *reqp, int err)
{
	uint64_t	ena = 0;

	/* unbind any chains we were using */
	if (dca_unbindchains(reqp) != DDI_SUCCESS) {
		/* DMA failure */
		ena = dca_ena(ena);
		dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
		    DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
		    "fault on buffer DMA handle");
		if (err == CRYPTO_SUCCESS) {
			err = CRYPTO_DEVICE_ERROR;
		}
	}

	if (reqp->dr_callback != NULL) {
		reqp->dr_callback(reqp, err);
	} else {
		dca_freereq(reqp);
	}
}
/*
 * Call this when a failure is detected. It will reset the chip,
 * log a message, alert kCF, and mark jobs in the runq as failed.
 */
/* ARGSUSED */
void
dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
    uint64_t ena, int errno, char *mess, ...)
{
	va_list	ap;
	char	buf[256];
	int	mcr;
	char	*eclass;
	int	have_mutex;

	va_start(ap, mess);
	(void) vsprintf(buf, mess, ap);
	va_end(ap);

	eclass = dca_fma_eclass_string(dca->dca_model, index);

	if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
	    index != DCA_FM_ECLASS_NONE) {
		ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
		    FM_EREPORT_VERS0, NULL);

		/* Report the impact of the failure to the DDI. */
		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
	} else {
		/* Just log the error string to the message log */
		dca_error(dca, buf);
	}

	/*
	 * Indicate a failure (keeps schedule from running).
	 */
	dca->dca_flags |= DCA_FAILED;

	/*
	 * Reset the chip. This should also have, as a side effect, the
	 * disabling of all interrupts from the device.
	 */
	(void) dca_reset(dca, 1);

	/*
	 * Report the failure to kCF.
	 */
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		if (WORKLIST(dca, mcr)->dwl_prov) {
			crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
			    CRYPTO_PROVIDER_FAILED);
		}
	}

	/*
	 * Return jobs not sent to hardware back to kCF.
	 */
	dca_rejectjobs(dca);

	/*
	 * From this point on, no new work should be arriving, and the
	 * chip should not be doing any active DMA.
	 */

	/*
	 * Now find all the work submitted to the device and fail
	 * them.
	 */
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp;
		int		i;

		wlp = WORKLIST(dca, mcr);

		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		for (;;) {
			dca_work_t	*workp;

			have_mutex = mutex_tryenter(&wlp->dwl_lock);
			workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
			if (workp == NULL) {
				if (have_mutex)
					mutex_exit(&wlp->dwl_lock);
				break;
			}
			mutex_exit(&wlp->dwl_lock);

			/*
			 * Free up requests
			 */
			for (i = 0; i < wlp->dwl_reqspermcr; i++) {
				dca_request_t *reqp = workp->dw_reqs[i];
				if (reqp) {
					dca_done(reqp, errno);
					workp->dw_reqs[i] = NULL;
				}
			}

			mutex_enter(&wlp->dwl_lock);
			/*
			 * If waiting to drain, signal on the waiter.
			 */
			if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
				cv_signal(&wlp->dwl_cv);
			}

			/*
			 * Return the work and request structures to
			 * the free pool.
			 */
			dca_freework(workp);
			if (have_mutex)
				mutex_exit(&wlp->dwl_lock);
		}
	}
}

#ifdef	SCHEDDELAY
/*
 * Reschedule worklist as needed.
 */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t	*wlp = (dca_worklist_t *)arg;
	mutex_enter(&wlp->dwl_lock);
	wlp->dwl_schedtid = 0;
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
#endif

/*
 * Check for stalled jobs.
 */
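/*
 * Timing note: STALETIME is (30 * SECOND) above; assuming SECOND is one
 * second expressed in microseconds (as its use with drv_usectohz()
 * suggests), a job is considered stale after roughly 30 seconds on the
 * chip.
 */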
void
dca_jobtimeout(void *arg)
{
	int	mcr;
	dca_t	*dca = (dca_t *)arg;
	int	hung = 0;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		clock_t		when;

		mutex_enter(&wlp->dwl_lock);
		when = ddi_get_lbolt();

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			/* nothing sitting in the queue */
			mutex_exit(&wlp->dwl_lock);
			continue;
		}

		if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
			/* request has been queued for less than STALETIME */
			mutex_exit(&wlp->dwl_lock);
			continue;
		}

		/* job has been sitting around for over STALETIME; badness */
		DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
		    mcr);

		/* put it back in the queue, until we reset the chip */
		hung++;
		mutex_exit(&wlp->dwl_lock);
	}

	if (hung) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout processing job");
	}

	/* reschedule ourself */
	mutex_enter(&dca->dca_intrlock);
	if (dca->dca_jobtid == 0) {
		/* timeout has been canceled, prior to DR */
		mutex_exit(&dca->dca_intrlock);
		return;
	}

	/* check again in 1 second */
	dca->dca_jobtid = timeout(dca_jobtimeout, arg,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);
}

/*
 * This returns all jobs back to kCF. It assumes that processing
 * on the worklist has halted.
 */
void
dca_rejectjobs(dca_t *dca)
{
	int	mcr;
	int	have_mutex;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_request_t	*reqp;

		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			mutex_exit(&wlp->dwl_lock);

			(void) dca_unbindchains(reqp);
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}
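/*
 * Drain protocol sketch: dca_drain() below sets dwl_drain, which stops
 * dca_schedule() from posting new MCRs, and then waits up to STALETIME on
 * dwl_cv for the runq to empty. dca_reclaim() and dca_failure() signal
 * that cv when the last outstanding MCR leaves the runq.
 */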
int
dca_drain(dca_t *dca)
{
	int	mcr;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef	SCHEDDELAY
		timeout_id_t	tid;
#endif
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to STALETIME to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef	SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
	}
	return (0);
}

void
dca_undrain(dca_t *dca)
{
	int	mcr;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 0;
		dca_schedule(dca, mcr);
		mutex_exit(&wlp->dwl_lock);
	}
}

/*
 * Duplicate the crypto_data_t structure, but point to the original
 * buffers.
 */
int
dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
{
	ninput->cd_format = input->cd_format;
	ninput->cd_offset = input->cd_offset;
	ninput->cd_length = input->cd_length;
	ninput->cd_miscdata = input->cd_miscdata;

	switch (input->cd_format) {
	case CRYPTO_DATA_RAW:
		ninput->cd_raw.iov_base = input->cd_raw.iov_base;
		ninput->cd_raw.iov_len = input->cd_raw.iov_len;
		break;

	case CRYPTO_DATA_UIO:
		ninput->cd_uio = input->cd_uio;
		break;

	case CRYPTO_DATA_MBLK:
		ninput->cd_mp = input->cd_mp;
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_dupcrypto: unrecognised crypto data format");
		return (CRYPTO_FAILED);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Performs validation checks on the input and output data structures.
 */
int
dca_verifyio(crypto_data_t *input, crypto_data_t *output)
{
	int	rv = CRYPTO_SUCCESS;

	switch (input->cd_format) {
	case CRYPTO_DATA_RAW:
		break;

	case CRYPTO_DATA_UIO:
		/* we support only kernel buffers */
		if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
			DBG(NULL, DWARN, "non kernel input uio buffer");
			rv = CRYPTO_ARGUMENTS_BAD;
		}
		break;

	case CRYPTO_DATA_MBLK:
		break;

	default:
		DBG(NULL, DWARN, "unrecognised input crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}

	switch (output->cd_format) {
	case CRYPTO_DATA_RAW:
		break;

	case CRYPTO_DATA_UIO:
		/* we support only kernel buffers */
		if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
			DBG(NULL, DWARN, "non kernel output uio buffer");
			rv = CRYPTO_ARGUMENTS_BAD;
		}
		break;

	case CRYPTO_DATA_MBLK:
		break;

	default:
		DBG(NULL, DWARN, "unrecognised output crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}

	return (rv);
}
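/*
 * Unlike dca_gather() further below, dca_getbufbytes() is a non-consuming
 * copy: it peeks at up to 'count' bytes and leaves the crypto_data_t's
 * cd_offset and cd_length untouched.
 */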
/*
 * data: source crypto_data_t struct
 * off: offset into the source before commencing copy
 * count: the amount of data to copy
 * dest: destination buffer
 */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int	rv = CRYPTO_SUCCESS;
	uio_t	*uiop;
	uint_t	vec_idx;
	size_t	cur_len;
	mblk_t	*mp;

	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset-adjusted size of the buffer.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk chain was reached but the
			 * length requested could not be processed (requested
			 * to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}


/*
 * Performs the input, output or hard scatter/gather checks on the specified
 * crypto_data_t struct. Returns TRUE if the data is scatter/gather in
 * nature, i.e. fails the test.
 */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t	*uiop;
	mblk_t	*mp;
	int	rv = FALSE;

	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;
	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and whose size is a multiple of a 32-bit word.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if (uiop->uio_iovcnt > 1) {
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL) {
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
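/*
 * Usage sketch: dca_sgcheck(dca, data, DCA_SG_CONTIG) returns TRUE for,
 * e.g., a two-iovec uio or a multi-mblk chain. Callers can use such a
 * result to fall back to the request's preallocated contiguous buffers
 * (dr_ibuf_kaddr/dr_obuf_kaddr) rather than binding the caller's buffer
 * directly.
 */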
/*
 * Increments the cd_offset and decrements the cd_length as the data is
 * gathered from the crypto_data_t struct.
 * The data is reverse-copied into the dest buffer if the flag is true.
 */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int	rv = CRYPTO_SUCCESS;
	uint_t	vec_idx;
	uio_t	*uiop;
	off_t	off = in->cd_offset;
	size_t	cur_len;
	mblk_t	*mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk chain was reached but the
			 * length requested could not be processed (requested
			 * to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}

/*
 * Increments the cd_offset and decrements the cd_length as the data is
 * gathered from the crypto_data_t struct.
 */
int
dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
    int count)
{
	int	rv = CRYPTO_SUCCESS;
	caddr_t	baddr;
	uint_t	vec_idx;
	uio_t	*uiop;
	off_t	off = in->cd_offset;
	size_t	cur_len;
	mblk_t	*mp;

	/* Process the residual first */
	if (*residlen > 0) {
		uint_t	num = min(count, *residlen);
		bcopy(resid, dest, num);
		*residlen -= num;
		if (*residlen > 0) {
			/*
			 * Requested amount 'count' is less than what's in
			 * the residual, so shuffle any remaining resid to
			 * the front.
			 */
			baddr = resid + num;
			bcopy(baddr, resid, *residlen);
		}
		dest += num;
		count -= num;
	}

	/* Now process what's in the crypto_data_t structs */
	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk chain was reached but the
			 * length requested could not be processed (requested
			 * to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_resid_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
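/*
 * A note on the "reverse" flag of dca_gather() above and dca_scatter()
 * below: it reverses byte order during the copy (via dca_reverse()). As
 * the endian-reversal remark before dca_bitlen() further down suggests,
 * this is how big-endian bignums are converted to and from the byte order
 * the hardware operates on.
 */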
/*
 * Appends the data to the crypto_data_t struct, increasing cd_length.
 * cd_offset is left unchanged.
 * Data is reverse-copied if the flag is TRUE.
 */
int
dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
{
	int	rv = CRYPTO_SUCCESS;
	off_t	offset = out->cd_offset + out->cd_length;
	uint_t	vec_idx;
	uio_t	*uiop;
	size_t	cur_len;
	mblk_t	*mp;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW:
		if (out->cd_raw.iov_len - offset < count) {
			/* Trying to write out more than space available. */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse((void *)src, out->cd_raw.iov_base + offset,
			    count, count);
		else
			bcopy(src, out->cd_raw.iov_base + offset, count);
		out->cd_length += count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec that can be written to.
		 */
		uiop = out->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    offset >= uiop->uio_iov[vec_idx].iov_len;
		    offset -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    offset, count);
			count -= cur_len;
			if (reverse) {
				dca_reverse((void *)(src+count),
				    uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len, cur_len);
			} else {
				bcopy(src, uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			vec_idx++;
			offset = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed
			 * (requested to write more data than space provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t that can be written to.
		 */
		for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
		    offset -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - offset, count);
			count -= cur_len;
			if (reverse) {
				dca_reverse((void *)(src+count),
				    (char *)(mp->b_rptr + offset), cur_len,
				    cur_len);
			} else {
				bcopy(src, (char *)(mp->b_rptr + offset),
				    cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			mp = mp->b_cont;
			offset = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk chain was reached but the
			 * length requested could not be processed (requested
			 * to write more data than space provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}

/*
 * Compare two byte arrays in reverse order.
 * Return 0 if they are identical, 1 otherwise.
 */
int
dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
{
	int	i;
	caddr_t	src, dst;

	if (!n)
		return (0);

	src = ((caddr_t)s1) + n - 1;
	dst = (caddr_t)s2;
	for (i = 0; i < n; i++) {
		if (*src != *dst)
			return (1);
		src--;
		dst++;
	}

	return (0);
}


/*
 * This calculates the size of a bignum in bits, specifically not counting
 * leading zero bits. This size calculation must be done *before* any
 * endian reversal takes place (i.e. the numbers are in absolute big-endian
 * order.)
 */
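/*
 * Worked example: for the two-byte bignum { 0x00, 0x05 }, the first loop
 * in dca_bitlen() stops at i == 1 (the most significant non-zero byte),
 * the second loop finds 3 significant bits in 0x05 (j == 3), and the
 * result is 8 * (2 - 1 - 1) + 3 == 3 bits, as expected for the value 0b101.
 */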
int
dca_bitlen(unsigned char *bignum, int bytelen)
{
	unsigned char	msbyte;
	int		i, j;

	for (i = 0; i < bytelen - 1; i++) {
		if (bignum[i] != 0) {
			break;
		}
	}
	msbyte = bignum[i];
	for (j = 8; j > 1; j--) {
		if (msbyte & 0x80) {
			break;
		}
		msbyte <<= 1;
	}
	return ((8 * (bytelen - i - 1)) + j);
}

/*
 * This compares two bignums (in big-endian order). It ignores leading
 * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
 */
int
dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
{
	while ((n1len > 1) && (*n1 == 0)) {
		n1len--;
		n1++;
	}
	while ((n2len > 1) && (*n2 == 0)) {
		n2len--;
		n2++;
	}
	if (n1len != n2len) {
		return (n1len - n2len);
	}
	while ((n1len > 1) && (*n1 == *n2)) {
		n1++;
		n2++;
		n1len--;
	}
	return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
}

/*
 * Return array of key attributes.
 */
crypto_object_attribute_t *
dca_get_key_attr(crypto_key_t *key)
{
	if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
	    (key->ck_count == 0)) {
		return (NULL);
	}

	return (key->ck_attrs);
}

/*
 * If the attribute type exists, valp points to its 32-bit value.
 */
int
dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, uint32_t *valp)
{
	crypto_object_attribute_t	*bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	*valp = *bap->oa_value;

	return (CRYPTO_SUCCESS);
}

/*
 * If the attribute type exists, data contains the start address of the
 * value, and numelems contains its length.
 */
int
dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, void **data, unsigned int *numelems)
{
	crypto_object_attribute_t	*bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	*data = bap->oa_value;
	*numelems = bap->oa_value_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Finds the entry of the specified attribute type. If it is not found,
 * dca_find_attribute returns NULL.
 */
crypto_object_attribute_t *
dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype)
{
	while (atnum) {
		if (attrp->oa_type == atype)
			return (attrp);
		atnum--;
		attrp++;
	}
	return (NULL);
}
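/*
 * Hypothetical usage sketch for the helpers above (CKA_MODULUS is chosen
 * purely for illustration):
 *
 *	crypto_object_attribute_t *attrs = dca_get_key_attr(key);
 *	void *mod = NULL;
 *	unsigned int modlen = 0;
 *
 *	if (attrs != NULL && dca_attr_lookup_uint8_array(attrs,
 *	    key->ck_count, CKA_MODULUS, &mod, &modlen) == CRYPTO_SUCCESS) {
 *		... mod/modlen now describe the key's modulus attribute ...
 *	}
 */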
/*
 * Return the address of the first data buffer. If the data format is
 * unrecognised, return NULL.
 */
caddr_t
dca_bufdaddr(crypto_data_t *data)
{
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		return (data->cd_raw.iov_base + data->cd_offset);
	case CRYPTO_DATA_UIO:
		return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
	case CRYPTO_DATA_MBLK:
		return ((char *)data->cd_mp->b_rptr + data->cd_offset);
	default:
		DBG(NULL, DWARN,
		    "dca_bufdaddr: unrecognised crypto data format");
		return (NULL);
	}
}

static caddr_t
dca_bufdaddr_out(crypto_data_t *data)
{
	size_t	offset = data->cd_offset + data->cd_length;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		return (data->cd_raw.iov_base + offset);
	case CRYPTO_DATA_UIO:
		return (data->cd_uio->uio_iov[0].iov_base + offset);
	case CRYPTO_DATA_MBLK:
		return ((char *)data->cd_mp->b_rptr + offset);
	default:
		DBG(NULL, DWARN,
		    "dca_bufdaddr_out: unrecognised crypto data format");
		return (NULL);
	}
}

/*
 * Control entry points.
 */

/* ARGSUSED */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}

/*
 * Cipher (encrypt/decrypt) entry points.
 */

/* ARGSUSED */
static int
dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

/* ARGSUSED */
static int
dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt: started");

	/* handle inplace ops */
	if (!ciphertext) {
		dca_request_t	*reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, plaintext, ciphertext, req,
		    DCA_RSA_ENC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_update: started");

	/* handle inplace ops */
	if (!ciphertext) {
		dca_request_t	*reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_encrypt_atomic: started");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* handle inplace ops */
	if (!ciphertext) {
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

/* ARGSUSED */
static int
dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt: started");

	/* handle inplace ops */
	if (!plaintext) {
		dca_request_t	*reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		plaintext = ciphertext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, ciphertext, plaintext, req,
		    DCA_RSA_DEC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		if (plaintext)
			plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_update: started");

	/* handle inplace ops */
	if (!plaintext) {
		dca_request_t	*reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		plaintext = ciphertext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desupdate(ctx, ciphertext, plaintext, req,
		    DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int	instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_decrypt_final: started");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int	error = CRYPTO_FAILED;
	dca_t	*softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_decrypt_atomic: started");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* handle inplace ops */
	if (!plaintext) {
		plaintext = ciphertext;
	}

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req,
		    DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
		break;
	default:
		cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		plaintext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Sign entry points.
 */
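/*
 * Note: only single-part and atomic sign operations are supported by this
 * provider; the multi-part entry points dca_sign_update() and
 * dca_sign_final() below always fail with CRYPTO_MECHANISM_INVALID.
 */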
/*
 * Sign entry points.
 */

/* ARGSUSED */
static int
dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_sign(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);

	return (error);
}
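/*
 * This provider implements RSA and DSA signing as single-part
 * operations only, so the update/final entry points below are stubs
 * that log a warning and fail with CRYPTO_MECHANISM_INVALID.
 */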
/* ARGSUSED */
static int
dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_update: started\n");

	cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_final: started\n");

	cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);

	return (error);
}

static int
dca_sign_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_sign_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
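/*
 * As with the other _init entry points, a successfully initialized
 * context is linked onto the per-instance dca_ctx_list (under
 * dca_ctx_list_lock) so that any contexts still outstanding at detach
 * time can be reclaimed by dca_free_context_list().
 */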
static int
dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);

	return (error);
}

static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Verify entry points.
 */

/* ARGSUSED */
static int
dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
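/*
 * Note the argument order on the RSA verify paths below: the signature
 * is handed to dca_rsastart()/dca_rsaatomic() as the input processed by
 * the hardware, while the caller's data supplies the expected result,
 * i.e. the mirror image of the dca_sign() path.
 */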
static int
dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_verify(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_update: started\n");

	cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_final: started\n");

	cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);

	return (error);
}
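/*
 * In the verify-recover entry points that follow, 'error' is
 * pre-initialized to CRYPTO_MECHANISM_INVALID, so a default case that
 * merely logs the unexpected mechanism type still returns the correct
 * error code.
 */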
/* ARGSUSED */
static int
dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
	}

	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_data_t *data, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
	}

	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY,
	    "dca_verify_recover_atomic: done, err = 0x%x", error);

	return (error);
}
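/*
 * Verify-recover follows the PKCS#11 C_VerifyRecover model, in which
 * the data is recovered from the signature itself; of the mechanisms
 * offered here that is meaningful only for RSA, which is why the entry
 * points above accept just the two RSA mechanisms.
 */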
/*
 * Random number entry points.
 */

/* ARGSUSED */
static int
dca_generate_random(crypto_provider_handle_t provider,
    crypto_session_id_t session_id,
    uchar_t *buf, size_t len, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_generate_random: started");

	error = dca_rng(softc, buf, len, req);

	DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);

	return (error);
}

/*
 * Context management entry points.
 */

int
dca_free_context(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_free_context: entered");

	if (ctx->cc_provider_private == NULL)
		return (error);

	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);

	error = dca_free_context_low(ctx);

	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);

	return (error);
}

static int
dca_free_context_low(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
	case DES3_CBC_MECH_INFO_TYPE:
		dca_3desctxfree(ctx);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		dca_rsactxfree(ctx);
		break;
	case DSA_MECH_INFO_TYPE:
		dca_dsactxfree(ctx);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	return (error);
}
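/*
 * The detach-time sweep below reuses dca_free_context_low() by
 * wrapping each leftover list node in a zeroed, stack-allocated
 * crypto_ctx_t; the free path only consults cc_provider and
 * cc_provider_private, so no real kCF context is needed.
 */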
/*
 * Free any unfreed private context. It is called at detach time.
 */
static void
dca_free_context_list(dca_t *dca)
{
	dca_listnode_t *node;
	crypto_ctx_t ctx;

	(void) memset(&ctx, 0, sizeof (ctx));
	ctx.cc_provider = dca;

	while ((node = dca_delist2(&dca->dca_ctx_list,
	    &dca->dca_ctx_list_lock)) != NULL) {
		ctx.cc_provider_private = node;
		(void) dca_free_context_low(&ctx);
	}
}

static int
ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
}

static int
ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	int rv;

	rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
	/* The asymmetric cipher slot supports random */
	ext_info->ei_flags |= CRYPTO_EXTF_RNG;

	return (rv);
}

/* ARGSUSED */
static int
ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
{
	dca_t *dca = (dca_t *)prov;
	int len;

	/* Label */
	(void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
	    ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
	len = strlen((char *)ext_info->ei_label);
	(void) memset(ext_info->ei_label + len, ' ',
	    CRYPTO_EXT_SIZE_LABEL - len);

	/* Manufacturer ID */
	(void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
	    DCA_MANUFACTURER_ID);
	len = strlen((char *)ext_info->ei_manufacturerID);
	(void) memset(ext_info->ei_manufacturerID + len, ' ',
	    CRYPTO_EXT_SIZE_MANUF - len);

	/* Model (use a "%s" format; the model string is not a format) */
	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);

	DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);

	len = strlen((char *)ext_info->ei_model);
	(void) memset(ext_info->ei_model + len, ' ',
	    CRYPTO_EXT_SIZE_MODEL - len);

	/* Serial Number. Blank for Deimos */
	(void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);

	ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;

	ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_hardware_version.cv_major = 0;
	ext_info->ei_hardware_version.cv_minor = 0;
	ext_info->ei_firmware_version.cv_major = 0;
	ext_info->ei_firmware_version.cv_minor = 0;

	/* Time. Not needed for a token without a clock */
	ext_info->ei_time[0] = '\000';

	return (CRYPTO_SUCCESS);
}
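/*
 * Illustrative sketch of the label layout built above (assuming
 * instance 0 of the "dca" driver and the symmetric slot): the sprintf
 * and memset pair produce
 *
 *	"dca/0 Crypto Accel Sym 2.0     ..."
 *
 * blank-padded out to CRYPTO_EXT_SIZE_LABEL bytes with no terminating
 * NUL, as the PKCS#11-style fixed-width fields expect.
 */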
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * Clear the FMA settings from these fields if there are
		 * no FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
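/*
 * The "fm-capable" property lets the default FMA capability mask be
 * overridden from dca.conf.  As an illustrative example (not a shipped
 * configuration), FMA support could be disabled entirely with a line
 * such as
 *
 *	fm-capable=0;
 *
 * which sends dca_fma_init() down the else-branch above, restoring
 * DDI_DEFAULT_ACC access attributes and clearing DDI_DMA_FLAGERR.
 */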
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}


/*
 * The IO fault service error handling callback function.
 */
/*ARGSUSED*/
static int
dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	dca_t *dca = (dca_t *)impl_data;

	pci_ereport_post(dip, err, NULL);
	if (err->fme_status == DDI_FM_FATAL) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "PCI fault in FMA callback.");
	}
	return (err->fme_status);
}


static int
dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_acc_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

int
dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_dma_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static uint64_t
dca_ena(uint64_t ena)
{
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	else
		ena = fm_ena_increment(ena);
	return (ena);
}

static char *
dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
{
	if (strstr(model, "500"))
		return (dca_fma_eclass_sca500[index]);
	else
		return (dca_fma_eclass_sca1000[index]);
}