/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/ioccom.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/crypto/dca.h>

/*
 * Core Deimos driver.
 */

static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
static void dca_free_context_list(dca_t *dca);
static int dca_free_context_low(crypto_ctx_t *ctx);
static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int dca_suspend(dca_t *);
static int dca_resume(dca_t *);
static int dca_init(dca_t *);
static int dca_reset(dca_t *, int);
static int dca_initworklist(dca_t *, dca_worklist_t *);
static void dca_uninit(dca_t *);
static void dca_initq(dca_listnode_t *);
static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t *dca_dequeue(dca_listnode_t *);
static dca_listnode_t *dca_unqueue(dca_listnode_t *);
static dca_request_t *dca_newreq(dca_t *);
static dca_work_t *dca_getwork(dca_t *, int);
static void dca_freework(dca_work_t *);
static dca_work_t *dca_newwork(dca_t *);
static void dca_destroywork(dca_work_t *);
static void dca_schedule(dca_t *, int);
static void dca_reclaim(dca_t *, int);
static uint_t dca_intr(char *);
static void dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void dca_jobtimeout(void *);
static int dca_drain(dca_t *);
static void dca_undrain(dca_t *);
static void dca_rejectjobs(dca_t *);

#ifdef SCHEDDELAY
static void dca_schedtimeout(void *);
#endif

/*
 * We want these inlined for performance.
 */
#ifndef DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif

/*
 * Device operations.
 */
static struct dev_ops devops = {
    DEVO_REV,               /* devo_rev */
    0,                      /* devo_refcnt */
    nodev,                  /* devo_getinfo */
    nulldev,                /* devo_identify */
    nulldev,                /* devo_probe */
    dca_attach,             /* devo_attach */
    dca_detach,             /* devo_detach */
    nodev,                  /* devo_reset */
    NULL,                   /* devo_cb_ops */
    NULL,                   /* devo_bus_ops */
    ddi_power,              /* devo_power */
    ddi_quiesce_not_supported,  /* devo_quiesce */
};

#define IDENT       "PCI Crypto Accelerator"
#define IDENT_SYM   "Crypto Accel Sym 2.0"
#define IDENT_ASYM  "Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define IDENT3      "PCI Crypto Accelerator Mod 2.0"

#define VENDOR      "Sun Microsystems, Inc."

#define STALETIME   (30 * SECOND)

#define crypto_prov_notify  crypto_provider_notification
/* A 28 char function name doesn't leave much line space */

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
    &mod_driverops,         /* drv_modops */
    IDENT,                  /* drv_linkinfo */
    &devops,                /* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
    &mod_cryptoops,
    IDENT3
};

static struct modlinkage modlinkage = {
    MODREV_1,               /* ml_rev */
    &modldrv,               /* ml_linkage */
    &modlcrypto,
    NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
    /* DES-CBC */
    {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
    /* 3DES-CBC */
    {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
        DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
    /* DSA */
    {SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
        CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
        DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS},

    /* RSA */
    {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS},
    {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
        CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
        CRYPTO_FG_VERIFY_RECOVER |
        CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
        CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
        CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
        RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
        CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
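
/*
 * A note on the tables above: the entry count of each is computed at
 * registration time with the usual sizeof idiom (see the
 * pi_mech_list_count initializers below), e.g.
 *
 *	sizeof (dca_mech_info_tab1) / sizeof (crypto_mech_info_t)
 *
 * so mechanisms can be added or removed without touching the provider
 * info structures.
 */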

static void dca_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t dca_control_ops = {
    dca_provider_status
};

static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t dca_cipher_ops = {
    dca_encrypt_init,
    dca_encrypt,
    dca_encrypt_update,
    dca_encrypt_final,
    dca_encrypt_atomic,
    dca_decrypt_init,
    dca_decrypt,
    dca_decrypt_update,
    dca_decrypt_final,
    dca_decrypt_atomic
};

static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_sign_ops_t dca_sign_ops = {
    dca_sign_init,
    dca_sign,
    dca_sign_update,
    dca_sign_final,
    dca_sign_atomic,
    dca_sign_recover_init,
    dca_sign_recover,
    dca_sign_recover_atomic
};

static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_verify_ops_t dca_verify_ops = {
    dca_verify_init,
    dca_verify,
    dca_verify_update,
    dca_verify_final,
    dca_verify_atomic,
    dca_verify_recover_init,
    dca_verify_recover,
    dca_verify_recover_atomic
};

static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t dca_random_number_ops = {
    NULL,
    dca_generate_random
};

static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
    ext_info_sym,           /* ext_info */
    NULL,                   /* init_token */
    NULL,                   /* init_pin */
    NULL                    /* set_pin */
};

static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
    ext_info_asym,          /* ext_info */
    NULL,                   /* init_token */
    NULL,                   /* init_pin */
    NULL                    /* set_pin */
};

int dca_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t dca_ctx_ops = {
    NULL,
    dca_free_context
};

/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
    &dca_control_ops,
    NULL,                   /* digest_ops */
    &dca_cipher_ops,
    NULL,                   /* mac_ops */
    NULL,                   /* sign_ops */
    NULL,                   /* verify_ops */
    NULL,                   /* dual_ops */
    NULL,                   /* cipher_mac_ops */
    NULL,                   /* random_number_ops */
    NULL,                   /* session_ops */
    NULL,                   /* object_ops */
    NULL,                   /* key_ops */
    &dca_provmanage_ops_1,  /* management_ops */
    &dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
    &dca_control_ops,
    NULL,                   /* digest_ops */
    &dca_cipher_ops,
    NULL,                   /* mac_ops */
    &dca_sign_ops,
    &dca_verify_ops,
    NULL,                   /* dual_ops */
    NULL,                   /* cipher_mac_ops */
    &dca_random_number_ops,
    NULL,                   /* session_ops */
    NULL,                   /* object_ops */
    NULL,                   /* key_ops */
    &dca_provmanage_ops_2,  /* management_ops */
    &dca_ctx_ops
};

/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
    CRYPTO_SPI_VERSION_1,
    NULL,                   /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL,                   /* pi_provider_dev */
    NULL,                   /* pi_provider_handle */
    &dca_crypto_ops1,
    sizeof (dca_mech_info_tab1) / sizeof (crypto_mech_info_t),
    dca_mech_info_tab1,
    0,                      /* pi_logical_provider_count */
    NULL                    /* pi_logical_providers */
};
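
/*
 * Note that each physical device registers as two distinct kCF
 * providers: dca_prov_info1 above describes the symmetric provider
 * backed by MCR1, and dca_prov_info2 below describes the asymmetric
 * provider backed by MCR2; both are filled in and registered in
 * dca_attach().
 */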

/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
    CRYPTO_SPI_VERSION_1,
    NULL,                   /* pi_provider_description */
    CRYPTO_HW_PROVIDER,
    NULL,                   /* pi_provider_dev */
    NULL,                   /* pi_provider_handle */
    &dca_crypto_ops2,
    sizeof (dca_mech_info_tab2) / sizeof (crypto_mech_info_t),
    dca_mech_info_tab2,
    0,                      /* pi_logical_provider_count */
    NULL                    /* pi_logical_providers */
};

/* Convenience macros */
/* Retrieve the softc and instance number from a SPI crypto context */
#define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \
    (softc) = (dca_t *)(ctx)->cc_provider; \
    (instance) = ddi_get_instance((softc)->dca_dip); \
}

#define DCA_MECH_FROM_CTX(ctx) \
    (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);

static dca_device_t dca_devices[] = {
    /* Broadcom vanilla variants */
    { 0x14e4, 0x5820, "Broadcom 5820" },
    { 0x14e4, 0x5821, "Broadcom 5821" },
    { 0x14e4, 0x5822, "Broadcom 5822" },
    { 0x14e4, 0x5825, "Broadcom 5825" },
    /* Sun-specific OEM variants */
    { 0x108e, 0x5454, "SCA" },
    { 0x108e, 0x5455, "SCA 1000" },
    { 0x108e, 0x5457, "SCA 500" },
    /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
    { 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};
#endif

static struct ddi_dma_attr dca_dmaattr = {
    DMA_ATTR_V0,            /* dma_attr_version */
    0x0,                    /* dma_attr_addr_lo */
    0xffffffffUL,           /* dma_attr_addr_hi */
    0x00ffffffUL,           /* dma_attr_count_max */
    0x40,                   /* dma_attr_align */
    0x40,                   /* dma_attr_burstsizes */
    0x1,                    /* dma_attr_minxfer */
    0x00ffffffUL,           /* dma_attr_maxxfer */
    0xffffffffUL,           /* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
    512,                    /* dma_attr_sgllen */
#else
    1,                      /* dma_attr_sgllen */
#endif
    1,                      /* dma_attr_granular */
    DDI_DMA_FLAGERR         /* dma_attr_flags */
};

static void *dca_state = NULL;
int dca_mindma = 2500;
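
/*
 * dca_mindma is a tunable threshold in bytes; jobs smaller than this
 * are presumably cheaper to copy through the preallocated scratch
 * buffers than to DMA-bind directly (the policy itself lives in the
 * submission paths, not in this file).
 */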

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
    "sca1000.hw.device",
    "sca1000.hw.timeout",
    "sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
    "sca500.hw.device",
    "sca500.hw.timeout",
    "sca500.none"
};

/*
 * DDI entry points.
 */
int
_init(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _init");

    if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
        /* this should *never* happen! */
        return (rv);
    }

    if ((rv = mod_install(&modlinkage)) != 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
        return (rv);
    }

    return (0);
}

int
_fini(void)
{
    int rv;

    DBG(NULL, DMOD, "dca: in _fini");

    if ((rv = mod_remove(&modlinkage)) == 0) {
        /* cleanup here */
        ddi_soft_state_fini(&dca_state);
    }
    return (rv);
}

int
_info(struct modinfo *modinfop)
{
    DBG(NULL, DMOD, "dca: in _info");

    return (mod_info(&modlinkage, modinfop));
}

int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    ddi_acc_handle_t pci;
    int instance;
    ddi_iblock_cookie_t ibc;
    int intr_added = 0;
    dca_t *dca;
    ushort_t venid;
    ushort_t devid;
    ushort_t revid;
    ushort_t subsysid;
    ushort_t subvenid;
    int i;
    int ret;
    char ID[64];
    static char *unknowndev = "Unknown device";

#if DEBUG
    /* these are only used for debugging */
    ushort_t pcicomm;
    ushort_t pcistat;
    uchar_t cachelinesz;
    uchar_t mingnt;
    uchar_t maxlat;
    uchar_t lattmr;
#endif

    instance = ddi_get_instance(dip);

    DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

    switch (cmd) {
    case DDI_RESUME:
        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
            dca_diperror(dip, "no soft state in resume");
            return (DDI_FAILURE);
        }
        /* assumption: we won't be DDI_DETACHed until we return */
        return (dca_resume(dca));
    case DDI_ATTACH:
        break;
    default:
        return (DDI_FAILURE);
    }

    if (ddi_slaveonly(dip) == DDI_SUCCESS) {
        dca_diperror(dip, "slot does not support PCI bus-master");
        return (DDI_FAILURE);
    }

    if (ddi_intr_hilevel(dip, 0) != 0) {
        dca_diperror(dip, "hilevel interrupts not supported");
        return (DDI_FAILURE);
    }

    if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /* common PCI attributes */
    venid = pci_config_get16(pci, PCI_VENID);
    devid = pci_config_get16(pci, PCI_DEVID);
    revid = pci_config_get8(pci, PCI_REVID);
    subvenid = pci_config_get16(pci, PCI_SUBVENID);
    subsysid = pci_config_get16(pci, PCI_SUBSYSID);

    /*
     * Broadcom-specific timings.
     * We disable these timers/counters since they can cause
     * spurious failures when the bus is just a little bit slow,
     * or busy.
     */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
    /* read registers (for debugging) */
    pcicomm = pci_config_get16(pci, PCI_COMM);
    pcistat = pci_config_get16(pci, PCI_STATUS);
    cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
    mingnt = pci_config_get8(pci, PCI_MINGNT);
    maxlat = pci_config_get8(pci, PCI_MAXLAT);
    lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

    pci_config_teardown(&pci);

    if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to get iblock cookie");
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
        dca_diperror(dip, "unable to allocate soft state");
        return (DDI_FAILURE);
    }

    dca = ddi_get_soft_state(dca_state, instance);
    ASSERT(dca != NULL);
    dca->dca_dip = dip;
    WORKLIST(dca, MCR1)->dwl_prov = NULL;
    WORKLIST(dca, MCR2)->dwl_prov = NULL;
    /* figure pagesize */
    dca->dca_pagesize = ddi_ptob(dip, 1);
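
    /*
     * WORKLIST(dca, mcr) (from the dca header) resolves to the per-MCR
     * worklist embedded in the soft state; MCR1 carries the symmetric
     * queue and MCR2 the asymmetric queue.  The dwl_prov handles are
     * cleared here so the failure path below can tell whether a crypto
     * provider was actually registered before unregistering it.
     */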

    /*
     * Search for the device in our supported devices table. This
     * is here for two reasons. First, we want to ensure that
     * only Sun-qualified (and presumably Sun-labeled) devices can
     * be used with this driver. Second, some devices have
     * specific differences. E.g. the 5821 has support for a
     * special mode of RC4, deeper queues, power management, and
     * other changes. Also, the export versions of some of these
     * chips don't support RC4 or 3DES, so we catch that here.
     *
     * Note that we only look at the upper nibble of the device
     * id, which is used to distinguish export vs. domestic
     * versions of the chip. (The lower nibble is used for
     * stepping information.)
     */
    for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
        /*
         * Try to match the subsystem information first.
         */
        if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
            subsysid && (subsysid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
            break;
        }
        /*
         * Failing that, try the generic vendor and device id.
         * Even if we find a match, we keep searching anyway,
         * since we would prefer to find a match based on the
         * subsystem ids.
         */
        if ((venid == dca_devices[i].dd_vendor_id) &&
            (devid == dca_devices[i].dd_device_id)) {
            dca->dca_model = dca_devices[i].dd_model;
            dca->dca_devid = dca_devices[i].dd_device_id;
        }
    }
    /* try to handle an unrecognized device */
    if (dca->dca_model == NULL) {
        dca->dca_model = unknowndev;
        dca_error(dca, "device not recognized, not supported");
        DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
            i, venid, devid, revid);
    }

    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
        dca->dca_model) != DDI_SUCCESS) {
        dca_error(dca, "unable to create description property");
        return (DDI_FAILURE);
    }

    DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
        pcicomm, pcistat, cachelinesz);
    DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
        mingnt, maxlat, lattmr);

    /*
     * initialize locks, etc.
     */
    (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

    /* use RNGSHA1 by default */
    if (ddi_getprop(DDI_DEV_T_ANY, dip,
        DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
        dca->dca_flags |= DCA_RNGSHA1;
    }

    /* initialize FMA */
    dca_fma_init(dca);

    /* initialize some key data structures */
    if (dca_init(dca) != DDI_SUCCESS) {
        goto failed;
    }

    /* initialize kstats */
    dca_ksinit(dca);

    /* setup access to registers */
    if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
        0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
        dca_error(dca, "unable to map registers");
        goto failed;
    }

    DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
    DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
    DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
    DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
    DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

    /* reset the chip */
    if (dca_reset(dca, 0) < 0) {
        goto failed;
    }

    /* initialize the chip */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* add the interrupt */
    if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
        (void *)dca) != DDI_SUCCESS) {
        DBG(dca, DWARN, "ddi_add_intr failed");
        goto failed;
    } else {
        intr_added = 1;
    }

    /* enable interrupts on the device */
    /*
     * XXX: Note, 5820A1 errata indicates that this may clobber
     * bits 24 and 23, which affect the speed of the RNG. Since
     * we always want to run in full-speed mode, this should be
     * harmless.
     */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 - increase the DMA read size */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        goto failed;
    }

    /* register MCR1 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
    dca_prov_info1.pi_provider_description = ID;
    dca_prov_info1.pi_provider_dev.pd_hw = dip;
    dca_prov_info1.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info1,
        &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR1", ret);
        goto failed;
    }

    /* register MCR2 with the crypto framework */
    /* Be careful not to exceed 32 chars */
    (void) sprintf(ID, "%s/%d %s",
        ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
    dca_prov_info2.pi_provider_description = ID;
    dca_prov_info2.pi_provider_dev.pd_hw = dip;
    dca_prov_info2.pi_provider_handle = dca;
    if ((ret = crypto_register_provider(&dca_prov_info2,
        &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
        cmn_err(CE_WARN,
            "crypto_register_provider() failed (%d) for MCR2", ret);
        goto failed;
    }

    crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
        CRYPTO_PROVIDER_READY);
    crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
        CRYPTO_PROVIDER_READY);

    /* Initialize the local random number pool for this instance */
    if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
        goto failed;
    }

    mutex_enter(&dca->dca_intrlock);
    dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
        drv_usectohz(SECOND));
    mutex_exit(&dca->dca_intrlock);

    ddi_set_driver_private(dip, (caddr_t)dca);

    ddi_report_dev(dip);

    if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
        ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
    }

    return (DDI_SUCCESS);

failed:
    /* unregister from the crypto framework */
    if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR1)->dwl_prov);
    }
    if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
        (void) crypto_unregister_provider(
            WORKLIST(dca, MCR2)->dwl_prov);
    }
    if (intr_added) {
        CLRBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
        /* unregister intr handler */
        ddi_remove_intr(dip, 0, dca->dca_icookie);
    }
    if (dca->dca_regs_handle) {
        ddi_regs_map_free(&dca->dca_regs_handle);
    }
    if (dca->dca_intrstats) {
        kstat_delete(dca->dca_intrstats);
    }
    if (dca->dca_ksp) {
        kstat_delete(dca->dca_ksp);
    }
    dca_uninit(dca);

    /* finalize FMA */
    dca_fma_fini(dca);

    mutex_destroy(&dca->dca_intrlock);
    ddi_soft_state_free(dca_state, instance);
    return (DDI_FAILURE);
}
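
/*
 * Note that the failed: path above unwinds strictly in reverse order
 * of setup: crypto providers first, then the interrupt handler, the
 * register mapping, kstats, worklists (dca_uninit), FMA state, and
 * finally the mutex and the soft state itself.
 */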

int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int instance;
    dca_t *dca;
    timeout_id_t tid;

    instance = ddi_get_instance(dip);

    DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

    switch (cmd) {
    case DDI_SUSPEND:
        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
            dca_diperror(dip, "no soft state in detach");
            return (DDI_FAILURE);
        }
        /* assumption: we won't be DDI_DETACHed until we return */
        return (dca_suspend(dca));

    case DDI_DETACH:
        break;
    default:
        return (DDI_FAILURE);
    }

    if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
        dca_diperror(dip, "no soft state in detach");
        return (DDI_FAILURE);
    }

    /*
     * Unregister from kCF.
     * This needs to be done at the beginning of detach.
     */
    if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
        if (crypto_unregister_provider(
            WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
            dca_error(dca, "unable to unregister MCR1 from kcf");
            return (DDI_FAILURE);
        }
    }

    if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
        if (crypto_unregister_provider(
            WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
            dca_error(dca, "unable to unregister MCR2 from kcf");
            return (DDI_FAILURE);
        }
    }

    /*
     * Cleanup the private context list. Once crypto_unregister_provider()
     * returns, it is safe to do so.
     */
    dca_free_context_list(dca);

    /* Cleanup the local random number pool */
    dca_random_fini(dca);

    /* send any jobs in the waitq back to kCF */
    dca_rejectjobs(dca);

    /* untimeout the timeouts */
    mutex_enter(&dca->dca_intrlock);
    tid = dca->dca_jobtid;
    dca->dca_jobtid = 0;
    mutex_exit(&dca->dca_intrlock);
    if (tid) {
        (void) untimeout(tid);
    }

    /* disable device interrupts */
    CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

    /* unregister interrupt handlers */
    ddi_remove_intr(dip, 0, dca->dca_icookie);

    /* release our regs handle */
    ddi_regs_map_free(&dca->dca_regs_handle);

    /* toss out kstats */
    if (dca->dca_intrstats) {
        kstat_delete(dca->dca_intrstats);
    }
    if (dca->dca_ksp) {
        kstat_delete(dca->dca_ksp);
    }

    mutex_destroy(&dca->dca_intrlock);
    dca_uninit(dca);

    /* finalize FMA */
    dca_fma_fini(dca);

    ddi_soft_state_free(dca_state, instance);

    return (DDI_SUCCESS);
}

int
dca_resume(dca_t *dca)
{
    ddi_acc_handle_t pci;

    if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
        dca_error(dca, "unable to setup PCI config handle");
        return (DDI_FAILURE);
    }

    /*
     * Reprogram registers in PCI configuration space.
     */

    /* Broadcom-specific timers -- we disable them. */
    pci_config_put8(pci, PCI_TRDYTO, 0);
    pci_config_put8(pci, PCI_RETRIES, 0);

    /* initialize PCI access settings */
    pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
        PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

    /* set up our PCI latency timer */
    pci_config_put8(pci, PCI_LATTMR, 0x40);

    pci_config_teardown(&pci);

    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during resume");
        return (DDI_FAILURE);
    }

    /*
     * Now restore the card-specific CSRs.
     */

    /* restore endianness settings */
    PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* restore interrupt enables */
    if (dca->dca_devid == 0x5825) {
        /* for 5825 set 256 byte read size to improve performance */
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
    } else {
        SETBIT(dca, CSR_DMACTL,
            DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
    }
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* resume scheduling jobs on the device */
    dca_undrain(dca);

    return (DDI_SUCCESS);
}

int
dca_suspend(dca_t *dca)
{
    if ((dca_drain(dca)) != 0) {
        return (DDI_FAILURE);
    }
    if (dca_reset(dca, 0) < 0) {
        dca_error(dca, "unable to reset device during suspend");
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
int
dca_reset(dca_t *dca, int failreset)
{
    int i;

    if (dca->dca_regs_handle == NULL) {
        return (-1);
    }

    PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
    if (!failreset) {
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
            return (-1);
    }

    /* now wait for a reset */
    for (i = 1; i < 100; i++) {
        uint32_t dmactl;

        drv_usecwait(100);
        dmactl = GETCSR(dca, CSR_DMACTL);
        if (!failreset) {
            if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (-1);
        }
        if ((dmactl & DMACTL_RESET) == 0) {
            DBG(dca, DCHATTY, "reset in %d usec", i * 100);
            return (0);
        }
    }
    if (!failreset) {
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "timeout waiting for reset after %d usec", i * 100);
    }
    return (-1);
}
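
/*
 * Note on dca_reset() timing: the polling loop above waits in 100 usec
 * steps for up to 99 iterations, i.e. roughly 10 msec in total, before
 * reporting a reset timeout through dca_failure().
 */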

int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
    int i;
    int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

    /*
     * Set up work queue.
     */
    mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
        dca->dca_icookie);
    mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
    cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

    mutex_enter(&wlp->dwl_lock);

    dca_initq(&wlp->dwl_freereqs);
    dca_initq(&wlp->dwl_waitq);
    dca_initq(&wlp->dwl_freework);
    dca_initq(&wlp->dwl_runq);

    for (i = 0; i < MAXWORK; i++) {
        dca_work_t *workp;

        if ((workp = dca_newwork(dca)) == NULL) {
            dca_error(dca, "unable to allocate work");
            mutex_exit(&wlp->dwl_lock);
            return (DDI_FAILURE);
        }
        workp->dw_wlp = wlp;
        dca_freework(workp);
    }
    mutex_exit(&wlp->dwl_lock);

    for (i = 0; i < reqprealloc; i++) {
        dca_request_t *reqp;

        if ((reqp = dca_newreq(dca)) == NULL) {
            dca_error(dca, "unable to allocate request");
            return (DDI_FAILURE);
        }
        reqp->dr_dca = dca;
        reqp->dr_wlp = wlp;
        dca_freereq(reqp);
    }
    return (DDI_SUCCESS);
}

int
dca_init(dca_t *dca)
{
    dca_worklist_t *wlp;

    /* Initialize the private context list and the corresponding lock. */
    mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
    dca_initq(&dca->dca_ctx_list);

    /*
     * MCR1 algorithms.
     */
    wlp = WORKLIST(dca, MCR1);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_lowater", MCR1LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_hiwater", MCR1HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR1;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    /*
     * MCR2 algorithms.
     */
    wlp = WORKLIST(dca, MCR2);
    (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
        ddi_get_instance(dca->dca_dip));
    wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_lowater", MCR2LOWATER);
    wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_hiwater", MCR2HIWATER);
    wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
        dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
        "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
    wlp->dwl_dca = dca;
    wlp->dwl_mcr = MCR2;
    if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}
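
/*
 * The watermarks and per-MCR request limits read above are tunable
 * from dca.conf; for example (illustrative values only):
 *
 *	mcr1_lowater=8;
 *	mcr1_hiwater=1000;
 *	mcr1_maxreqs=8;
 *
 * DDI_PROP_DONTPASS keeps the lookup from walking up the device tree,
 * so only properties set for this driver apply.
 */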

/*
 * Uninitialize worklists. This routine should only be called when no
 * active jobs (hence DMA mappings) exist. One way to ensure this is
 * to unregister from kCF before calling this routine. (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
    int mcr;

    mutex_destroy(&dca->dca_ctx_list_lock);

    for (mcr = MCR1; mcr <= MCR2; mcr++) {
        dca_worklist_t *wlp = WORKLIST(dca, mcr);
        dca_work_t *workp;
        dca_request_t *reqp;

        if (dca->dca_regs_handle == NULL) {
            continue;
        }

        mutex_enter(&wlp->dwl_lock);
        while ((workp = dca_getwork(dca, mcr)) != NULL) {
            dca_destroywork(workp);
        }
        mutex_exit(&wlp->dwl_lock);
        while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
            dca_destroyreq(reqp);
        }

        mutex_destroy(&wlp->dwl_lock);
        mutex_destroy(&wlp->dwl_freereqslock);
        mutex_destroy(&wlp->dwl_freelock);
        cv_destroy(&wlp->dwl_cv);
        wlp->dwl_prov = NULL;
    }
}

static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
    if (!q || !node)
        return;

    mutex_enter(lock);
    node->dl_next2 = q;
    node->dl_prev2 = q->dl_prev2;
    node->dl_next2->dl_prev2 = node;
    node->dl_prev2->dl_next2 = node;
    mutex_exit(lock);
}

static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
    if (!node)
        return;

    mutex_enter(lock);
    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);
}

static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
    dca_listnode_t *node;

    mutex_enter(lock);
    if ((node = q->dl_next2) == q) {
        mutex_exit(lock);
        return (NULL);
    }

    node->dl_next2->dl_prev2 = node->dl_prev2;
    node->dl_prev2->dl_next2 = node->dl_next2;
    node->dl_next2 = NULL;
    node->dl_prev2 = NULL;
    mutex_exit(lock);

    return (node);
}

void
dca_initq(dca_listnode_t *q)
{
    q->dl_next = q;
    q->dl_prev = q;
    q->dl_next2 = q;
    q->dl_prev2 = q;
}

void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
    /*
     * Enqueue submits at the "tail" of the list, i.e. just
     * behind the sentinel.
     */
    node->dl_next = q;
    node->dl_prev = q->dl_prev;
    node->dl_next->dl_prev = node;
    node->dl_prev->dl_next = node;
}

void
dca_rmqueue(dca_listnode_t *node)
{
    node->dl_next->dl_prev = node->dl_prev;
    node->dl_prev->dl_next = node->dl_next;
    node->dl_next = NULL;
    node->dl_prev = NULL;
}

dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
    dca_listnode_t *node;

    /*
     * Dequeue takes from the "head" of the list, i.e. just after
     * the sentinel.
     */
    if ((node = q->dl_next) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}
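
/*
 * The queues above are circular doubly-linked lists in which the queue
 * head itself acts as the sentinel: an empty queue points back at
 * itself (q->dl_next == q).  A two-element queue therefore looks like
 *
 *	q <-> a <-> b <-> (back to q)
 *
 * dca_enqueue() inserts just before the sentinel (the tail) and
 * dca_dequeue() removes just after it (the head), which yields FIFO
 * order; dca_unqueue() below removes from the tail instead, yielding
 * LIFO order.
 */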

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;

    /*
     * unqueue takes from the "tail" of the list, i.e. just before
     * the sentinel.
     */
    if ((node = q->dl_prev) == q) {
        /* queue is empty */
        return (NULL);
    }
    dca_rmqueue(node);
    return (node);
}

dca_listnode_t *
dca_peekqueue(dca_listnode_t *q)
{
    dca_listnode_t *node;

    if ((node = q->dl_next) == q) {
        return (NULL);
    } else {
        return (node);
    }
}

/*
 * Interrupt service routine.
 */
uint_t
dca_intr(char *arg)
{
    dca_t *dca = (dca_t *)arg;
    uint32_t status;

    mutex_enter(&dca->dca_intrlock);
    status = GETCSR(dca, CSR_DMASTAT);
    PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
    if (dca_check_acc_handle(dca, dca->dca_regs_handle,
        DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
        mutex_exit(&dca->dca_intrlock);
        return ((uint_t)DDI_FAILURE);
    }

    DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

    if ((status & DMASTAT_INTERRUPTS) == 0) {
        /* increment spurious interrupt kstat */
        if (dca->dca_intrstats) {
            KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
        }
        mutex_exit(&dca->dca_intrlock);
        return (DDI_INTR_UNCLAIMED);
    }

    if (dca->dca_intrstats) {
        KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
    }
    if (status & DMASTAT_MCR1INT) {
        DBG(dca, DINTR, "MCR1 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
        dca_schedule(dca, MCR1);
        dca_reclaim(dca, MCR1);
        mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
    }

    if (status & DMASTAT_MCR2INT) {
        DBG(dca, DINTR, "MCR2 interrupted");
        mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
        dca_schedule(dca, MCR2);
        dca_reclaim(dca, MCR2);
        mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
    }

    if (status & DMASTAT_ERRINT) {
        uint32_t erraddr;

        erraddr = GETCSR(dca, CSR_DMAEA);
        mutex_exit(&dca->dca_intrlock);

        /*
         * The low-order bit of the error address indicates failure
         * during read if set, during write otherwise.
         */
        dca_failure(dca, DDI_DEVICE_FAULT,
            DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
            "DMA master access error %s address 0x%x",
            erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
        return (DDI_INTR_CLAIMED);
    }

    mutex_exit(&dca->dca_intrlock);

    return (DDI_INTR_CLAIMED);
}

/*
 * Reverse a string of bytes from s1 into s2. The reversal happens
 * from the tail of s1. If len1 < len2, then null bytes will be
 * padded to the end of s2. If len2 < len1, then (presumably null)
 * bytes will be dropped from the start of s1.
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
    caddr_t src, dst;

    if (len1 == 0) {
        if (len2) {
            bzero(s2, len2);
        }
        return;
    }
    src = (caddr_t)s1 + len1 - 1;
    dst = s2;
    while ((src >= (caddr_t)s1) && (len2)) {
        *dst++ = *src--;
        len2--;
    }
    while (len2 > 0) {
        *dst++ = 0;
        len2--;
    }
}
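
/*
 * Example: reversing the 3-byte source { 0x01, 0x02, 0x03 } into a
 * 5-byte destination yields { 0x03, 0x02, 0x01, 0x00, 0x00 }; the
 * source is copied tail-first, then the remainder of the destination
 * is null-filled.
 */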

uint16_t
dca_padfull(int num)
{
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    if (num <= 1536) {
        return (BITS2BYTES(1536));
    }
    if (num <= 2048) {
        return (BITS2BYTES(2048));
    }
    return (0);
}

uint16_t
dca_padhalf(int num)
{
    if (num <= 256) {
        return (BITS2BYTES(256));
    }
    if (num <= 384) {
        return (BITS2BYTES(384));
    }
    if (num <= 512) {
        return (BITS2BYTES(512));
    }
    if (num <= 768) {
        return (BITS2BYTES(768));
    }
    if (num <= 1024) {
        return (BITS2BYTES(1024));
    }
    return (0);
}
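
/*
 * Example: dca_padfull(1000) rounds a 1000-bit quantity up to the next
 * supported full-length size, 1024 bits, and returns it expressed in
 * bytes (BITS2BYTES(1024), i.e. 128).  dca_padhalf() does the same for
 * half-length operands (presumably items such as the RSA CRT factors).
 */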

dca_work_t *
dca_newwork(dca_t *dca)
{
    dca_work_t *workp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;

    workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA handle");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
        ROUNDUP(MCR_SIZE, dca->dca_pagesize),
        &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
        &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
    if (rv != 0) {
        dca_error(dca, "unable to alloc MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
        workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
        DDI_DMA_SLEEP, NULL, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "unable to map MCR DMA memory");
        dca_destroywork(workp);
        return (NULL);
    }

    workp->dw_mcr_paddr = c.dmac_address;
    return (workp);
}

void
dca_destroywork(dca_work_t *workp)
{
    if (workp->dw_mcr_paddr) {
        (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
    }
    if (workp->dw_mcr_acch) {
        ddi_dma_mem_free(&workp->dw_mcr_acch);
    }
    if (workp->dw_mcr_dmah) {
        ddi_dma_free_handle(&workp->dw_mcr_dmah);
    }
    kmem_free(workp, sizeof (dca_work_t));
}

dca_request_t *
dca_newreq(dca_t *dca)
{
    dca_request_t *reqp;
    size_t size;
    ddi_dma_cookie_t c;
    unsigned nc;
    int rv;
    int n_chain = 0;

    size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

    reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

    reqp->dr_dca = dca;

    /*
     * Setup the DMA region for the context and descriptors.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
        NULL, &reqp->dr_ctx_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /* for driver hardening, allocate in whole pages */
    rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
        ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
        &reqp->dr_ctx_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
        reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
        DDI_DMA_SLEEP, 0, &c, &nc);
    if (rv != DDI_DMA_MAPPED) {
        dca_error(dca, "failed binding request DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ctx_paddr = c.dmac_address;

    reqp->dr_dma_size = size;

    /*
     * Set up the dma for our scratch/shared buffers.
     */
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating ibuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }
    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating obuf DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_in DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
        DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "failure allocating chain_out DMA handle");
        dca_destroyreq(reqp);
        return (NULL);
    }

    /*
     * for driver hardening, allocate in whole pages.
     */
    size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
    /*
     * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
     * may fail on x86 platforms if a physically contiguous memory chunk
     * cannot be found. From initial testing, we did not see the
     * performance degradation observed on SPARC.
     */
    if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
        dca_error(dca, "unable to alloc request ibuf memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
    if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
        dca_error(dca, "unable to alloc request obuf memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
#else
    /*
     * We could kmem_alloc for SPARC too. However, it gives worse
     * performance when transferring more than one page of data. For
     * example, using 4 threads and 12032 bytes of data and 3DES on a
     * 900 MHz SPARC system, kmem_alloc uses 80% CPU and
     * ddi_dma_mem_alloc uses 50% CPU for the same throughput.
     */
    rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
        &size, &reqp->dr_ibuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }

    rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
        size, &dca_bufattr,
        DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
        &size, &reqp->dr_obuf_acch);
    if (rv != DDI_SUCCESS) {
        dca_error(dca, "unable to alloc request DMA memory");
        dca_destroyreq(reqp);
        return (NULL);
    }
#endif

    /* Skip the used portion in the context page */
    reqp->dr_offset = CTX_MAXLENGTH;
    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
        DDI_DMA_WRITE | DDI_DMA_STREAMING,
        &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
    /* Skip the space used by the input buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
        reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
        DDI_DMA_READ | DDI_DMA_STREAMING,
        &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
        (void) dca_destroyreq(reqp);
        return (NULL);
    }
    reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
    /* Skip the space used by the output buffer */
    reqp->dr_offset += DESC_SIZE * n_chain;

    DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
        reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
    return (reqp);
}
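
/*
 * Layout of the request DMA region built by dca_newreq() above: the
 * first CTX_MAXLENGTH bytes hold the context, followed by the
 * descriptor chain for the preallocated input buffer and then the one
 * for the output buffer.  dr_offset is left pointing past all three,
 * so per-job chains built later by dca_bindchains() land behind them
 * in the same region.
 */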

void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
    dca_t *dca = reqp->dr_dca;
    size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

    /*
     * Clean up DMA for the context structure.
     */
    if (reqp->dr_ctx_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
    }

    if (reqp->dr_ctx_acch) {
        ddi_dma_mem_free(&reqp->dr_ctx_acch);
    }

    if (reqp->dr_ctx_dmah) {
        ddi_dma_free_handle(&reqp->dr_ctx_dmah);
    }

    /*
     * Clean up DMA for the scratch buffer.
     */
#if defined(i386) || defined(__i386)
    if (reqp->dr_ibuf_dmah) {
        (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
        ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_dmah) {
        (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
        ddi_dma_free_handle(&reqp->dr_obuf_dmah);
    }

    kmem_free(reqp->dr_ibuf_kaddr, size);
    kmem_free(reqp->dr_obuf_kaddr, size);
#else
    if (reqp->dr_ibuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_paddr) {
        (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
    }

    if (reqp->dr_ibuf_acch) {
        ddi_dma_mem_free(&reqp->dr_ibuf_acch);
    }
    if (reqp->dr_obuf_acch) {
        ddi_dma_mem_free(&reqp->dr_obuf_acch);
    }

    if (reqp->dr_ibuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
    }
    if (reqp->dr_obuf_dmah) {
        ddi_dma_free_handle(&reqp->dr_obuf_dmah);
    }
#endif
    /*
     * These two DMA handles should have been unbound in
     * dca_unbindchains() already.
     */
    if (reqp->dr_chain_in_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
    }
    if (reqp->dr_chain_out_dmah) {
        ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
    }

    kmem_free(reqp, sizeof (dca_request_t));
}

dca_work_t *
dca_getwork(dca_t *dca, int mcr)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);
    dca_work_t *workp;

    mutex_enter(&wlp->dwl_freelock);
    workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
    mutex_exit(&wlp->dwl_freelock);
    if (workp) {
        int nreqs;

        bzero(workp->dw_mcr_kaddr, 8);

        /* clear out old requests */
        for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
            workp->dw_reqs[nreqs] = NULL;
        }
    }
    return (workp);
}

void
dca_freework(dca_work_t *workp)
{
    mutex_enter(&workp->dw_wlp->dwl_freelock);
    dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
    mutex_exit(&workp->dw_wlp->dwl_freelock);
}

dca_request_t *
dca_getreq(dca_t *dca, int mcr, int tryhard)
{
    dca_worklist_t *wlp = WORKLIST(dca, mcr);
    dca_request_t *reqp;

    mutex_enter(&wlp->dwl_freereqslock);
    reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
    mutex_exit(&wlp->dwl_freereqslock);
    if (reqp) {
        reqp->dr_flags = 0;
        reqp->dr_callback = NULL;
    } else if (tryhard) {
        /*
         * failed to get a free one, try an allocation, the hard way.
         * XXX: Kstat desired here.
         */
        if ((reqp = dca_newreq(dca)) != NULL) {
            reqp->dr_wlp = wlp;
            reqp->dr_dca = dca;
            reqp->dr_flags = 0;
            reqp->dr_callback = NULL;
        }
    }
    return (reqp);
}

void
dca_freereq(dca_request_t *reqp)
{
    reqp->dr_kcf_req = NULL;
    if (!(reqp->dr_flags & DR_NOCACHE)) {
        mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
        dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
            (dca_listnode_t *)reqp);
        mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
    }
}
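
/*
 * The free lists above are guarded by their own dwl_freelock and
 * dwl_freereqslock rather than the main dwl_lock, presumably so that
 * completion and submission paths can recycle work structures and
 * requests without contending for the scheduler lock.
 */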

/*
 * Binds user buffers to DMA handles dynamically. On SPARC, a user buffer
 * is mapped to a single physical address. On x86, a user buffer is mapped
 * to multiple physical addresses. These physical addresses are chained
 * using the method specified in the Broadcom BCM5820 specification.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
    int rv;
    caddr_t kaddr;
    uint_t flags;
    int n_chain = 0;

    if (reqp->dr_flags & DR_INPLACE) {
        flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
    } else {
        flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
    }

    /* first the input */
    if (incnt) {
        if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            return (DDI_FAILURE);
        }
        if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
            kaddr, reqp->dr_chain_in_dmah, flags,
            &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (rv);
        }

        /*
         * The offset and length are altered by the calling routine:
         * reqp->dr_in->cd_offset += incnt;
         * reqp->dr_in->cd_length -= incnt;
         */
        /* Save the first one in the chain for MCR */
        reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
        reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
        reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
    } else {
        reqp->dr_in_paddr = NULL;
        reqp->dr_in_next = 0;
        reqp->dr_in_len = 0;
    }

    if (reqp->dr_flags & DR_INPLACE) {
        reqp->dr_out_paddr = reqp->dr_in_paddr;
        reqp->dr_out_len = reqp->dr_in_len;
        reqp->dr_out_next = reqp->dr_in_next;
        return (DDI_SUCCESS);
    }

    /* then the output */
    if (outcnt) {
        flags = DDI_DMA_READ | DDI_DMA_STREAMING;
        if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
            DBG(NULL, DWARN, "unrecognised crypto data format");
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }
        rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
            n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
            flags, &reqp->dr_chain_out_head, &n_chain);
        if (rv != DDI_SUCCESS) {
            (void) dca_unbindchains(reqp);
            return (DDI_FAILURE);
        }

        /* Save the first one in the chain for MCR */
        reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
        reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
        reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
    } else {
        reqp->dr_out_paddr = NULL;
        reqp->dr_out_next = 0;
        reqp->dr_out_len = 0;
    }

    return (DDI_SUCCESS);
}

/*
 * Unbind the user buffers from the DMA handles.
 */
int
dca_unbindchains(dca_request_t *reqp)
{
    int rv = DDI_SUCCESS;
    int rv1 = DDI_SUCCESS;

    /* Clear the input chain */
    if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
        (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
        reqp->dr_chain_in_head.dc_buffer_paddr = 0;
    }

    if (reqp->dr_flags & DR_INPLACE) {
        return (rv);
    }

    /* Clear the output chain */
    if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
        (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
        reqp->dr_chain_out_head.dc_buffer_paddr = 0;
    }

    return ((rv != DDI_SUCCESS) ? rv : rv1);
}
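
/*
 * Each descriptor written by dca_bindchains_one() below holds a 32-bit
 * buffer address (DESC_BUFADDR), a 32-bit physical pointer to the next
 * descriptor (DESC_NEXT, zero in the last entry), a reserved 16-bit
 * field written as zero (DESC_RSVD), and a 16-bit length (DESC_LENGTH).
 * The first buffer address, first link, and first length are also
 * recorded in a dca_chain_t head so the MCR entry can reference the
 * chain directly.
 */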
2004 */ 2005 static int 2006 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset, 2007 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags, 2008 dca_chain_t *head, int *n_chain) 2009 { 2010 ddi_dma_cookie_t c; 2011 uint_t nc; 2012 int rv; 2013 caddr_t chain_kaddr_pre; 2014 caddr_t chain_kaddr; 2015 uint32_t chain_paddr; 2016 int i; 2017 2018 /* Advance past the context structure to the starting address */ 2019 chain_paddr = reqp->dr_ctx_paddr + dr_offset; 2020 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset; 2021 2022 /* 2023 * Bind the kernel address to the DMA handle. On x86, the actual 2024 * buffer is mapped into multiple physical addresses. On Sparc, 2025 * the actual buffer is mapped into a single address. 2026 */ 2027 rv = ddi_dma_addr_bind_handle(handle, 2028 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc); 2029 if (rv != DDI_DMA_MAPPED) { 2030 return (DDI_FAILURE); 2031 } 2032 2033 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV); 2034 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle, 2035 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) { 2036 reqp->destroy = TRUE; 2037 return (rv); 2038 } 2039 2040 *n_chain = nc; 2041 2042 /* Setup the data buffer chain for DMA transfer */ 2043 chain_kaddr_pre = NULL; 2044 head->dc_buffer_paddr = 0; 2045 head->dc_next_paddr = 0; 2046 head->dc_buffer_length = 0; 2047 for (i = 0; i < nc; i++) { 2048 /* PIO */ 2049 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address); 2050 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0); 2051 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size); 2052 2053 /* Remember the head of the chain */ 2054 if (head->dc_buffer_paddr == 0) { 2055 head->dc_buffer_paddr = c.dmac_address; 2056 head->dc_buffer_length = c.dmac_size; 2057 } 2058 2059 /* Link to the previous one if one exists */ 2060 if (chain_kaddr_pre) { 2061 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 2062 chain_paddr); 2063 if (head->dc_next_paddr == 0) 2064 head->dc_next_paddr = chain_paddr; 2065 } 2066 chain_kaddr_pre = chain_kaddr; 2067 2068 /* Maintain pointers */ 2069 chain_paddr += DESC_SIZE; 2070 chain_kaddr += DESC_SIZE; 2071 2072 /* Retrieve the next cookie if there is one */ 2073 if (i < nc-1) 2074 ddi_dma_nextcookie(handle, &c); 2075 } 2076 2077 /* Set the next pointer in the last entry to NULL */ 2078 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0); 2079 2080 return (DDI_SUCCESS); 2081 } 2082 2083 /* 2084 * Schedule some work. 
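 * dca_start() enqueues the request on the worklist for the given MCR.
 * Once the queue depth reaches dwl_hiwater, kCF is told that the
 * provider is busy; dca_schedule() later drains the queue into MCRs
 * and notifies kCF again when the depth falls back to dwl_lowater.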
2085 */ 2086 int 2087 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched) 2088 { 2089 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2090 2091 mutex_enter(&wlp->dwl_lock); 2092 2093 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p", 2094 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr, 2095 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr); 2096 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x", 2097 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr); 2098 /* sync out the entire context and descriptor chains */ 2099 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV); 2100 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah, 2101 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2102 reqp->destroy = TRUE; 2103 mutex_exit(&wlp->dwl_lock); 2104 return (CRYPTO_DEVICE_ERROR); 2105 } 2106 2107 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp); 2108 wlp->dwl_count++; 2109 wlp->dwl_lastsubmit = ddi_get_lbolt(); 2110 reqp->dr_wlp = wlp; 2111 2112 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) { 2113 /* we are fully loaded now, let kCF know */ 2114 2115 wlp->dwl_flowctl++; 2116 wlp->dwl_busy = 1; 2117 2118 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY); 2119 } 2120 2121 if (dosched) { 2122 #ifdef SCHEDDELAY 2123 /* possibly wait for more work to arrive */ 2124 if (wlp->dwl_count >= wlp->dwl_reqspermcr) { 2125 dca_schedule(dca, mcr); 2126 } else if (!wlp->dwl_schedtid) { 2127 /* wait 1 msec for more work before doing it */ 2128 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2129 (void *)wlp, drv_usectohz(MSEC)); 2130 } 2131 #else 2132 dca_schedule(dca, mcr); 2133 #endif 2134 } 2135 mutex_exit(&wlp->dwl_lock); 2136 2137 return (CRYPTO_QUEUED); 2138 } 2139 2140 void 2141 dca_schedule(dca_t *dca, int mcr) 2142 { 2143 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2144 int csr; 2145 int full; 2146 uint32_t status; 2147 2148 ASSERT(mutex_owned(&wlp->dwl_lock)); 2149 /* 2150 * If the card is draining or has an outstanding failure, 2151 * don't schedule any more work on it right now 2152 */ 2153 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) { 2154 return; 2155 } 2156 2157 if (mcr == MCR2) { 2158 csr = CSR_MCR2; 2159 full = DMASTAT_MCR2FULL; 2160 } else { 2161 csr = CSR_MCR1; 2162 full = DMASTAT_MCR1FULL; 2163 } 2164 2165 for (;;) { 2166 dca_work_t *workp; 2167 uint32_t offset; 2168 int nreqs; 2169 2170 status = GETCSR(dca, CSR_DMASTAT); 2171 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 2172 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 2173 return; 2174 2175 if ((status & full) != 0) 2176 break; 2177 2178 #ifdef SCHEDDELAY 2179 /* if there isn't enough to do, don't bother now */ 2180 if ((wlp->dwl_count < wlp->dwl_reqspermcr) && 2181 (ddi_get_lbolt() < (wlp->dwl_lastsubmit + 2182 drv_usectohz(MSEC)))) { 2183 /* wait a bit longer... */ 2184 if (wlp->dwl_schedtid == 0) { 2185 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2186 (void *)wlp, drv_usectohz(MSEC)); 2187 } 2188 return; 2189 } 2190 #endif 2191 2192 /* grab a work structure */ 2193 workp = dca_getwork(dca, mcr); 2194 2195 if (workp == NULL) { 2196 /* 2197 * There must be work ready to be reclaimed, 2198 * in this case, since the chip can only hold 2199 * less work outstanding than there are total. 
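			 * dca_reclaim() completes any finished MCRs and
			 * returns their work structures to the free list,
			 * so the retry of dca_getwork() can succeed.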
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t *reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure the MCR is synced out to the device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}

/*
 * Reclaim completed work, called in interrupt context.
 */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t *wlp = WORKLIST(dca, mcr);
	dca_work_t *workp;
	ushort_t flags;
	int nreclaimed = 0;
	int i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR in the submitted queue (runq), we check to see if
	 * it has been processed. If so, then we note each individual
	 * job in the MCR, and do the completion processing for
	 * each such job.
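	 * Only the first four bytes of each MCR (the flags word) are
	 * synced in from the device, which is enough to test
	 * MCRFLAG_FINISHED cheaply.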
2340 */ 2341 for (;;) { 2342 2343 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2344 if (workp == NULL) { 2345 break; 2346 } 2347 2348 /* only sync the MCR flags, since that's all we need */ 2349 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4, 2350 DDI_DMA_SYNC_FORKERNEL); 2351 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah, 2352 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2353 dca_rmqueue((dca_listnode_t *)workp); 2354 dca_destroywork(workp); 2355 return; 2356 } 2357 2358 flags = GETMCR16(workp, MCR_FLAGS); 2359 if ((flags & MCRFLAG_FINISHED) == 0) { 2360 /* chip is still working on it */ 2361 DBG(dca, DRECLAIM, 2362 "chip still working on it (MCR%d)", mcr); 2363 break; 2364 } 2365 2366 /* its really for us, so remove it from the queue */ 2367 dca_rmqueue((dca_listnode_t *)workp); 2368 2369 /* if we were draining, signal on the cv */ 2370 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2371 cv_signal(&wlp->dwl_cv); 2372 } 2373 2374 /* update statistics, done under the lock */ 2375 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2376 dca_request_t *reqp = workp->dw_reqs[i]; 2377 if (reqp == NULL) { 2378 continue; 2379 } 2380 if (reqp->dr_byte_stat >= 0) { 2381 dca->dca_stats[reqp->dr_byte_stat] += 2382 reqp->dr_pkt_length; 2383 } 2384 if (reqp->dr_job_stat >= 0) { 2385 dca->dca_stats[reqp->dr_job_stat]++; 2386 } 2387 } 2388 mutex_exit(&wlp->dwl_lock); 2389 2390 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2391 dca_request_t *reqp = workp->dw_reqs[i]; 2392 2393 if (reqp == NULL) { 2394 continue; 2395 } 2396 2397 /* Do the callback. */ 2398 workp->dw_reqs[i] = NULL; 2399 dca_done(reqp, CRYPTO_SUCCESS); 2400 2401 nreclaimed++; 2402 } 2403 2404 /* now we can release the work */ 2405 dca_freework(workp); 2406 2407 mutex_enter(&wlp->dwl_lock); 2408 } 2409 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed); 2410 } 2411 2412 int 2413 dca_length(crypto_data_t *cdata) 2414 { 2415 return (cdata->cd_length); 2416 } 2417 2418 /* 2419 * This is the callback function called from the interrupt when a kCF job 2420 * completes. It does some driver-specific things, and then calls the 2421 * kCF-provided callback. Finally, it cleans up the state for the work 2422 * request and drops the reference count to allow for DR. 2423 */ 2424 void 2425 dca_done(dca_request_t *reqp, int err) 2426 { 2427 uint64_t ena = 0; 2428 2429 /* unbind any chains we were using */ 2430 if (dca_unbindchains(reqp) != DDI_SUCCESS) { 2431 /* DMA failure */ 2432 ena = dca_ena(ena); 2433 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT, 2434 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR, 2435 "fault on buffer DMA handle"); 2436 if (err == CRYPTO_SUCCESS) { 2437 err = CRYPTO_DEVICE_ERROR; 2438 } 2439 } 2440 2441 if (reqp->dr_callback != NULL) { 2442 reqp->dr_callback(reqp, err); 2443 } else { 2444 dca_freereq(reqp); 2445 } 2446 } 2447 2448 /* 2449 * Call this when a failure is detected. It will reset the chip, 2450 * log a message, alert kCF, and mark jobs in the runq as failed. 2451 */ 2452 /* ARGSUSED */ 2453 void 2454 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index, 2455 uint64_t ena, int errno, char *mess, ...) 
2456 { 2457 va_list ap; 2458 char buf[256]; 2459 int mcr; 2460 char *eclass; 2461 int have_mutex; 2462 2463 va_start(ap, mess); 2464 (void) vsprintf(buf, mess, ap); 2465 va_end(ap); 2466 2467 eclass = dca_fma_eclass_string(dca->dca_model, index); 2468 2469 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) && 2470 index != DCA_FM_ECLASS_NONE) { 2471 ddi_fm_ereport_post(dca->dca_dip, eclass, ena, 2472 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 2473 FM_EREPORT_VERS0, NULL); 2474 2475 /* Report the impact of the failure to the DDI. */ 2476 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST); 2477 } else { 2478 /* Just log the error string to the message log */ 2479 dca_error(dca, buf); 2480 } 2481 2482 /* 2483 * Indicate a failure (keeps schedule from running). 2484 */ 2485 dca->dca_flags |= DCA_FAILED; 2486 2487 /* 2488 * Reset the chip. This should also have as a side effect, the 2489 * disabling of all interrupts from the device. 2490 */ 2491 (void) dca_reset(dca, 1); 2492 2493 /* 2494 * Report the failure to kCF. 2495 */ 2496 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2497 if (WORKLIST(dca, mcr)->dwl_prov) { 2498 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov, 2499 CRYPTO_PROVIDER_FAILED); 2500 } 2501 } 2502 2503 /* 2504 * Return jobs not sent to hardware back to kCF. 2505 */ 2506 dca_rejectjobs(dca); 2507 2508 /* 2509 * From this point on, no new work should be arriving, and the 2510 * chip should not be doing any active DMA. 2511 */ 2512 2513 /* 2514 * Now find all the work submitted to the device and fail 2515 * them. 2516 */ 2517 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2518 dca_worklist_t *wlp; 2519 int i; 2520 2521 wlp = WORKLIST(dca, mcr); 2522 2523 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2524 continue; 2525 } 2526 for (;;) { 2527 dca_work_t *workp; 2528 2529 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2530 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq); 2531 if (workp == NULL) { 2532 if (have_mutex) 2533 mutex_exit(&wlp->dwl_lock); 2534 break; 2535 } 2536 mutex_exit(&wlp->dwl_lock); 2537 2538 /* 2539 * Free up requests 2540 */ 2541 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2542 dca_request_t *reqp = workp->dw_reqs[i]; 2543 if (reqp) { 2544 dca_done(reqp, errno); 2545 workp->dw_reqs[i] = NULL; 2546 } 2547 } 2548 2549 mutex_enter(&wlp->dwl_lock); 2550 /* 2551 * If waiting to drain, signal on the waiter. 2552 */ 2553 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2554 cv_signal(&wlp->dwl_cv); 2555 } 2556 2557 /* 2558 * Return the work and request structures to 2559 * the free pool. 2560 */ 2561 dca_freework(workp); 2562 if (have_mutex) 2563 mutex_exit(&wlp->dwl_lock); 2564 } 2565 } 2566 2567 } 2568 2569 #ifdef SCHEDDELAY 2570 /* 2571 * Reschedule worklist as needed. 2572 */ 2573 void 2574 dca_schedtimeout(void *arg) 2575 { 2576 dca_worklist_t *wlp = (dca_worklist_t *)arg; 2577 mutex_enter(&wlp->dwl_lock); 2578 wlp->dwl_schedtid = 0; 2579 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr); 2580 mutex_exit(&wlp->dwl_lock); 2581 } 2582 #endif 2583 2584 /* 2585 * Check for stalled jobs. 
 */
void
dca_jobtimeout(void *arg)
{
	int mcr;
	dca_t *dca = (dca_t *)arg;
	int hung = 0;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t *wlp = WORKLIST(dca, mcr);
		dca_work_t *workp;
		clock_t when;

		mutex_enter(&wlp->dwl_lock);
		when = ddi_get_lbolt();

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			/* nothing sitting in the queue */
			mutex_exit(&wlp->dwl_lock);
			continue;
		}

		if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
			/* request has been queued for less than STALETIME */
			mutex_exit(&wlp->dwl_lock);
			continue;
		}

		/* job has been sitting around longer than STALETIME, badness */
		DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
		    mcr);

		/* put it back in the queue, until we reset the chip */
		hung++;
		mutex_exit(&wlp->dwl_lock);
	}

	if (hung) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout processing job");
	}

	/* reschedule ourselves */
	mutex_enter(&dca->dca_intrlock);
	if (dca->dca_jobtid == 0) {
		/* timeout has been canceled, prior to DR */
		mutex_exit(&dca->dca_intrlock);
		return;
	}

	/* check again in 1 second */
	dca->dca_jobtid = timeout(dca_jobtimeout, arg,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);
}

/*
 * This returns all jobs back to kCF. It assumes that processing
 * on the worklist has halted.
 */
void
dca_rejectjobs(dca_t *dca)
{
	int mcr;
	int have_mutex;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t *wlp = WORKLIST(dca, mcr);
		dca_request_t *reqp;

		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			mutex_exit(&wlp->dwl_lock);

			(void) dca_unbindchains(reqp);
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}

int
dca_drain(dca_t *dca)
{
	int mcr;

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef SCHEDDELAY
		timeout_id_t tid;
#endif
		dca_worklist_t *wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to STALETIME to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
2726 } 2727 return (0); 2728 } 2729 2730 void 2731 dca_undrain(dca_t *dca) 2732 { 2733 int mcr; 2734 2735 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2736 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2737 mutex_enter(&wlp->dwl_lock); 2738 wlp->dwl_drain = 0; 2739 dca_schedule(dca, mcr); 2740 mutex_exit(&wlp->dwl_lock); 2741 } 2742 } 2743 2744 /* 2745 * Duplicate the crypto_data_t structure, but point to the original 2746 * buffers. 2747 */ 2748 int 2749 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput) 2750 { 2751 ninput->cd_format = input->cd_format; 2752 ninput->cd_offset = input->cd_offset; 2753 ninput->cd_length = input->cd_length; 2754 ninput->cd_miscdata = input->cd_miscdata; 2755 2756 switch (input->cd_format) { 2757 case CRYPTO_DATA_RAW: 2758 ninput->cd_raw.iov_base = input->cd_raw.iov_base; 2759 ninput->cd_raw.iov_len = input->cd_raw.iov_len; 2760 break; 2761 2762 case CRYPTO_DATA_UIO: 2763 ninput->cd_uio = input->cd_uio; 2764 break; 2765 2766 case CRYPTO_DATA_MBLK: 2767 ninput->cd_mp = input->cd_mp; 2768 break; 2769 2770 default: 2771 DBG(NULL, DWARN, 2772 "dca_dupcrypto: unrecognised crypto data format"); 2773 return (CRYPTO_FAILED); 2774 } 2775 2776 return (CRYPTO_SUCCESS); 2777 } 2778 2779 /* 2780 * Performs validation checks on the input and output data structures. 2781 */ 2782 int 2783 dca_verifyio(crypto_data_t *input, crypto_data_t *output) 2784 { 2785 int rv = CRYPTO_SUCCESS; 2786 2787 switch (input->cd_format) { 2788 case CRYPTO_DATA_RAW: 2789 break; 2790 2791 case CRYPTO_DATA_UIO: 2792 /* we support only kernel buffer */ 2793 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) { 2794 DBG(NULL, DWARN, "non kernel input uio buffer"); 2795 rv = CRYPTO_ARGUMENTS_BAD; 2796 } 2797 break; 2798 2799 case CRYPTO_DATA_MBLK: 2800 break; 2801 2802 default: 2803 DBG(NULL, DWARN, "unrecognised input crypto data format"); 2804 rv = CRYPTO_ARGUMENTS_BAD; 2805 } 2806 2807 switch (output->cd_format) { 2808 case CRYPTO_DATA_RAW: 2809 break; 2810 2811 case CRYPTO_DATA_UIO: 2812 /* we support only kernel buffer */ 2813 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) { 2814 DBG(NULL, DWARN, "non kernel output uio buffer"); 2815 rv = CRYPTO_ARGUMENTS_BAD; 2816 } 2817 break; 2818 2819 case CRYPTO_DATA_MBLK: 2820 break; 2821 2822 default: 2823 DBG(NULL, DWARN, "unrecognised output crypto data format"); 2824 rv = CRYPTO_ARGUMENTS_BAD; 2825 } 2826 2827 return (rv); 2828 } 2829 2830 /* 2831 * data: source crypto_data_t struct 2832 * off: offset into the source before commencing copy 2833 * count: the amount of data to copy 2834 * dest: destination buffer 2835 */ 2836 int 2837 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest) 2838 { 2839 int rv = CRYPTO_SUCCESS; 2840 uio_t *uiop; 2841 uint_t vec_idx; 2842 size_t cur_len; 2843 mblk_t *mp; 2844 2845 if (count == 0) { 2846 /* We don't want anything so we're done. */ 2847 return (rv); 2848 } 2849 2850 /* 2851 * Sanity check that we haven't specified a length greater than the 2852 * offset adjusted size of the buffer. 2853 */ 2854 if (count > (data->cd_length - off)) { 2855 return (CRYPTO_DATA_LEN_RANGE); 2856 } 2857 2858 /* Add the internal crypto_data offset to the requested offset. */ 2859 off += data->cd_offset; 2860 2861 switch (data->cd_format) { 2862 case CRYPTO_DATA_RAW: 2863 bcopy(data->cd_raw.iov_base + off, dest, count); 2864 break; 2865 2866 case CRYPTO_DATA_UIO: 2867 /* 2868 * Jump to the first iovec containing data to be 2869 * processed. 
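		 * The requested offset is consumed iovec by iovec until
		 * it falls within the current iovec.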
2870 */ 2871 uiop = data->cd_uio; 2872 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 2873 off >= uiop->uio_iov[vec_idx].iov_len; 2874 off -= uiop->uio_iov[vec_idx++].iov_len) 2875 ; 2876 if (vec_idx == uiop->uio_iovcnt) { 2877 /* 2878 * The caller specified an offset that is larger than 2879 * the total size of the buffers it provided. 2880 */ 2881 return (CRYPTO_DATA_LEN_RANGE); 2882 } 2883 2884 /* 2885 * Now process the iovecs. 2886 */ 2887 while (vec_idx < uiop->uio_iovcnt && count > 0) { 2888 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 2889 off, count); 2890 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 2891 cur_len); 2892 count -= cur_len; 2893 dest += cur_len; 2894 vec_idx++; 2895 off = 0; 2896 } 2897 2898 if (vec_idx == uiop->uio_iovcnt && count > 0) { 2899 /* 2900 * The end of the specified iovec's was reached but 2901 * the length requested could not be processed 2902 * (requested to digest more data than it provided). 2903 */ 2904 return (CRYPTO_DATA_LEN_RANGE); 2905 } 2906 break; 2907 2908 case CRYPTO_DATA_MBLK: 2909 /* 2910 * Jump to the first mblk_t containing data to be processed. 2911 */ 2912 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp); 2913 off -= MBLKL(mp), mp = mp->b_cont) 2914 ; 2915 if (mp == NULL) { 2916 /* 2917 * The caller specified an offset that is larger than 2918 * the total size of the buffers it provided. 2919 */ 2920 return (CRYPTO_DATA_LEN_RANGE); 2921 } 2922 2923 /* 2924 * Now do the processing on the mblk chain. 2925 */ 2926 while (mp != NULL && count > 0) { 2927 cur_len = min(MBLKL(mp) - off, count); 2928 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 2929 count -= cur_len; 2930 dest += cur_len; 2931 mp = mp->b_cont; 2932 off = 0; 2933 } 2934 2935 if (mp == NULL && count > 0) { 2936 /* 2937 * The end of the mblk was reached but the length 2938 * requested could not be processed, (requested to 2939 * digest more data than it provided). 2940 */ 2941 return (CRYPTO_DATA_LEN_RANGE); 2942 } 2943 break; 2944 2945 default: 2946 DBG(NULL, DWARN, "unrecognised crypto data format"); 2947 rv = CRYPTO_ARGUMENTS_BAD; 2948 } 2949 return (rv); 2950 } 2951 2952 2953 /* 2954 * Performs the input, output or hard scatter/gather checks on the specified 2955 * crypto_data_t struct. Returns true if the data is scatter/gather in nature 2956 * ie fails the test. 2957 */ 2958 int 2959 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val) 2960 { 2961 uio_t *uiop; 2962 mblk_t *mp; 2963 int rv = FALSE; 2964 2965 switch (val) { 2966 case DCA_SG_CONTIG: 2967 /* 2968 * Check for a contiguous data buffer. 2969 */ 2970 switch (data->cd_format) { 2971 case CRYPTO_DATA_RAW: 2972 /* Contiguous in nature */ 2973 break; 2974 2975 case CRYPTO_DATA_UIO: 2976 if (data->cd_uio->uio_iovcnt > 1) 2977 rv = TRUE; 2978 break; 2979 2980 case CRYPTO_DATA_MBLK: 2981 mp = data->cd_mp; 2982 if (mp->b_cont != NULL) 2983 rv = TRUE; 2984 break; 2985 2986 default: 2987 DBG(NULL, DWARN, "unrecognised crypto data format"); 2988 } 2989 break; 2990 2991 case DCA_SG_WALIGN: 2992 /* 2993 * Check for a contiguous data buffer that is 32-bit word 2994 * aligned and is of word multiples in size. 
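		 * For example, a 16-byte buffer at a 4-byte-aligned address
		 * passes; a 10-byte buffer fails the size check, and an odd
		 * base address fails the alignment check.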
2995 */ 2996 switch (data->cd_format) { 2997 case CRYPTO_DATA_RAW: 2998 if ((data->cd_raw.iov_len % sizeof (uint32_t)) || 2999 ((uintptr_t)data->cd_raw.iov_base % 3000 sizeof (uint32_t))) { 3001 rv = TRUE; 3002 } 3003 break; 3004 3005 case CRYPTO_DATA_UIO: 3006 uiop = data->cd_uio; 3007 if (uiop->uio_iovcnt > 1) { 3008 return (TRUE); 3009 } 3010 /* So there is only one iovec */ 3011 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) || 3012 ((uintptr_t)uiop->uio_iov[0].iov_base % 3013 sizeof (uint32_t))) { 3014 rv = TRUE; 3015 } 3016 break; 3017 3018 case CRYPTO_DATA_MBLK: 3019 mp = data->cd_mp; 3020 if (mp->b_cont != NULL) { 3021 return (TRUE); 3022 } 3023 /* So there is only one mblk in the chain */ 3024 if ((MBLKL(mp) % sizeof (uint32_t)) || 3025 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) { 3026 rv = TRUE; 3027 } 3028 break; 3029 3030 default: 3031 DBG(NULL, DWARN, "unrecognised crypto data format"); 3032 } 3033 break; 3034 3035 case DCA_SG_PALIGN: 3036 /* 3037 * Check that the data buffer is page aligned and is of 3038 * page multiples in size. 3039 */ 3040 switch (data->cd_format) { 3041 case CRYPTO_DATA_RAW: 3042 if ((data->cd_length % dca->dca_pagesize) || 3043 ((uintptr_t)data->cd_raw.iov_base % 3044 dca->dca_pagesize)) { 3045 rv = TRUE; 3046 } 3047 break; 3048 3049 case CRYPTO_DATA_UIO: 3050 uiop = data->cd_uio; 3051 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) || 3052 ((uintptr_t)uiop->uio_iov[0].iov_base % 3053 dca->dca_pagesize)) { 3054 rv = TRUE; 3055 } 3056 break; 3057 3058 case CRYPTO_DATA_MBLK: 3059 mp = data->cd_mp; 3060 if ((MBLKL(mp) % dca->dca_pagesize) || 3061 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) { 3062 rv = TRUE; 3063 } 3064 break; 3065 3066 default: 3067 DBG(NULL, DWARN, "unrecognised crypto data format"); 3068 } 3069 break; 3070 3071 default: 3072 DBG(NULL, DWARN, "unrecognised scatter/gather param type"); 3073 } 3074 3075 return (rv); 3076 } 3077 3078 /* 3079 * Increments the cd_offset and decrements the cd_length as the data is 3080 * gathered from the crypto_data_t struct. 3081 * The data is reverse-copied into the dest buffer if the flag is true. 3082 */ 3083 int 3084 dca_gather(crypto_data_t *in, char *dest, int count, int reverse) 3085 { 3086 int rv = CRYPTO_SUCCESS; 3087 uint_t vec_idx; 3088 uio_t *uiop; 3089 off_t off = in->cd_offset; 3090 size_t cur_len; 3091 mblk_t *mp; 3092 3093 switch (in->cd_format) { 3094 case CRYPTO_DATA_RAW: 3095 if (count > in->cd_length) { 3096 /* 3097 * The caller specified a length greater than the 3098 * size of the buffer. 3099 */ 3100 return (CRYPTO_DATA_LEN_RANGE); 3101 } 3102 if (reverse) 3103 dca_reverse(in->cd_raw.iov_base + off, dest, count, 3104 count); 3105 else 3106 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3107 in->cd_offset += count; 3108 in->cd_length -= count; 3109 break; 3110 3111 case CRYPTO_DATA_UIO: 3112 /* 3113 * Jump to the first iovec containing data to be processed. 3114 */ 3115 uiop = in->cd_uio; 3116 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3117 off >= uiop->uio_iov[vec_idx].iov_len; 3118 off -= uiop->uio_iov[vec_idx++].iov_len) 3119 ; 3120 if (vec_idx == uiop->uio_iovcnt) { 3121 /* 3122 * The caller specified an offset that is larger than 3123 * the total size of the buffers it provided. 3124 */ 3125 return (CRYPTO_DATA_LEN_RANGE); 3126 } 3127 3128 /* 3129 * Now process the iovecs. 
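		 * Each pass copies (or reverse-copies) cur_len bytes and
		 * advances cd_offset/cd_length, so a later call resumes
		 * where this one stopped.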
3130 */ 3131 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3132 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3133 off, count); 3134 count -= cur_len; 3135 if (reverse) { 3136 /* Fill the dest buffer from the end */ 3137 dca_reverse(uiop->uio_iov[vec_idx].iov_base + 3138 off, dest+count, cur_len, cur_len); 3139 } else { 3140 bcopy(uiop->uio_iov[vec_idx].iov_base + off, 3141 dest, cur_len); 3142 dest += cur_len; 3143 } 3144 in->cd_offset += cur_len; 3145 in->cd_length -= cur_len; 3146 vec_idx++; 3147 off = 0; 3148 } 3149 3150 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3151 /* 3152 * The end of the specified iovec's was reached but 3153 * the length requested could not be processed 3154 * (requested to digest more data than it provided). 3155 */ 3156 return (CRYPTO_DATA_LEN_RANGE); 3157 } 3158 break; 3159 3160 case CRYPTO_DATA_MBLK: 3161 /* 3162 * Jump to the first mblk_t containing data to be processed. 3163 */ 3164 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3165 off -= MBLKL(mp), mp = mp->b_cont) 3166 ; 3167 if (mp == NULL) { 3168 /* 3169 * The caller specified an offset that is larger than 3170 * the total size of the buffers it provided. 3171 */ 3172 return (CRYPTO_DATA_LEN_RANGE); 3173 } 3174 3175 /* 3176 * Now do the processing on the mblk chain. 3177 */ 3178 while (mp != NULL && count > 0) { 3179 cur_len = min(MBLKL(mp) - off, count); 3180 count -= cur_len; 3181 if (reverse) { 3182 /* Fill the dest buffer from the end */ 3183 dca_reverse((char *)(mp->b_rptr + off), 3184 dest+count, cur_len, cur_len); 3185 } else { 3186 bcopy((char *)(mp->b_rptr + off), dest, 3187 cur_len); 3188 dest += cur_len; 3189 } 3190 in->cd_offset += cur_len; 3191 in->cd_length -= cur_len; 3192 mp = mp->b_cont; 3193 off = 0; 3194 } 3195 3196 if (mp == NULL && count > 0) { 3197 /* 3198 * The end of the mblk was reached but the length 3199 * requested could not be processed, (requested to 3200 * digest more data than it provided). 3201 */ 3202 return (CRYPTO_DATA_LEN_RANGE); 3203 } 3204 break; 3205 3206 default: 3207 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format"); 3208 rv = CRYPTO_ARGUMENTS_BAD; 3209 } 3210 return (rv); 3211 } 3212 3213 /* 3214 * Increments the cd_offset and decrements the cd_length as the data is 3215 * gathered from the crypto_data_t struct. 3216 */ 3217 int 3218 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest, 3219 int count) 3220 { 3221 int rv = CRYPTO_SUCCESS; 3222 caddr_t baddr; 3223 uint_t vec_idx; 3224 uio_t *uiop; 3225 off_t off = in->cd_offset; 3226 size_t cur_len; 3227 mblk_t *mp; 3228 3229 /* Process the residual first */ 3230 if (*residlen > 0) { 3231 uint_t num = min(count, *residlen); 3232 bcopy(resid, dest, num); 3233 *residlen -= num; 3234 if (*residlen > 0) { 3235 /* 3236 * Requested amount 'count' is less than what's in 3237 * the residual, so shuffle any remaining resid to 3238 * the front. 3239 */ 3240 baddr = resid + num; 3241 bcopy(baddr, resid, *residlen); 3242 } 3243 dest += num; 3244 count -= num; 3245 } 3246 3247 /* Now process what's in the crypto_data_t structs */ 3248 switch (in->cd_format) { 3249 case CRYPTO_DATA_RAW: 3250 if (count > in->cd_length) { 3251 /* 3252 * The caller specified a length greater than the 3253 * size of the buffer. 
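			 * (count has already been reduced by whatever was
			 * taken from the residual above.)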
3254 */ 3255 return (CRYPTO_DATA_LEN_RANGE); 3256 } 3257 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3258 in->cd_offset += count; 3259 in->cd_length -= count; 3260 break; 3261 3262 case CRYPTO_DATA_UIO: 3263 /* 3264 * Jump to the first iovec containing data to be processed. 3265 */ 3266 uiop = in->cd_uio; 3267 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3268 off >= uiop->uio_iov[vec_idx].iov_len; 3269 off -= uiop->uio_iov[vec_idx++].iov_len) 3270 ; 3271 if (vec_idx == uiop->uio_iovcnt) { 3272 /* 3273 * The caller specified an offset that is larger than 3274 * the total size of the buffers it provided. 3275 */ 3276 return (CRYPTO_DATA_LEN_RANGE); 3277 } 3278 3279 /* 3280 * Now process the iovecs. 3281 */ 3282 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3283 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3284 off, count); 3285 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 3286 cur_len); 3287 count -= cur_len; 3288 dest += cur_len; 3289 in->cd_offset += cur_len; 3290 in->cd_length -= cur_len; 3291 vec_idx++; 3292 off = 0; 3293 } 3294 3295 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3296 /* 3297 * The end of the specified iovec's was reached but 3298 * the length requested could not be processed 3299 * (requested to digest more data than it provided). 3300 */ 3301 return (CRYPTO_DATA_LEN_RANGE); 3302 } 3303 break; 3304 3305 case CRYPTO_DATA_MBLK: 3306 /* 3307 * Jump to the first mblk_t containing data to be processed. 3308 */ 3309 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3310 off -= MBLKL(mp), mp = mp->b_cont) 3311 ; 3312 if (mp == NULL) { 3313 /* 3314 * The caller specified an offset that is larger than 3315 * the total size of the buffers it provided. 3316 */ 3317 return (CRYPTO_DATA_LEN_RANGE); 3318 } 3319 3320 /* 3321 * Now do the processing on the mblk chain. 3322 */ 3323 while (mp != NULL && count > 0) { 3324 cur_len = min(MBLKL(mp) - off, count); 3325 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 3326 count -= cur_len; 3327 dest += cur_len; 3328 in->cd_offset += cur_len; 3329 in->cd_length -= cur_len; 3330 mp = mp->b_cont; 3331 off = 0; 3332 } 3333 3334 if (mp == NULL && count > 0) { 3335 /* 3336 * The end of the mblk was reached but the length 3337 * requested could not be processed, (requested to 3338 * digest more data than it provided). 3339 */ 3340 return (CRYPTO_DATA_LEN_RANGE); 3341 } 3342 break; 3343 3344 default: 3345 DBG(NULL, DWARN, 3346 "dca_resid_gather: unrecognised crypto data format"); 3347 rv = CRYPTO_ARGUMENTS_BAD; 3348 } 3349 return (rv); 3350 } 3351 3352 /* 3353 * Appends the data to the crypto_data_t struct increasing cd_length. 3354 * cd_offset is left unchanged. 3355 * Data is reverse-copied if the flag is TRUE. 3356 */ 3357 int 3358 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse) 3359 { 3360 int rv = CRYPTO_SUCCESS; 3361 off_t offset = out->cd_offset + out->cd_length; 3362 uint_t vec_idx; 3363 uio_t *uiop; 3364 size_t cur_len; 3365 mblk_t *mp; 3366 3367 switch (out->cd_format) { 3368 case CRYPTO_DATA_RAW: 3369 if (out->cd_raw.iov_len - offset < count) { 3370 /* Trying to write out more than space available. */ 3371 return (CRYPTO_DATA_LEN_RANGE); 3372 } 3373 if (reverse) 3374 dca_reverse((void*) src, out->cd_raw.iov_base + offset, 3375 count, count); 3376 else 3377 bcopy(src, out->cd_raw.iov_base + offset, count); 3378 out->cd_length += count; 3379 break; 3380 3381 case CRYPTO_DATA_UIO: 3382 /* 3383 * Jump to the first iovec that can be written to. 
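		 * Output is appended at cd_offset + cd_length, i.e. just
		 * past any data already present in the output buffer.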
3384 */ 3385 uiop = out->cd_uio; 3386 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3387 offset >= uiop->uio_iov[vec_idx].iov_len; 3388 offset -= uiop->uio_iov[vec_idx++].iov_len) 3389 ; 3390 if (vec_idx == uiop->uio_iovcnt) { 3391 /* 3392 * The caller specified an offset that is larger than 3393 * the total size of the buffers it provided. 3394 */ 3395 return (CRYPTO_DATA_LEN_RANGE); 3396 } 3397 3398 /* 3399 * Now process the iovecs. 3400 */ 3401 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3402 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3403 offset, count); 3404 count -= cur_len; 3405 if (reverse) { 3406 dca_reverse((void*) (src+count), 3407 uiop->uio_iov[vec_idx].iov_base + 3408 offset, cur_len, cur_len); 3409 } else { 3410 bcopy(src, uiop->uio_iov[vec_idx].iov_base + 3411 offset, cur_len); 3412 src += cur_len; 3413 } 3414 out->cd_length += cur_len; 3415 vec_idx++; 3416 offset = 0; 3417 } 3418 3419 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3420 /* 3421 * The end of the specified iovec's was reached but 3422 * the length requested could not be processed 3423 * (requested to write more data than space provided). 3424 */ 3425 return (CRYPTO_DATA_LEN_RANGE); 3426 } 3427 break; 3428 3429 case CRYPTO_DATA_MBLK: 3430 /* 3431 * Jump to the first mblk_t that can be written to. 3432 */ 3433 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp); 3434 offset -= MBLKL(mp), mp = mp->b_cont) 3435 ; 3436 if (mp == NULL) { 3437 /* 3438 * The caller specified an offset that is larger than 3439 * the total size of the buffers it provided. 3440 */ 3441 return (CRYPTO_DATA_LEN_RANGE); 3442 } 3443 3444 /* 3445 * Now do the processing on the mblk chain. 3446 */ 3447 while (mp != NULL && count > 0) { 3448 cur_len = min(MBLKL(mp) - offset, count); 3449 count -= cur_len; 3450 if (reverse) { 3451 dca_reverse((void*) (src+count), 3452 (char *)(mp->b_rptr + offset), cur_len, 3453 cur_len); 3454 } else { 3455 bcopy(src, (char *)(mp->b_rptr + offset), 3456 cur_len); 3457 src += cur_len; 3458 } 3459 out->cd_length += cur_len; 3460 mp = mp->b_cont; 3461 offset = 0; 3462 } 3463 3464 if (mp == NULL && count > 0) { 3465 /* 3466 * The end of the mblk was reached but the length 3467 * requested could not be processed, (requested to 3468 * digest more data than it provided). 3469 */ 3470 return (CRYPTO_DATA_LEN_RANGE); 3471 } 3472 break; 3473 3474 default: 3475 DBG(NULL, DWARN, "unrecognised crypto data format"); 3476 rv = CRYPTO_ARGUMENTS_BAD; 3477 } 3478 return (rv); 3479 } 3480 3481 /* 3482 * Compare two byte arrays in reverse order. 3483 * Return 0 if they are identical, 1 otherwise. 3484 */ 3485 int 3486 dca_bcmp_reverse(const void *s1, const void *s2, size_t n) 3487 { 3488 int i; 3489 caddr_t src, dst; 3490 3491 if (!n) 3492 return (0); 3493 3494 src = ((caddr_t)s1) + n - 1; 3495 dst = (caddr_t)s2; 3496 for (i = 0; i < n; i++) { 3497 if (*src != *dst) 3498 return (1); 3499 src--; 3500 dst++; 3501 } 3502 3503 return (0); 3504 } 3505 3506 3507 /* 3508 * This calculates the size of a bignum in bits, specifically not counting 3509 * leading zero bits. This size calculation must be done *before* any 3510 * endian reversal takes place (i.e. the numbers are in absolute big-endian 3511 * order.) 
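 * For example, the two-byte bignum { 0x00, 0x05 } has a bit length of 3.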
 */
int
dca_bitlen(unsigned char *bignum, int bytelen)
{
	unsigned char msbyte;
	int i, j;

	for (i = 0; i < bytelen - 1; i++) {
		if (bignum[i] != 0) {
			break;
		}
	}
	msbyte = bignum[i];
	for (j = 8; j > 1; j--) {
		if (msbyte & 0x80) {
			break;
		}
		msbyte <<= 1;
	}
	return ((8 * (bytelen - i - 1)) + j);
}

/*
 * This compares two bignums (in big-endian order). It ignores leading
 * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
 */
int
dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
{
	while ((n1len > 1) && (*n1 == 0)) {
		n1len--;
		n1++;
	}
	while ((n2len > 1) && (*n2 == 0)) {
		n2len--;
		n2++;
	}
	if (n1len != n2len) {
		return (n1len - n2len);
	}
	while ((n1len > 1) && (*n1 == *n2)) {
		n1++;
		n2++;
		n1len--;
	}
	return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
}

/*
 * Return the array of key attributes.
 */
crypto_object_attribute_t *
dca_get_key_attr(crypto_key_t *key)
{
	if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
	    (key->ck_count == 0)) {
		return (NULL);
	}

	return (key->ck_attrs);
}

/*
 * If the attribute type exists, valp points to its 32-bit value.
 */
int
dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, uint32_t *valp)
{
	crypto_object_attribute_t *bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	*valp = *bap->oa_value;

	return (CRYPTO_SUCCESS);
}

/*
 * If the attribute type exists, data contains the start address of the
 * value, and numelems contains its length.
 */
int
dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, void **data, unsigned int *numelems)
{
	crypto_object_attribute_t *bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	*data = bap->oa_value;
	*numelems = bap->oa_value_len;

	return (CRYPTO_SUCCESS);
}

/*
 * Finds the entry of the specified attribute type. If it is not found,
 * dca_find_attribute returns NULL.
 */
crypto_object_attribute_t *
dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype)
{
	while (atnum) {
		if (attrp->oa_type == atype)
			return (attrp);
		atnum--;
		attrp++;
	}
	return (NULL);
}

/*
 * Return the address of the first data buffer. If the data format is
 * unrecognised return NULL.
3634 */ 3635 caddr_t 3636 dca_bufdaddr(crypto_data_t *data) 3637 { 3638 switch (data->cd_format) { 3639 case CRYPTO_DATA_RAW: 3640 return (data->cd_raw.iov_base + data->cd_offset); 3641 case CRYPTO_DATA_UIO: 3642 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset); 3643 case CRYPTO_DATA_MBLK: 3644 return ((char *)data->cd_mp->b_rptr + data->cd_offset); 3645 default: 3646 DBG(NULL, DWARN, 3647 "dca_bufdaddr: unrecognised crypto data format"); 3648 return (NULL); 3649 } 3650 } 3651 3652 static caddr_t 3653 dca_bufdaddr_out(crypto_data_t *data) 3654 { 3655 size_t offset = data->cd_offset + data->cd_length; 3656 3657 switch (data->cd_format) { 3658 case CRYPTO_DATA_RAW: 3659 return (data->cd_raw.iov_base + offset); 3660 case CRYPTO_DATA_UIO: 3661 return (data->cd_uio->uio_iov[0].iov_base + offset); 3662 case CRYPTO_DATA_MBLK: 3663 return ((char *)data->cd_mp->b_rptr + offset); 3664 default: 3665 DBG(NULL, DWARN, 3666 "dca_bufdaddr_out: unrecognised crypto data format"); 3667 return (NULL); 3668 } 3669 } 3670 3671 /* 3672 * Control entry points. 3673 */ 3674 3675 /* ARGSUSED */ 3676 static void 3677 dca_provider_status(crypto_provider_handle_t provider, uint_t *status) 3678 { 3679 *status = CRYPTO_PROVIDER_READY; 3680 } 3681 3682 /* 3683 * Cipher (encrypt/decrypt) entry points. 3684 */ 3685 3686 /* ARGSUSED */ 3687 static int 3688 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3689 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3690 crypto_req_handle_t req) 3691 { 3692 int error = CRYPTO_FAILED; 3693 dca_t *softc; 3694 /* LINTED E_FUNC_SET_NOT_USED */ 3695 int instance; 3696 3697 /* extract softc and instance number from context */ 3698 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3699 DBG(softc, DENTRY, "dca_encrypt_init: started"); 3700 3701 /* check mechanism */ 3702 switch (mechanism->cm_type) { 3703 case DES_CBC_MECH_INFO_TYPE: 3704 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3705 DR_ENCRYPT); 3706 break; 3707 case DES3_CBC_MECH_INFO_TYPE: 3708 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3709 DR_ENCRYPT | DR_TRIPLE); 3710 break; 3711 case RSA_PKCS_MECH_INFO_TYPE: 3712 case RSA_X_509_MECH_INFO_TYPE: 3713 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3714 break; 3715 default: 3716 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type " 3717 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3718 error = CRYPTO_MECHANISM_INVALID; 3719 } 3720 3721 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error); 3722 3723 if (error == CRYPTO_SUCCESS) 3724 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3725 &softc->dca_ctx_list_lock); 3726 3727 return (error); 3728 } 3729 3730 /* ARGSUSED */ 3731 static int 3732 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3733 crypto_data_t *ciphertext, crypto_req_handle_t req) 3734 { 3735 int error = CRYPTO_FAILED; 3736 dca_t *softc; 3737 /* LINTED E_FUNC_SET_NOT_USED */ 3738 int instance; 3739 3740 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3741 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3742 3743 /* extract softc and instance number from context */ 3744 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3745 DBG(softc, DENTRY, "dca_encrypt: started"); 3746 3747 /* handle inplace ops */ 3748 if (!ciphertext) { 3749 dca_request_t *reqp = ctx->cc_provider_private; 3750 reqp->dr_flags |= DR_INPLACE; 3751 ciphertext = plaintext; 3752 } 3753 3754 /* check mechanism */ 3755 switch (DCA_MECH_FROM_CTX(ctx)) { 3756 case DES_CBC_MECH_INFO_TYPE: 3757 error = 
dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT); 3758 break; 3759 case DES3_CBC_MECH_INFO_TYPE: 3760 error = dca_3des(ctx, plaintext, ciphertext, req, 3761 DR_ENCRYPT | DR_TRIPLE); 3762 break; 3763 case RSA_PKCS_MECH_INFO_TYPE: 3764 case RSA_X_509_MECH_INFO_TYPE: 3765 error = dca_rsastart(ctx, plaintext, ciphertext, req, 3766 DCA_RSA_ENC); 3767 break; 3768 default: 3769 /* Should never reach here */ 3770 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type " 3771 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3772 error = CRYPTO_MECHANISM_INVALID; 3773 } 3774 3775 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 3776 (error != CRYPTO_BUFFER_TOO_SMALL)) { 3777 ciphertext->cd_length = 0; 3778 } 3779 3780 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error); 3781 3782 return (error); 3783 } 3784 3785 /* ARGSUSED */ 3786 static int 3787 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3788 crypto_data_t *ciphertext, crypto_req_handle_t req) 3789 { 3790 int error = CRYPTO_FAILED; 3791 dca_t *softc; 3792 /* LINTED E_FUNC_SET_NOT_USED */ 3793 int instance; 3794 3795 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3796 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3797 3798 /* extract softc and instance number from context */ 3799 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3800 DBG(softc, DENTRY, "dca_encrypt_update: started"); 3801 3802 /* handle inplace ops */ 3803 if (!ciphertext) { 3804 dca_request_t *reqp = ctx->cc_provider_private; 3805 reqp->dr_flags |= DR_INPLACE; 3806 ciphertext = plaintext; 3807 } 3808 3809 /* check mechanism */ 3810 switch (DCA_MECH_FROM_CTX(ctx)) { 3811 case DES_CBC_MECH_INFO_TYPE: 3812 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3813 DR_ENCRYPT); 3814 break; 3815 case DES3_CBC_MECH_INFO_TYPE: 3816 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3817 DR_ENCRYPT | DR_TRIPLE); 3818 break; 3819 default: 3820 /* Should never reach here */ 3821 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type " 3822 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3823 error = CRYPTO_MECHANISM_INVALID; 3824 } 3825 3826 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error); 3827 3828 return (error); 3829 } 3830 3831 /* ARGSUSED */ 3832 static int 3833 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3834 crypto_req_handle_t req) 3835 { 3836 int error = CRYPTO_FAILED; 3837 dca_t *softc; 3838 /* LINTED E_FUNC_SET_NOT_USED */ 3839 int instance; 3840 3841 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3842 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3843 3844 /* extract softc and instance number from context */ 3845 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3846 DBG(softc, DENTRY, "dca_encrypt_final: started"); 3847 3848 /* check mechanism */ 3849 switch (DCA_MECH_FROM_CTX(ctx)) { 3850 case DES_CBC_MECH_INFO_TYPE: 3851 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT); 3852 break; 3853 case DES3_CBC_MECH_INFO_TYPE: 3854 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE); 3855 break; 3856 default: 3857 /* Should never reach here */ 3858 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type " 3859 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3860 error = CRYPTO_MECHANISM_INVALID; 3861 } 3862 3863 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error); 3864 3865 return (error); 3866 } 3867 3868 /* ARGSUSED */ 3869 static int 3870 dca_encrypt_atomic(crypto_provider_handle_t provider, 3871 crypto_session_id_t session_id, 
crypto_mechanism_t *mechanism, 3872 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext, 3873 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 3874 { 3875 int error = CRYPTO_FAILED; 3876 dca_t *softc = (dca_t *)provider; 3877 3878 DBG(softc, DENTRY, "dca_encrypt_atomic: started"); 3879 3880 if (ctx_template != NULL) 3881 return (CRYPTO_ARGUMENTS_BAD); 3882 3883 /* handle inplace ops */ 3884 if (!ciphertext) { 3885 ciphertext = plaintext; 3886 } 3887 3888 /* check mechanism */ 3889 switch (mechanism->cm_type) { 3890 case DES_CBC_MECH_INFO_TYPE: 3891 error = dca_3desatomic(provider, session_id, mechanism, key, 3892 plaintext, ciphertext, KM_SLEEP, req, 3893 DR_ENCRYPT | DR_ATOMIC); 3894 break; 3895 case DES3_CBC_MECH_INFO_TYPE: 3896 error = dca_3desatomic(provider, session_id, mechanism, key, 3897 plaintext, ciphertext, KM_SLEEP, req, 3898 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC); 3899 break; 3900 case RSA_PKCS_MECH_INFO_TYPE: 3901 case RSA_X_509_MECH_INFO_TYPE: 3902 error = dca_rsaatomic(provider, session_id, mechanism, key, 3903 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC); 3904 break; 3905 default: 3906 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type " 3907 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3908 error = CRYPTO_MECHANISM_INVALID; 3909 } 3910 3911 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 3912 ciphertext->cd_length = 0; 3913 } 3914 3915 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error); 3916 3917 return (error); 3918 } 3919 3920 /* ARGSUSED */ 3921 static int 3922 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3923 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3924 crypto_req_handle_t req) 3925 { 3926 int error = CRYPTO_FAILED; 3927 dca_t *softc; 3928 /* LINTED E_FUNC_SET_NOT_USED */ 3929 int instance; 3930 3931 /* extract softc and instance number from context */ 3932 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3933 DBG(softc, DENTRY, "dca_decrypt_init: started"); 3934 3935 /* check mechanism */ 3936 switch (mechanism->cm_type) { 3937 case DES_CBC_MECH_INFO_TYPE: 3938 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3939 DR_DECRYPT); 3940 break; 3941 case DES3_CBC_MECH_INFO_TYPE: 3942 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3943 DR_DECRYPT | DR_TRIPLE); 3944 break; 3945 case RSA_PKCS_MECH_INFO_TYPE: 3946 case RSA_X_509_MECH_INFO_TYPE: 3947 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3948 break; 3949 default: 3950 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type " 3951 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3952 error = CRYPTO_MECHANISM_INVALID; 3953 } 3954 3955 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error); 3956 3957 if (error == CRYPTO_SUCCESS) 3958 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3959 &softc->dca_ctx_list_lock); 3960 3961 return (error); 3962 } 3963 3964 /* ARGSUSED */ 3965 static int 3966 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3967 crypto_data_t *plaintext, crypto_req_handle_t req) 3968 { 3969 int error = CRYPTO_FAILED; 3970 dca_t *softc; 3971 /* LINTED E_FUNC_SET_NOT_USED */ 3972 int instance; 3973 3974 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3975 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3976 3977 /* extract softc and instance number from context */ 3978 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3979 DBG(softc, DENTRY, "dca_decrypt: started"); 3980 3981 /* handle inplace ops */ 3982 if (!plaintext) { 3983 dca_request_t 
*reqp = ctx->cc_provider_private; 3984 reqp->dr_flags |= DR_INPLACE; 3985 plaintext = ciphertext; 3986 } 3987 3988 /* check mechanism */ 3989 switch (DCA_MECH_FROM_CTX(ctx)) { 3990 case DES_CBC_MECH_INFO_TYPE: 3991 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT); 3992 break; 3993 case DES3_CBC_MECH_INFO_TYPE: 3994 error = dca_3des(ctx, ciphertext, plaintext, req, 3995 DR_DECRYPT | DR_TRIPLE); 3996 break; 3997 case RSA_PKCS_MECH_INFO_TYPE: 3998 case RSA_X_509_MECH_INFO_TYPE: 3999 error = dca_rsastart(ctx, ciphertext, plaintext, req, 4000 DCA_RSA_DEC); 4001 break; 4002 default: 4003 /* Should never reach here */ 4004 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type " 4005 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4006 error = CRYPTO_MECHANISM_INVALID; 4007 } 4008 4009 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 4010 (error != CRYPTO_BUFFER_TOO_SMALL)) { 4011 if (plaintext) 4012 plaintext->cd_length = 0; 4013 } 4014 4015 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error); 4016 4017 return (error); 4018 } 4019 4020 /* ARGSUSED */ 4021 static int 4022 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 4023 crypto_data_t *plaintext, crypto_req_handle_t req) 4024 { 4025 int error = CRYPTO_FAILED; 4026 dca_t *softc; 4027 /* LINTED E_FUNC_SET_NOT_USED */ 4028 int instance; 4029 4030 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4031 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4032 4033 /* extract softc and instance number from context */ 4034 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4035 DBG(softc, DENTRY, "dca_decrypt_update: started"); 4036 4037 /* handle inplace ops */ 4038 if (!plaintext) { 4039 dca_request_t *reqp = ctx->cc_provider_private; 4040 reqp->dr_flags |= DR_INPLACE; 4041 plaintext = ciphertext; 4042 } 4043 4044 /* check mechanism */ 4045 switch (DCA_MECH_FROM_CTX(ctx)) { 4046 case DES_CBC_MECH_INFO_TYPE: 4047 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 4048 DR_DECRYPT); 4049 break; 4050 case DES3_CBC_MECH_INFO_TYPE: 4051 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 4052 DR_DECRYPT | DR_TRIPLE); 4053 break; 4054 default: 4055 /* Should never reach here */ 4056 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type " 4057 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4058 error = CRYPTO_MECHANISM_INVALID; 4059 } 4060 4061 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error); 4062 4063 return (error); 4064 } 4065 4066 /* ARGSUSED */ 4067 static int 4068 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext, 4069 crypto_req_handle_t req) 4070 { 4071 int error = CRYPTO_FAILED; 4072 dca_t *softc; 4073 /* LINTED E_FUNC_SET_NOT_USED */ 4074 int instance; 4075 4076 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4077 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4078 4079 /* extract softc and instance number from context */ 4080 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4081 DBG(softc, DENTRY, "dca_decrypt_final: started"); 4082 4083 /* check mechanism */ 4084 switch (DCA_MECH_FROM_CTX(ctx)) { 4085 case DES_CBC_MECH_INFO_TYPE: 4086 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT); 4087 break; 4088 case DES3_CBC_MECH_INFO_TYPE: 4089 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE); 4090 break; 4091 default: 4092 /* Should never reach here */ 4093 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type " 4094 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4095 error = CRYPTO_MECHANISM_INVALID; 4096 } 4097 4098 
DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error); 4099 4100 return (error); 4101 } 4102 4103 /* ARGSUSED */ 4104 static int 4105 dca_decrypt_atomic(crypto_provider_handle_t provider, 4106 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4107 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext, 4108 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4109 { 4110 int error = CRYPTO_FAILED; 4111 dca_t *softc = (dca_t *)provider; 4112 4113 DBG(softc, DENTRY, "dca_decrypt_atomic: started"); 4114 4115 if (ctx_template != NULL) 4116 return (CRYPTO_ARGUMENTS_BAD); 4117 4118 /* handle inplace ops */ 4119 if (!plaintext) { 4120 plaintext = ciphertext; 4121 } 4122 4123 /* check mechanism */ 4124 switch (mechanism->cm_type) { 4125 case DES_CBC_MECH_INFO_TYPE: 4126 error = dca_3desatomic(provider, session_id, mechanism, key, 4127 ciphertext, plaintext, KM_SLEEP, req, 4128 DR_DECRYPT | DR_ATOMIC); 4129 break; 4130 case DES3_CBC_MECH_INFO_TYPE: 4131 error = dca_3desatomic(provider, session_id, mechanism, key, 4132 ciphertext, plaintext, KM_SLEEP, req, 4133 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC); 4134 break; 4135 case RSA_PKCS_MECH_INFO_TYPE: 4136 case RSA_X_509_MECH_INFO_TYPE: 4137 error = dca_rsaatomic(provider, session_id, mechanism, key, 4138 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC); 4139 break; 4140 default: 4141 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type " 4142 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4143 error = CRYPTO_MECHANISM_INVALID; 4144 } 4145 4146 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 4147 plaintext->cd_length = 0; 4148 } 4149 4150 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error); 4151 4152 return (error); 4153 } 4154 4155 /* 4156 * Sign entry points. 
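 * Only single-part RSA and DSA signing is implemented; the multi-part
 * dca_sign_update() and dca_sign_final() entry points below always fail
 * with CRYPTO_MECHANISM_INVALID.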
/*
 * Sign entry points.
 */

/* ARGSUSED */
static int
dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_sign(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_update: started\n");

	cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);

	return (error);
}

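/*
 * dca_sign_update() above and dca_sign_final() below exist only to
 * satisfy the SPI: RSA and DSA are single-part mechanisms, so a
 * correctly behaving framework never issues multi-part sign calls.
 * The cmn_err() warning is purely a diagnostic for misbehaving
 * consumers; CRYPTO_MECHANISM_INVALID is always returned.
 */
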
/* ARGSUSED */
static int
dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_final: started\n");

	cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);

	return (error);
}

static int
dca_sign_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_sign_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

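/*
 * Note that each successful *_init entry point links the new context
 * onto softc->dca_ctx_list (under dca_ctx_list_lock) via dca_enlist2().
 * dca_free_context() removes it again; anything still on the list at
 * detach time is reclaimed by dca_free_context_list().
 */
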
static int
dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_sign_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);

	return (error);
}

static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Verify entry points.
 */

/* ARGSUSED */
static int
dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

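/*
 * For RSA verify the signature is the input operand: dca_verify()
 * below hands (signature, data) to dca_rsastart() in the opposite
 * order from dca_sign(), since the operation effectively decrypts
 * the signature with the public key and compares the result against
 * the supplied data.
 */
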
static int
dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_verify(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_update: started\n");

	cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);

	return (error);
}

/* ARGSUSED */
static int
dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_final: started\n");

	cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);

	return (error);
}

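/*
 * Verify-recover, in the style of the PKCS#11 C_VerifyRecover
 * operation, returns the data recovered from the signature itself.
 * Only RSA supports recovery, which is why the entry points below
 * have no DSA case.
 */
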
/* ARGSUSED */
static int
dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		/* error already holds CRYPTO_MECHANISM_INVALID */
		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
	}

	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}

static int
dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_data_t *data, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_verify_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
		break;
	default:
		/* error already holds CRYPTO_MECHANISM_INVALID */
		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
	}

	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);

	return (error);
}

static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY,
	    "dca_verify_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Random number entry points.
 */

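/*
 * A minimal sketch of how the framework exercises the RNG entry
 * point below (the buffer name and size are illustrative only):
 *
 *	uchar_t rbuf[64];
 *	error = dca_generate_random(provider, session_id,
 *	    rbuf, sizeof (rbuf), req);
 */
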
/* ARGSUSED */
static int
dca_generate_random(crypto_provider_handle_t provider,
    crypto_session_id_t session_id,
    uchar_t *buf, size_t len, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	instance = ddi_get_instance(softc->dca_dip);
	DBG(softc, DENTRY, "dca_generate_random: started");

	error = dca_rng(softc, buf, len, req);

	DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);

	return (error);
}

/*
 * Context management entry points.
 */

int
dca_free_context(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_free_context: entered");

	if (ctx->cc_provider_private == NULL)
		return (error);

	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);

	error = dca_free_context_low(ctx);

	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);

	return (error);
}

static int
dca_free_context_low(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
	case DES3_CBC_MECH_INFO_TYPE:
		dca_3desctxfree(ctx);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		dca_rsactxfree(ctx);
		break;
	case DSA_MECH_INFO_TYPE:
		dca_dsactxfree(ctx);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	return (error);
}

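/*
 * dca_free_context() above is the framework-visible destructor: it
 * unlinks the context from the tracking list (the mirror image of the
 * dca_enlist2() call in the *_init paths) before handing off to
 * dca_free_context_low(), which dispatches to the mechanism-specific
 * free routine.  dca_free_context_low() is shared with the detach
 * path below, which wraps each leftover list node in a dummy
 * crypto_ctx_t.
 */
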
/* Free any unfreed private contexts; called from detach. */
static void
dca_free_context_list(dca_t *dca)
{
	dca_listnode_t *node;
	crypto_ctx_t ctx;

	(void) memset(&ctx, 0, sizeof (ctx));
	ctx.cc_provider = dca;

	while ((node = dca_delist2(&dca->dca_ctx_list,
	    &dca->dca_ctx_list_lock)) != NULL) {
		ctx.cc_provider_private = node;
		(void) dca_free_context_low(&ctx);
	}
}

static int
ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
}

static int
ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	int rv;

	rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
	/* The asymmetric cipher slot also supports random numbers */
	ext_info->ei_flags |= CRYPTO_EXTF_RNG;

	return (rv);
}

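/*
 * The ei_* strings filled in below follow the PKCS#11 style of
 * fixed-width, space-padded fields with no NUL terminator, hence the
 * memset() padding after each sprintf().
 */
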
/* ARGSUSED */
static int
ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
{
	dca_t *dca = (dca_t *)prov;
	int len;

	/* Label */
	(void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
	    ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
	len = strlen((char *)ext_info->ei_label);
	(void) memset(ext_info->ei_label + len, ' ',
	    CRYPTO_EXT_SIZE_LABEL - len);

	/* Manufacturer ID */
	(void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
	    DCA_MANUFACTURER_ID);
	len = strlen((char *)ext_info->ei_manufacturerID);
	(void) memset(ext_info->ei_manufacturerID + len, ' ',
	    CRYPTO_EXT_SIZE_MANUF - len);

	/* Model (use "%s" so the model string is never parsed as a format) */
	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);

	DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);

	len = strlen((char *)ext_info->ei_model);
	(void) memset(ext_info->ei_model + len, ' ',
	    CRYPTO_EXT_SIZE_MODEL - len);

	/* Serial number; blank for Deimos */
	(void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);

	ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;

	ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_hardware_version.cv_major = 0;
	ext_info->ei_hardware_version.cv_minor = 0;
	ext_info->ei_firmware_version.cv_major = 0;
	ext_info->ei_firmware_version.cv_minor = 0;

	/* Time; need not be supplied for a token without a clock */
	ext_info->ei_time[0] = '\000';

	return (CRYPTO_SUCCESS);
}

static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}

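/*
 * The "fm-capable" property read in dca_fma_init() allows the default
 * capability mask to be overridden from dca.conf; for example, a
 * hypothetical configuration line (not shipped in the default
 * dca.conf):
 *
 *	fm-capable=0;
 *
 * would disable FMA registration entirely, which dca_fma_init()
 * handles by restoring the default access and DMA attributes.
 */
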
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}


/*
 * The IO fault service error handling callback function.
 */
/*ARGSUSED*/
static int
dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	dca_t *dca = (dca_t *)impl_data;

	pci_ereport_post(dip, err, NULL);
	if (err->fme_status == DDI_FM_FATAL) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "fatal PCI error in FMA callback.");
	}
	return (err->fme_status);
}


static int
dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_acc_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

int
dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_dma_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static uint64_t
dca_ena(uint64_t ena)
{
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	else
		ena = fm_ena_increment(ena);
	return (ena);
}

static char *
dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
{
	if (strstr(model, "500"))
		return (dca_fma_eclass_sca500[index]);
	else
		return (dca_fma_eclass_sca1000[index]);
}