1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Deimos - cryptographic acceleration based upon Broadcom 582x. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/modctl.h> 35 #include <sys/conf.h> 36 #include <sys/devops.h> 37 #include <sys/ddi.h> 38 #include <sys/sunddi.h> 39 #include <sys/cmn_err.h> 40 #include <sys/varargs.h> 41 #include <sys/file.h> 42 #include <sys/stat.h> 43 #include <sys/kmem.h> 44 #include <sys/ioccom.h> 45 #include <sys/open.h> 46 #include <sys/cred.h> 47 #include <sys/kstat.h> 48 #include <sys/strsun.h> 49 #include <sys/note.h> 50 #include <sys/crypto/common.h> 51 #include <sys/crypto/spi.h> 52 #include <sys/ddifm.h> 53 #include <sys/fm/protocol.h> 54 #include <sys/fm/util.h> 55 #include <sys/fm/io/ddi.h> 56 #include <sys/crypto/dca.h> 57 58 /* 59 * Core Deimos driver. 60 */ 61 62 static void dca_enlist2(dca_listnode_t *, dca_listnode_t *, 63 kmutex_t *); 64 static void dca_rmlist2(dca_listnode_t *node, kmutex_t *); 65 static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *); 66 static void dca_free_context_list(dca_t *dca); 67 static int dca_free_context_low(crypto_ctx_t *ctx); 68 static int dca_attach(dev_info_t *, ddi_attach_cmd_t); 69 static int dca_detach(dev_info_t *, ddi_detach_cmd_t); 70 static int dca_suspend(dca_t *); 71 static int dca_resume(dca_t *); 72 static int dca_init(dca_t *); 73 static int dca_reset(dca_t *, int); 74 static int dca_initworklist(dca_t *, dca_worklist_t *); 75 static void dca_uninit(dca_t *); 76 static void dca_initq(dca_listnode_t *); 77 static void dca_enqueue(dca_listnode_t *, dca_listnode_t *); 78 static dca_listnode_t *dca_dequeue(dca_listnode_t *); 79 static dca_listnode_t *dca_unqueue(dca_listnode_t *); 80 static dca_request_t *dca_newreq(dca_t *); 81 static dca_work_t *dca_getwork(dca_t *, int); 82 static void dca_freework(dca_work_t *); 83 static dca_work_t *dca_newwork(dca_t *); 84 static void dca_destroywork(dca_work_t *); 85 static void dca_schedule(dca_t *, int); 86 static void dca_reclaim(dca_t *, int); 87 static uint_t dca_intr(char *); 88 static void dca_failure(dca_t *, ddi_fault_location_t, 89 dca_fma_eclass_t index, uint64_t, int, char *, ...); 90 static void dca_jobtimeout(void *); 91 static int dca_drain(dca_t *); 92 static void dca_undrain(dca_t *); 93 static void dca_rejectjobs(dca_t *); 94 95 #ifdef SCHEDDELAY 96 static void dca_schedtimeout(void *); 97 #endif 98 99 /* 100 * We want these inlined for performance. 101 */ 102 #ifndef DEBUG 103 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork) 104 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done) 105 #pragma inline(dca_reverse, dca_length) 106 #endif 107 108 /* 109 * Device operations. 110 */ 111 static struct dev_ops devops = { 112 DEVO_REV, /* devo_rev */ 113 0, /* devo_refcnt */ 114 nodev, /* devo_getinfo */ 115 nulldev, /* devo_identify */ 116 nulldev, /* devo_probe */ 117 dca_attach, /* devo_attach */ 118 dca_detach, /* devo_detach */ 119 nodev, /* devo_reset */ 120 NULL, /* devo_cb_ops */ 121 NULL, /* devo_bus_ops */ 122 ddi_power /* devo_power */ 123 }; 124 125 #define IDENT "PCI Crypto Accelerator 2.0" 126 #define IDENT_SYM "Crypto Accel Sym 2.0" 127 #define IDENT_ASYM "Crypto Accel Asym 2.0" 128 129 /* Space-padded, will be filled in dynamically during registration */ 130 #define IDENT3 "PCI Crypto Accelerator Mod 2.0" 131 132 #define VENDOR "Sun Microsystems, Inc." 133 134 #define STALETIME (30 * SECOND) 135 136 #define crypto_prov_notify crypto_provider_notification 137 /* A 28 char function name doesn't leave much line space */ 138 139 /* 140 * Module linkage. 141 */ 142 static struct modldrv modldrv = { 143 &mod_driverops, /* drv_modops */ 144 IDENT, /* drv_linkinfo */ 145 &devops, /* drv_dev_ops */ 146 }; 147 148 extern struct mod_ops mod_cryptoops; 149 150 static struct modlcrypto modlcrypto = { 151 &mod_cryptoops, 152 IDENT3 153 }; 154 155 static struct modlinkage modlinkage = { 156 MODREV_1, /* ml_rev */ 157 &modldrv, /* ml_linkage */ 158 &modlcrypto, 159 NULL 160 }; 161 162 /* 163 * CSPI information (entry points, provider info, etc.) 164 */ 165 166 /* Mechanisms for the symmetric cipher provider */ 167 static crypto_mech_info_t dca_mech_info_tab1[] = { 168 /* DES-CBC */ 169 {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE, 170 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | 171 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC, 172 DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, 173 /* 3DES-CBC */ 174 {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE, 175 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | 176 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC, 177 DES3_KEY_LEN, DES3_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES} 178 }; 179 180 /* Mechanisms for the asymmetric cipher provider */ 181 static crypto_mech_info_t dca_mech_info_tab2[] = { 182 /* DSA */ 183 {SUN_CKM_DSA, DSA_MECH_INFO_TYPE, 184 CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY | 185 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC, 186 DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8, 187 CRYPTO_KEYSIZE_UNIT_IN_BITS}, 188 189 /* RSA */ 190 {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE, 191 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN | 192 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY | 193 CRYPTO_FG_VERIFY_RECOVER | 194 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | 195 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC | 196 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC, 197 RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8, 198 CRYPTO_KEYSIZE_UNIT_IN_BITS}, 199 {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE, 200 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN | 201 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY | 202 CRYPTO_FG_VERIFY_RECOVER | 203 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC | 204 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC | 205 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC, 206 RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8, 207 CRYPTO_KEYSIZE_UNIT_IN_BITS} 208 }; 209 210 static void dca_provider_status(crypto_provider_handle_t, uint_t *); 211 212 static crypto_control_ops_t dca_control_ops = { 213 dca_provider_status 214 }; 215 216 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *, 217 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 218 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 219 crypto_req_handle_t); 220 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *, 221 crypto_data_t *, crypto_req_handle_t); 222 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *, 223 crypto_req_handle_t); 224 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, 225 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 226 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 227 228 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *, 229 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 230 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 231 crypto_req_handle_t); 232 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *, 233 crypto_data_t *, crypto_req_handle_t); 234 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *, 235 crypto_req_handle_t); 236 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, 237 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 238 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 239 240 static crypto_cipher_ops_t dca_cipher_ops = { 241 dca_encrypt_init, 242 dca_encrypt, 243 dca_encrypt_update, 244 dca_encrypt_final, 245 dca_encrypt_atomic, 246 dca_decrypt_init, 247 dca_decrypt, 248 dca_decrypt_update, 249 dca_decrypt_final, 250 dca_decrypt_atomic 251 }; 252 253 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, 254 crypto_spi_ctx_template_t, crypto_req_handle_t); 255 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 256 crypto_req_handle_t); 257 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *, 258 crypto_req_handle_t); 259 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *, 260 crypto_req_handle_t); 261 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t, 262 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, 263 crypto_spi_ctx_template_t, crypto_req_handle_t); 264 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *, 265 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 266 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 267 crypto_req_handle_t); 268 static int dca_sign_recover_atomic(crypto_provider_handle_t, 269 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 270 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 271 272 static crypto_sign_ops_t dca_sign_ops = { 273 dca_sign_init, 274 dca_sign, 275 dca_sign_update, 276 dca_sign_final, 277 dca_sign_atomic, 278 dca_sign_recover_init, 279 dca_sign_recover, 280 dca_sign_recover_atomic 281 }; 282 283 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *, 284 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 285 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, 286 crypto_req_handle_t); 287 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *, 288 crypto_req_handle_t); 289 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *, 290 crypto_req_handle_t); 291 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, 292 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 293 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 294 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *, 295 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 296 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *, 297 crypto_data_t *, crypto_req_handle_t); 298 static int dca_verify_recover_atomic(crypto_provider_handle_t, 299 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, 300 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); 301 302 static crypto_verify_ops_t dca_verify_ops = { 303 dca_verify_init, 304 dca_verify, 305 dca_verify_update, 306 dca_verify_final, 307 dca_verify_atomic, 308 dca_verify_recover_init, 309 dca_verify_recover, 310 dca_verify_recover_atomic 311 }; 312 313 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t, 314 uchar_t *, size_t, crypto_req_handle_t); 315 316 static crypto_random_number_ops_t dca_random_number_ops = { 317 NULL, 318 dca_generate_random 319 }; 320 321 static int ext_info_sym(crypto_provider_handle_t prov, 322 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq); 323 static int ext_info_asym(crypto_provider_handle_t prov, 324 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq); 325 static int ext_info_base(crypto_provider_handle_t prov, 326 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id); 327 328 static crypto_provider_management_ops_t dca_provmanage_ops_1 = { 329 ext_info_sym, /* ext_info */ 330 NULL, /* init_token */ 331 NULL, /* init_pin */ 332 NULL /* set_pin */ 333 }; 334 335 static crypto_provider_management_ops_t dca_provmanage_ops_2 = { 336 ext_info_asym, /* ext_info */ 337 NULL, /* init_token */ 338 NULL, /* init_pin */ 339 NULL /* set_pin */ 340 }; 341 342 int dca_free_context(crypto_ctx_t *); 343 344 static crypto_ctx_ops_t dca_ctx_ops = { 345 NULL, 346 dca_free_context 347 }; 348 349 /* Operations for the symmetric cipher provider */ 350 static crypto_ops_t dca_crypto_ops1 = { 351 &dca_control_ops, 352 NULL, /* digest_ops */ 353 &dca_cipher_ops, 354 NULL, /* mac_ops */ 355 NULL, /* sign_ops */ 356 NULL, /* verify_ops */ 357 NULL, /* dual_ops */ 358 NULL, /* cipher_mac_ops */ 359 NULL, /* random_number_ops */ 360 NULL, /* session_ops */ 361 NULL, /* object_ops */ 362 NULL, /* key_ops */ 363 &dca_provmanage_ops_1, /* management_ops */ 364 &dca_ctx_ops 365 }; 366 367 /* Operations for the asymmetric cipher provider */ 368 static crypto_ops_t dca_crypto_ops2 = { 369 &dca_control_ops, 370 NULL, /* digest_ops */ 371 &dca_cipher_ops, 372 NULL, /* mac_ops */ 373 &dca_sign_ops, 374 &dca_verify_ops, 375 NULL, /* dual_ops */ 376 NULL, /* cipher_mac_ops */ 377 &dca_random_number_ops, 378 NULL, /* session_ops */ 379 NULL, /* object_ops */ 380 NULL, /* key_ops */ 381 &dca_provmanage_ops_2, /* management_ops */ 382 &dca_ctx_ops 383 }; 384 385 /* Provider information for the symmetric cipher provider */ 386 static crypto_provider_info_t dca_prov_info1 = { 387 CRYPTO_SPI_VERSION_1, 388 NULL, /* pi_provider_description */ 389 CRYPTO_HW_PROVIDER, 390 NULL, /* pi_provider_dev */ 391 NULL, /* pi_provider_handle */ 392 &dca_crypto_ops1, 393 sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t), 394 dca_mech_info_tab1, 395 0, /* pi_logical_provider_count */ 396 NULL /* pi_logical_providers */ 397 }; 398 399 /* Provider information for the asymmetric cipher provider */ 400 static crypto_provider_info_t dca_prov_info2 = { 401 CRYPTO_SPI_VERSION_1, 402 NULL, /* pi_provider_description */ 403 CRYPTO_HW_PROVIDER, 404 NULL, /* pi_provider_dev */ 405 NULL, /* pi_provider_handle */ 406 &dca_crypto_ops2, 407 sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t), 408 dca_mech_info_tab2, 409 0, /* pi_logical_provider_count */ 410 NULL /* pi_logical_providers */ 411 }; 412 413 /* Convenience macros */ 414 /* Retrieve the softc and instance number from a SPI crypto context */ 415 #define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \ 416 (softc) = (dca_t *)(ctx)->cc_provider; \ 417 (instance) = ddi_get_instance((softc)->dca_dip); \ 418 } 419 420 #define DCA_MECH_FROM_CTX(ctx) \ 421 (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type) 422 423 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset, 424 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags, 425 dca_chain_t *head, int *n_chain); 426 static uint64_t dca_ena(uint64_t ena); 427 static caddr_t dca_bufdaddr_out(crypto_data_t *data); 428 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index); 429 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle, 430 dca_fma_eclass_t eclass_index); 431 432 static void dca_fma_init(dca_t *dca); 433 static void dca_fma_fini(dca_t *dca); 434 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 435 const void *impl_data); 436 437 438 static dca_device_t dca_devices[] = { 439 /* Broadcom vanilla variants */ 440 { 0x14e4, 0x5820, "Broadcom 5820" }, 441 { 0x14e4, 0x5821, "Broadcom 5821" }, 442 { 0x14e4, 0x5822, "Broadcom 5822" }, 443 { 0x14e4, 0x5825, "Broadcom 5825" }, 444 /* Sun specific OEMd variants */ 445 { 0x108e, 0x5454, "SCA" }, 446 { 0x108e, 0x5455, "SCA 1000" }, 447 { 0x108e, 0x5457, "SCA 500" }, 448 /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */ 449 { 0x108e, 0x1, "SCA 500" }, 450 }; 451 452 /* 453 * Device attributes. 454 */ 455 static struct ddi_device_acc_attr dca_regsattr = { 456 DDI_DEVICE_ATTR_V0, 457 DDI_STRUCTURE_LE_ACC, 458 DDI_STRICTORDER_ACC, 459 DDI_FLAGERR_ACC 460 }; 461 462 static struct ddi_device_acc_attr dca_devattr = { 463 DDI_DEVICE_ATTR_V0, 464 DDI_STRUCTURE_LE_ACC, 465 DDI_STRICTORDER_ACC, 466 DDI_FLAGERR_ACC 467 }; 468 469 #if !defined(i386) && !defined(__i386) 470 static struct ddi_device_acc_attr dca_bufattr = { 471 DDI_DEVICE_ATTR_V0, 472 DDI_NEVERSWAP_ACC, 473 DDI_STRICTORDER_ACC, 474 DDI_FLAGERR_ACC 475 }; 476 #endif 477 478 static struct ddi_dma_attr dca_dmaattr = { 479 DMA_ATTR_V0, /* dma_attr_version */ 480 0x0, /* dma_attr_addr_lo */ 481 0xffffffffUL, /* dma_attr_addr_hi */ 482 0x00ffffffUL, /* dma_attr_count_max */ 483 0x40, /* dma_attr_align */ 484 0x40, /* dma_attr_burstsizes */ 485 0x1, /* dma_attr_minxfer */ 486 0x00ffffffUL, /* dma_attr_maxxfer */ 487 0xffffffffUL, /* dma_attr_seg */ 488 #if defined(i386) || defined(__i386) || defined(__amd64) 489 512, /* dma_attr_sgllen */ 490 #else 491 1, /* dma_attr_sgllen */ 492 #endif 493 1, /* dma_attr_granular */ 494 DDI_DMA_FLAGERR /* dma_attr_flags */ 495 }; 496 497 static void *dca_state = NULL; 498 int dca_mindma = 2500; 499 500 /* 501 * FMA eclass string definitions. Note that these string arrays must be 502 * consistent with the dca_fma_eclass_t enum. 503 */ 504 static char *dca_fma_eclass_sca1000[] = { 505 "sca1000.hw.device", 506 "sca1000.hw.timeout", 507 "sca1000.none" 508 }; 509 510 static char *dca_fma_eclass_sca500[] = { 511 "sca500.hw.device", 512 "sca500.hw.timeout", 513 "sca500.none" 514 }; 515 516 /* 517 * DDI entry points. 518 */ 519 int 520 _init(void) 521 { 522 int rv; 523 524 DBG(NULL, DMOD, "dca: in _init"); 525 526 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) { 527 /* this should *never* happen! */ 528 return (rv); 529 } 530 531 if ((rv = mod_install(&modlinkage)) != 0) { 532 /* cleanup here */ 533 ddi_soft_state_fini(&dca_state); 534 return (rv); 535 } 536 537 return (0); 538 } 539 540 int 541 _fini(void) 542 { 543 int rv; 544 545 DBG(NULL, DMOD, "dca: in _fini"); 546 547 if ((rv = mod_remove(&modlinkage)) == 0) { 548 /* cleanup here */ 549 ddi_soft_state_fini(&dca_state); 550 } 551 return (rv); 552 } 553 554 int 555 _info(struct modinfo *modinfop) 556 { 557 DBG(NULL, DMOD, "dca: in _info"); 558 559 return (mod_info(&modlinkage, modinfop)); 560 } 561 562 int 563 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 564 { 565 ddi_acc_handle_t pci; 566 int instance; 567 ddi_iblock_cookie_t ibc; 568 int intr_added = 0; 569 dca_t *dca; 570 ushort_t venid; 571 ushort_t devid; 572 ushort_t revid; 573 ushort_t subsysid; 574 ushort_t subvenid; 575 int i; 576 int ret; 577 char ID[64]; 578 static char *unknowndev = "Unknown device"; 579 580 #if DEBUG 581 /* these are only used for debugging */ 582 ushort_t pcicomm; 583 ushort_t pcistat; 584 uchar_t cachelinesz; 585 uchar_t mingnt; 586 uchar_t maxlat; 587 uchar_t lattmr; 588 #endif 589 590 instance = ddi_get_instance(dip); 591 592 DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance); 593 594 switch (cmd) { 595 case DDI_RESUME: 596 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) { 597 dca_diperror(dip, "no soft state in detach"); 598 return (DDI_FAILURE); 599 } 600 /* assumption: we won't be DDI_DETACHed until we return */ 601 return (dca_resume(dca)); 602 case DDI_ATTACH: 603 break; 604 default: 605 return (DDI_FAILURE); 606 } 607 608 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 609 dca_diperror(dip, "slot does not support PCI bus-master"); 610 return (DDI_FAILURE); 611 } 612 613 if (ddi_intr_hilevel(dip, 0) != 0) { 614 dca_diperror(dip, "hilevel interrupts not supported"); 615 return (DDI_FAILURE); 616 } 617 618 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) { 619 dca_diperror(dip, "unable to setup PCI config handle"); 620 return (DDI_FAILURE); 621 } 622 623 /* common PCI attributes */ 624 venid = pci_config_get16(pci, PCI_VENID); 625 devid = pci_config_get16(pci, PCI_DEVID); 626 revid = pci_config_get8(pci, PCI_REVID); 627 subvenid = pci_config_get16(pci, PCI_SUBVENID); 628 subsysid = pci_config_get16(pci, PCI_SUBSYSID); 629 630 /* 631 * Broadcom-specific timings. 632 * We disable these timers/counters since they can cause 633 * incorrect false failures when the bus is just a little 634 * bit slow, or busy. 635 */ 636 pci_config_put8(pci, PCI_TRDYTO, 0); 637 pci_config_put8(pci, PCI_RETRIES, 0); 638 639 /* initialize PCI access settings */ 640 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE | 641 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE); 642 643 /* set up our PCI latency timer */ 644 pci_config_put8(pci, PCI_LATTMR, 0x40); 645 646 #if DEBUG 647 /* read registers (for debugging) */ 648 pcicomm = pci_config_get16(pci, PCI_COMM); 649 pcistat = pci_config_get16(pci, PCI_STATUS); 650 cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ); 651 mingnt = pci_config_get8(pci, PCI_MINGNT); 652 maxlat = pci_config_get8(pci, PCI_MAXLAT); 653 lattmr = pci_config_get8(pci, PCI_LATTMR); 654 #endif 655 656 pci_config_teardown(&pci); 657 658 if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) { 659 dca_diperror(dip, "unable to get iblock cookie"); 660 return (DDI_FAILURE); 661 } 662 663 if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) { 664 dca_diperror(dip, "unable to allocate soft state"); 665 return (DDI_FAILURE); 666 } 667 668 dca = ddi_get_soft_state(dca_state, instance); 669 ASSERT(dca != NULL); 670 dca->dca_dip = dip; 671 WORKLIST(dca, MCR1)->dwl_prov = NULL; 672 WORKLIST(dca, MCR2)->dwl_prov = NULL; 673 /* figure pagesize */ 674 dca->dca_pagesize = ddi_ptob(dip, 1); 675 676 /* 677 * Search for the device in our supported devices table. This 678 * is here for two reasons. First, we want to ensure that 679 * only Sun-qualified (and presumably Sun-labeled) devices can 680 * be used with this driver. Second, some devices have 681 * specific differences. E.g. the 5821 has support for a 682 * special mode of RC4, deeper queues, power management, and 683 * other changes. Also, the export versions of some of these 684 * chips don't support RC4 or 3DES, so we catch that here. 685 * 686 * Note that we only look at the upper nibble of the device 687 * id, which is used to distinguish export vs. domestic 688 * versions of the chip. (The lower nibble is used for 689 * stepping information.) 690 */ 691 for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) { 692 /* 693 * Try to match the subsystem information first. 694 */ 695 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) && 696 subsysid && (subsysid == dca_devices[i].dd_device_id)) { 697 dca->dca_model = dca_devices[i].dd_model; 698 break; 699 } 700 /* 701 * Failing that, try the generic vendor and device id. 702 * Even if we find a match, we keep searching anyway, 703 * since we would prefer to find a match based on the 704 * subsystem ids. 705 */ 706 if ((venid == dca_devices[i].dd_vendor_id) && 707 (devid == dca_devices[i].dd_device_id)) { 708 dca->dca_model = dca_devices[i].dd_model; 709 } 710 } 711 /* try and handle an unrecognized device */ 712 if (dca->dca_model == NULL) { 713 dca->dca_model = unknowndev; 714 dca_error(dca, "device not recognized, not supported"); 715 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d", 716 i, venid, devid, revid); 717 } 718 719 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description", 720 dca->dca_model) != DDI_SUCCESS) { 721 dca_error(dca, "unable to create description property"); 722 return (DDI_FAILURE); 723 } 724 725 DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x", 726 pcicomm, pcistat, cachelinesz); 727 DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x", 728 mingnt, maxlat, lattmr); 729 730 /* 731 * initialize locks, etc. 732 */ 733 (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc); 734 735 /* use RNGSHA1 by default */ 736 if (ddi_getprop(DDI_DEV_T_ANY, dip, 737 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) { 738 dca->dca_flags |= DCA_RNGSHA1; 739 } 740 741 /* initialize FMA */ 742 dca_fma_init(dca); 743 744 /* initialize some key data structures */ 745 if (dca_init(dca) != DDI_SUCCESS) { 746 goto failed; 747 } 748 749 /* initialize kstats */ 750 dca_ksinit(dca); 751 752 /* setup access to registers */ 753 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs, 754 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) { 755 dca_error(dca, "unable to map registers"); 756 goto failed; 757 } 758 759 DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1)); 760 DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL)); 761 DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT)); 762 DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA)); 763 DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2)); 764 765 /* reset the chip */ 766 if (dca_reset(dca, 0) < 0) { 767 goto failed; 768 } 769 770 /* initialize the chip */ 771 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64); 772 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 773 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 774 goto failed; 775 } 776 777 /* add the interrupt */ 778 if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr, 779 (void *)dca) != DDI_SUCCESS) { 780 DBG(dca, DWARN, "ddi_add_intr failed"); 781 goto failed; 782 } else { 783 intr_added = 1; 784 } 785 786 /* enable interrupts on the device */ 787 /* 788 * XXX: Note, 5820A1 errata indicates that this may clobber 789 * bits 24 and 23, which affect the speed of the RNG. Since 790 * we always want to run in full-speed mode, this should be 791 * harmless. 792 */ 793 SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 794 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 795 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 796 goto failed; 797 } 798 799 /* register MCR1 with the crypto framework */ 800 /* Be careful not to exceed 32 chars */ 801 (void) sprintf(ID, "%s/%d %s", 802 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM); 803 dca_prov_info1.pi_provider_description = ID; 804 dca_prov_info1.pi_provider_dev.pd_hw = dip; 805 dca_prov_info1.pi_provider_handle = dca; 806 if ((ret = crypto_register_provider(&dca_prov_info1, 807 &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) { 808 cmn_err(CE_WARN, 809 "crypto_register_provider() failed (%d) for MCR1", ret); 810 goto failed; 811 } 812 813 /* register MCR2 with the crypto framework */ 814 /* Be careful not to exceed 32 chars */ 815 (void) sprintf(ID, "%s/%d %s", 816 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM); 817 dca_prov_info2.pi_provider_description = ID; 818 dca_prov_info2.pi_provider_dev.pd_hw = dip; 819 dca_prov_info2.pi_provider_handle = dca; 820 if ((ret = crypto_register_provider(&dca_prov_info2, 821 &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) { 822 cmn_err(CE_WARN, 823 "crypto_register_provider() failed (%d) for MCR2", ret); 824 goto failed; 825 } 826 827 crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov, 828 CRYPTO_PROVIDER_READY); 829 crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov, 830 CRYPTO_PROVIDER_READY); 831 832 /* Initialize the local random number pool for this instance */ 833 if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) { 834 goto failed; 835 } 836 837 mutex_enter(&dca->dca_intrlock); 838 dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca, 839 drv_usectohz(SECOND)); 840 mutex_exit(&dca->dca_intrlock); 841 842 ddi_set_driver_private(dip, (caddr_t)dca); 843 844 ddi_report_dev(dip); 845 846 if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) { 847 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED); 848 } 849 850 return (DDI_SUCCESS); 851 852 failed: 853 /* unregister from the crypto framework */ 854 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) { 855 (void) crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov); 856 } 857 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) { 858 (void) crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov); 859 } 860 if (intr_added) { 861 CLRBIT(dca, CSR_DMACTL, 862 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 863 /* unregister intr handler */ 864 ddi_remove_intr(dip, 0, dca->dca_icookie); 865 } 866 if (dca->dca_regs_handle) { 867 ddi_regs_map_free(&dca->dca_regs_handle); 868 } 869 if (dca->dca_intrstats) { 870 kstat_delete(dca->dca_intrstats); 871 } 872 if (dca->dca_ksp) { 873 kstat_delete(dca->dca_ksp); 874 } 875 dca_uninit(dca); 876 877 /* finalize FMA */ 878 dca_fma_fini(dca); 879 880 mutex_destroy(&dca->dca_intrlock); 881 ddi_soft_state_free(dca_state, instance); 882 return (DDI_FAILURE); 883 884 } 885 886 int 887 dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 888 { 889 int instance; 890 dca_t *dca; 891 timeout_id_t tid; 892 893 instance = ddi_get_instance(dip); 894 895 DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance); 896 897 switch (cmd) { 898 case DDI_SUSPEND: 899 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) { 900 dca_diperror(dip, "no soft state in detach"); 901 return (DDI_FAILURE); 902 } 903 /* assumption: we won't be DDI_DETACHed until we return */ 904 return (dca_suspend(dca)); 905 906 case DDI_DETACH: 907 break; 908 default: 909 return (DDI_FAILURE); 910 } 911 912 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) { 913 dca_diperror(dip, "no soft state in detach"); 914 return (DDI_FAILURE); 915 } 916 917 /* 918 * Unregister from kCF. 919 * This needs to be done at the beginning of detach. 920 */ 921 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) { 922 if (crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov) != 923 CRYPTO_SUCCESS) { 924 dca_error(dca, "unable to unregister MCR1 from kcf"); 925 return (DDI_FAILURE); 926 } 927 } 928 929 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) { 930 if (crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov) != 931 CRYPTO_SUCCESS) { 932 dca_error(dca, "unable to unregister MCR2 from kcf"); 933 return (DDI_FAILURE); 934 } 935 } 936 937 /* 938 * Cleanup the private context list. Once the 939 * crypto_unregister_provider returns, it is safe to do so. 940 */ 941 dca_free_context_list(dca); 942 943 /* Cleanup the local random number pool */ 944 dca_random_fini(dca); 945 946 /* send any jobs in the waitq back to kCF */ 947 dca_rejectjobs(dca); 948 949 /* untimeout the timeouts */ 950 mutex_enter(&dca->dca_intrlock); 951 tid = dca->dca_jobtid; 952 dca->dca_jobtid = 0; 953 mutex_exit(&dca->dca_intrlock); 954 if (tid) { 955 (void) untimeout(tid); 956 } 957 958 /* disable device interrupts */ 959 CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 960 961 /* unregister interrupt handlers */ 962 ddi_remove_intr(dip, 0, dca->dca_icookie); 963 964 /* release our regs handle */ 965 ddi_regs_map_free(&dca->dca_regs_handle); 966 967 /* toss out kstats */ 968 if (dca->dca_intrstats) { 969 kstat_delete(dca->dca_intrstats); 970 } 971 if (dca->dca_ksp) { 972 kstat_delete(dca->dca_ksp); 973 } 974 975 mutex_destroy(&dca->dca_intrlock); 976 dca_uninit(dca); 977 978 /* finalize FMA */ 979 dca_fma_fini(dca); 980 981 ddi_soft_state_free(dca_state, instance); 982 983 return (DDI_SUCCESS); 984 } 985 986 int 987 dca_resume(dca_t *dca) 988 { 989 ddi_acc_handle_t pci; 990 991 if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) { 992 dca_error(dca, "unable to setup PCI config handle"); 993 return (DDI_FAILURE); 994 } 995 996 /* 997 * Reprogram registers in PCI configuration space. 998 */ 999 1000 /* Broadcom-specific timers -- we disable them. */ 1001 pci_config_put8(pci, PCI_TRDYTO, 0); 1002 pci_config_put8(pci, PCI_RETRIES, 0); 1003 1004 /* initialize PCI access settings */ 1005 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE | 1006 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE); 1007 1008 /* set up our PCI latency timer */ 1009 pci_config_put8(pci, PCI_LATTMR, 0x40); 1010 1011 pci_config_teardown(&pci); 1012 1013 if (dca_reset(dca, 0) < 0) { 1014 dca_error(dca, "unable to reset device during resume"); 1015 return (DDI_FAILURE); 1016 } 1017 1018 /* 1019 * Now restore the card-specific CSRs. 1020 */ 1021 1022 /* restore endianness settings */ 1023 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64); 1024 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1025 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 1026 return (DDI_FAILURE); 1027 1028 /* restore interrupt enables */ 1029 SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE); 1030 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1031 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 1032 return (DDI_FAILURE); 1033 1034 /* resume scheduling jobs on the device */ 1035 dca_undrain(dca); 1036 1037 return (DDI_SUCCESS); 1038 } 1039 1040 int 1041 dca_suspend(dca_t *dca) 1042 { 1043 if ((dca_drain(dca)) != 0) { 1044 return (DDI_FAILURE); 1045 } 1046 if (dca_reset(dca, 0) < 0) { 1047 dca_error(dca, "unable to reset device during suspend"); 1048 return (DDI_FAILURE); 1049 } 1050 return (DDI_SUCCESS); 1051 } 1052 1053 /* 1054 * Hardware access stuff. 1055 */ 1056 int 1057 dca_reset(dca_t *dca, int failreset) 1058 { 1059 int i; 1060 1061 if (dca->dca_regs_handle == NULL) { 1062 return (-1); 1063 } 1064 1065 PUTCSR(dca, CSR_DMACTL, DMACTL_RESET); 1066 if (!failreset) { 1067 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1068 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 1069 return (-1); 1070 } 1071 1072 /* now wait for a reset */ 1073 for (i = 1; i < 100; i++) { 1074 uint32_t dmactl; 1075 drv_usecwait(100); 1076 dmactl = GETCSR(dca, CSR_DMACTL); 1077 if (!failreset) { 1078 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1079 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 1080 return (-1); 1081 } 1082 if ((dmactl & DMACTL_RESET) == 0) { 1083 DBG(dca, DCHATTY, "reset in %d usec", i * 100); 1084 return (0); 1085 } 1086 } 1087 if (!failreset) { 1088 dca_failure(dca, DDI_DEVICE_FAULT, 1089 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR, 1090 "timeout waiting for reset after %d usec", i * 100); 1091 } 1092 return (-1); 1093 } 1094 1095 int 1096 dca_initworklist(dca_t *dca, dca_worklist_t *wlp) 1097 { 1098 int i; 1099 int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR); 1100 1101 /* 1102 * Set up work queue. 1103 */ 1104 mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie); 1105 mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER, 1106 dca->dca_icookie); 1107 cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL); 1108 1109 mutex_enter(&wlp->dwl_lock); 1110 1111 dca_initq(&wlp->dwl_freereqs); 1112 dca_initq(&wlp->dwl_waitq); 1113 dca_initq(&wlp->dwl_freework); 1114 dca_initq(&wlp->dwl_runq); 1115 1116 for (i = 0; i < MAXWORK; i++) { 1117 dca_work_t *workp; 1118 1119 if ((workp = dca_newwork(dca)) == NULL) { 1120 dca_error(dca, "unable to allocate work"); 1121 mutex_exit(&wlp->dwl_lock); 1122 return (DDI_FAILURE); 1123 } 1124 workp->dw_wlp = wlp; 1125 dca_freework(workp); 1126 } 1127 mutex_exit(&wlp->dwl_lock); 1128 1129 for (i = 0; i < reqprealloc; i++) { 1130 dca_request_t *reqp; 1131 1132 if ((reqp = dca_newreq(dca)) == NULL) { 1133 dca_error(dca, "unable to allocate request"); 1134 return (DDI_FAILURE); 1135 } 1136 reqp->dr_dca = dca; 1137 reqp->dr_wlp = wlp; 1138 dca_freereq(reqp); 1139 } 1140 return (DDI_SUCCESS); 1141 } 1142 1143 int 1144 dca_init(dca_t *dca) 1145 { 1146 dca_worklist_t *wlp; 1147 1148 /* Initialize the private context list and the corresponding lock. */ 1149 mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL); 1150 dca_initq(&dca->dca_ctx_list); 1151 1152 /* 1153 * MCR1 algorithms. 1154 */ 1155 wlp = WORKLIST(dca, MCR1); 1156 (void) sprintf(wlp->dwl_name, "dca%d:mcr1", 1157 ddi_get_instance(dca->dca_dip)); 1158 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY, 1159 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1160 "mcr1_lowater", MCR1LOWATER); 1161 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY, 1162 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1163 "mcr1_hiwater", MCR1HIWATER); 1164 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY, 1165 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1166 "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR); 1167 wlp->dwl_dca = dca; 1168 wlp->dwl_mcr = MCR1; 1169 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) { 1170 return (DDI_FAILURE); 1171 } 1172 1173 /* 1174 * MCR2 algorithms. 1175 */ 1176 wlp = WORKLIST(dca, MCR2); 1177 (void) sprintf(wlp->dwl_name, "dca%d:mcr2", 1178 ddi_get_instance(dca->dca_dip)); 1179 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY, 1180 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1181 "mcr2_lowater", MCR2LOWATER); 1182 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY, 1183 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1184 "mcr2_hiwater", MCR2HIWATER); 1185 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY, 1186 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, 1187 "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR); 1188 wlp->dwl_dca = dca; 1189 wlp->dwl_mcr = MCR2; 1190 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) { 1191 return (DDI_FAILURE); 1192 } 1193 return (DDI_SUCCESS); 1194 } 1195 1196 /* 1197 * Uninitialize worklists. This routine should only be called when no 1198 * active jobs (hence DMA mappings) exist. One way to ensure this is 1199 * to unregister from kCF before calling this routine. (This is done 1200 * e.g. in detach(9e).) 1201 */ 1202 void 1203 dca_uninit(dca_t *dca) 1204 { 1205 int mcr; 1206 1207 mutex_destroy(&dca->dca_ctx_list_lock); 1208 1209 for (mcr = MCR1; mcr <= MCR2; mcr++) { 1210 dca_worklist_t *wlp = WORKLIST(dca, mcr); 1211 dca_work_t *workp; 1212 dca_request_t *reqp; 1213 1214 if (dca->dca_regs_handle == NULL) { 1215 continue; 1216 } 1217 1218 mutex_enter(&wlp->dwl_lock); 1219 while ((workp = dca_getwork(dca, mcr)) != NULL) { 1220 dca_destroywork(workp); 1221 } 1222 mutex_exit(&wlp->dwl_lock); 1223 while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) { 1224 dca_destroyreq(reqp); 1225 } 1226 1227 mutex_destroy(&wlp->dwl_lock); 1228 mutex_destroy(&wlp->dwl_freereqslock); 1229 cv_destroy(&wlp->dwl_cv); 1230 wlp->dwl_prov = NULL; 1231 } 1232 } 1233 1234 static void 1235 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock) 1236 { 1237 if (!q || !node) 1238 return; 1239 1240 mutex_enter(lock); 1241 node->dl_next2 = q; 1242 node->dl_prev2 = q->dl_prev2; 1243 node->dl_next2->dl_prev2 = node; 1244 node->dl_prev2->dl_next2 = node; 1245 mutex_exit(lock); 1246 } 1247 1248 static void 1249 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock) 1250 { 1251 if (!node) 1252 return; 1253 1254 mutex_enter(lock); 1255 node->dl_next2->dl_prev2 = node->dl_prev2; 1256 node->dl_prev2->dl_next2 = node->dl_next2; 1257 node->dl_next2 = NULL; 1258 node->dl_prev2 = NULL; 1259 mutex_exit(lock); 1260 } 1261 1262 static dca_listnode_t * 1263 dca_delist2(dca_listnode_t *q, kmutex_t *lock) 1264 { 1265 dca_listnode_t *node; 1266 1267 mutex_enter(lock); 1268 if ((node = q->dl_next2) == q) { 1269 mutex_exit(lock); 1270 return (NULL); 1271 } 1272 1273 node->dl_next2->dl_prev2 = node->dl_prev2; 1274 node->dl_prev2->dl_next2 = node->dl_next2; 1275 node->dl_next2 = NULL; 1276 node->dl_prev2 = NULL; 1277 mutex_exit(lock); 1278 1279 return (node); 1280 } 1281 1282 void 1283 dca_initq(dca_listnode_t *q) 1284 { 1285 q->dl_next = q; 1286 q->dl_prev = q; 1287 q->dl_next2 = q; 1288 q->dl_prev2 = q; 1289 } 1290 1291 void 1292 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node) 1293 { 1294 /* 1295 * Enqueue submits at the "tail" of the list, i.e. just 1296 * behind the sentinel. 1297 */ 1298 node->dl_next = q; 1299 node->dl_prev = q->dl_prev; 1300 node->dl_next->dl_prev = node; 1301 node->dl_prev->dl_next = node; 1302 } 1303 1304 void 1305 dca_rmqueue(dca_listnode_t *node) 1306 { 1307 node->dl_next->dl_prev = node->dl_prev; 1308 node->dl_prev->dl_next = node->dl_next; 1309 node->dl_next = NULL; 1310 node->dl_prev = NULL; 1311 } 1312 1313 dca_listnode_t * 1314 dca_dequeue(dca_listnode_t *q) 1315 { 1316 dca_listnode_t *node; 1317 /* 1318 * Dequeue takes from the "head" of the list, i.e. just after 1319 * the sentinel. 1320 */ 1321 if ((node = q->dl_next) == q) { 1322 /* queue is empty */ 1323 return (NULL); 1324 } 1325 dca_rmqueue(node); 1326 return (node); 1327 } 1328 1329 /* this is the opposite of dequeue, it takes things off in LIFO order */ 1330 dca_listnode_t * 1331 dca_unqueue(dca_listnode_t *q) 1332 { 1333 dca_listnode_t *node; 1334 /* 1335 * unqueue takes from the "tail" of the list, i.e. just before 1336 * the sentinel. 1337 */ 1338 if ((node = q->dl_prev) == q) {; 1339 /* queue is empty */ 1340 return (NULL); 1341 } 1342 dca_rmqueue(node); 1343 return (node); 1344 } 1345 1346 dca_listnode_t * 1347 dca_peekqueue(dca_listnode_t *q) 1348 { 1349 dca_listnode_t *node; 1350 1351 if ((node = q->dl_next) == q) { 1352 return (NULL); 1353 } else { 1354 return (node); 1355 } 1356 } 1357 1358 /* 1359 * Interrupt service routine. 1360 */ 1361 uint_t 1362 dca_intr(char *arg) 1363 { 1364 dca_t *dca = (dca_t *)arg; 1365 uint32_t status; 1366 1367 mutex_enter(&dca->dca_intrlock); 1368 status = GETCSR(dca, CSR_DMASTAT); 1369 PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS); 1370 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 1371 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 1372 mutex_exit(&dca->dca_intrlock); 1373 return ((uint_t)DDI_FAILURE); 1374 } 1375 1376 DBG(dca, DINTR, "interrupted, status = 0x%x!", status); 1377 1378 if ((status & DMASTAT_INTERRUPTS) == 0) { 1379 /* increment spurious interrupt kstat */ 1380 if (dca->dca_intrstats) { 1381 KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++; 1382 } 1383 mutex_exit(&dca->dca_intrlock); 1384 return (DDI_INTR_UNCLAIMED); 1385 } 1386 1387 if (dca->dca_intrstats) { 1388 KIOIP(dca)->intrs[KSTAT_INTR_HARD]++; 1389 } 1390 if (status & DMASTAT_MCR1INT) { 1391 DBG(dca, DINTR, "MCR1 interrupted"); 1392 mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock)); 1393 dca_schedule(dca, MCR1); 1394 dca_reclaim(dca, MCR1); 1395 mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock)); 1396 } 1397 1398 if (status & DMASTAT_MCR2INT) { 1399 DBG(dca, DINTR, "MCR2 interrupted"); 1400 mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock)); 1401 dca_schedule(dca, MCR2); 1402 dca_reclaim(dca, MCR2); 1403 mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock)); 1404 } 1405 1406 if (status & DMASTAT_ERRINT) { 1407 uint32_t erraddr; 1408 erraddr = GETCSR(dca, CSR_DMAEA); 1409 mutex_exit(&dca->dca_intrlock); 1410 1411 /* 1412 * bit 1 of the error address indicates failure during 1413 * read if set, during write otherwise. 1414 */ 1415 dca_failure(dca, DDI_DEVICE_FAULT, 1416 DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR, 1417 "DMA master access error %s address 0x%x", 1418 erraddr & 0x1 ? "reading" : "writing", erraddr & ~1); 1419 return (DDI_INTR_CLAIMED); 1420 } 1421 1422 mutex_exit(&dca->dca_intrlock); 1423 1424 return (DDI_INTR_CLAIMED); 1425 } 1426 1427 /* 1428 * Reverse a string of bytes from s1 into s2. The reversal happens 1429 * from the tail of s1. If len1 < len2, then null bytes will be 1430 * padded to the end of s2. If len2 < len1, then (presumably null) 1431 * bytes will be dropped from the start of s1. 1432 * 1433 * The rationale here is that when s1 (source) is shorter, then we 1434 * are reversing from big-endian ordering, into device ordering, and 1435 * want to add some extra nulls to the tail (MSB) side of the device. 1436 * 1437 * Similarly, when s2 (dest) is shorter, then we are truncating what 1438 * are presumably null MSB bits from the device. 1439 * 1440 * There is an expectation when reversing from the device back into 1441 * big-endian, that the number of bytes to reverse and the target size 1442 * will match, and no truncation or padding occurs. 1443 */ 1444 void 1445 dca_reverse(void *s1, void *s2, int len1, int len2) 1446 { 1447 caddr_t src, dst; 1448 1449 if (len1 == 0) { 1450 if (len2) { 1451 bzero(s2, len2); 1452 } 1453 return; 1454 } 1455 src = (caddr_t)s1 + len1 - 1; 1456 dst = s2; 1457 while ((src >= (caddr_t)s1) && (len2)) { 1458 *dst++ = *src--; 1459 len2--; 1460 } 1461 while (len2 > 0) { 1462 *dst++ = 0; 1463 len2--; 1464 } 1465 } 1466 1467 uint16_t 1468 dca_padfull(int num) 1469 { 1470 if (num <= 512) { 1471 return (BITS2BYTES(512)); 1472 } 1473 if (num <= 768) { 1474 return (BITS2BYTES(768)); 1475 } 1476 if (num <= 1024) { 1477 return (BITS2BYTES(1024)); 1478 } 1479 if (num <= 1536) { 1480 return (BITS2BYTES(1536)); 1481 } 1482 if (num <= 2048) { 1483 return (BITS2BYTES(2048)); 1484 } 1485 return (0); 1486 } 1487 1488 uint16_t 1489 dca_padhalf(int num) 1490 { 1491 if (num <= 256) { 1492 return (BITS2BYTES(256)); 1493 } 1494 if (num <= 384) { 1495 return (BITS2BYTES(384)); 1496 } 1497 if (num <= 512) { 1498 return (BITS2BYTES(512)); 1499 } 1500 if (num <= 768) { 1501 return (BITS2BYTES(768)); 1502 } 1503 if (num <= 1024) { 1504 return (BITS2BYTES(1024)); 1505 } 1506 return (0); 1507 } 1508 1509 dca_work_t * 1510 dca_newwork(dca_t *dca) 1511 { 1512 dca_work_t *workp; 1513 size_t size; 1514 ddi_dma_cookie_t c; 1515 unsigned nc; 1516 int rv; 1517 1518 workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP); 1519 1520 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1521 DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah); 1522 if (rv != 0) { 1523 dca_error(dca, "unable to alloc MCR DMA handle"); 1524 dca_destroywork(workp); 1525 return (NULL); 1526 } 1527 1528 rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah, 1529 ROUNDUP(MCR_SIZE, dca->dca_pagesize), 1530 &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 1531 &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch); 1532 if (rv != 0) { 1533 dca_error(dca, "unable to alloc MCR DMA memory"); 1534 dca_destroywork(workp); 1535 return (NULL); 1536 } 1537 1538 rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL, 1539 workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR, 1540 DDI_DMA_SLEEP, NULL, &c, &nc); 1541 if (rv != DDI_DMA_MAPPED) { 1542 dca_error(dca, "unable to map MCR DMA memory"); 1543 dca_destroywork(workp); 1544 return (NULL); 1545 } 1546 1547 workp->dw_mcr_paddr = c.dmac_address; 1548 return (workp); 1549 } 1550 1551 void 1552 dca_destroywork(dca_work_t *workp) 1553 { 1554 if (workp->dw_mcr_paddr) { 1555 (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah); 1556 } 1557 if (workp->dw_mcr_acch) { 1558 ddi_dma_mem_free(&workp->dw_mcr_acch); 1559 } 1560 if (workp->dw_mcr_dmah) { 1561 ddi_dma_free_handle(&workp->dw_mcr_dmah); 1562 } 1563 kmem_free(workp, sizeof (dca_work_t)); 1564 } 1565 1566 dca_request_t * 1567 dca_newreq(dca_t *dca) 1568 { 1569 dca_request_t *reqp; 1570 size_t size; 1571 ddi_dma_cookie_t c; 1572 unsigned nc; 1573 int rv; 1574 int n_chain = 0; 1575 1576 size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH; 1577 1578 reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP); 1579 1580 reqp->dr_dca = dca; 1581 1582 /* 1583 * Setup the DMA region for the context and descriptors. 1584 */ 1585 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP, 1586 NULL, &reqp->dr_ctx_dmah); 1587 if (rv != DDI_SUCCESS) { 1588 dca_error(dca, "failure allocating request DMA handle"); 1589 dca_destroyreq(reqp); 1590 return (NULL); 1591 } 1592 1593 /* for driver hardening, allocate in whole pages */ 1594 rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah, 1595 ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT, 1596 DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size, 1597 &reqp->dr_ctx_acch); 1598 if (rv != DDI_SUCCESS) { 1599 dca_error(dca, "unable to alloc request DMA memory"); 1600 dca_destroyreq(reqp); 1601 return (NULL); 1602 } 1603 1604 rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL, 1605 reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE, 1606 DDI_DMA_SLEEP, 0, &c, &nc); 1607 if (rv != DDI_DMA_MAPPED) { 1608 dca_error(dca, "failed binding request DMA handle"); 1609 dca_destroyreq(reqp); 1610 return (NULL); 1611 } 1612 reqp->dr_ctx_paddr = c.dmac_address; 1613 1614 reqp->dr_dma_size = size; 1615 1616 /* 1617 * Set up the dma for our scratch/shared buffers. 1618 */ 1619 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1620 DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah); 1621 if (rv != DDI_SUCCESS) { 1622 dca_error(dca, "failure allocating ibuf DMA handle"); 1623 dca_destroyreq(reqp); 1624 return (NULL); 1625 } 1626 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1627 DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah); 1628 if (rv != DDI_SUCCESS) { 1629 dca_error(dca, "failure allocating obuf DMA handle"); 1630 dca_destroyreq(reqp); 1631 return (NULL); 1632 } 1633 1634 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1635 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah); 1636 if (rv != DDI_SUCCESS) { 1637 dca_error(dca, "failure allocating chain_in DMA handle"); 1638 dca_destroyreq(reqp); 1639 return (NULL); 1640 } 1641 1642 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, 1643 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah); 1644 if (rv != DDI_SUCCESS) { 1645 dca_error(dca, "failure allocating chain_out DMA handle"); 1646 dca_destroyreq(reqp); 1647 return (NULL); 1648 } 1649 1650 /* 1651 * for driver hardening, allocate in whole pages. 1652 */ 1653 size = ROUNDUP(MAXPACKET, dca->dca_pagesize); 1654 #if defined(i386) || defined(__i386) 1655 /* 1656 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter 1657 * may fail on x86 platform if a physically contigous memory chunk 1658 * cannot be found. From initial testing, we did not see performance 1659 * degration as seen on Sparc. 1660 */ 1661 if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) { 1662 dca_error(dca, "unable to alloc request ibuf memory"); 1663 dca_destroyreq(reqp); 1664 return (NULL); 1665 } 1666 if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) { 1667 dca_error(dca, "unable to alloc request obuf memory"); 1668 dca_destroyreq(reqp); 1669 return (NULL); 1670 } 1671 #else 1672 /* 1673 * We could kmem_alloc for sparc too. However, it gives worse 1674 * performance when transfering more than one page data. For example, 1675 * using 4 threads and 12032 byte data and 3DES on 900MHZ sparc system, 1676 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for 1677 * the same throughput. 1678 */ 1679 rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah, 1680 size, &dca_bufattr, 1681 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr, 1682 &size, &reqp->dr_ibuf_acch); 1683 if (rv != DDI_SUCCESS) { 1684 dca_error(dca, "unable to alloc request DMA memory"); 1685 dca_destroyreq(reqp); 1686 return (NULL); 1687 } 1688 1689 rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah, 1690 size, &dca_bufattr, 1691 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr, 1692 &size, &reqp->dr_obuf_acch); 1693 if (rv != DDI_SUCCESS) { 1694 dca_error(dca, "unable to alloc request DMA memory"); 1695 dca_destroyreq(reqp); 1696 return (NULL); 1697 } 1698 #endif 1699 1700 /* Skip the used portion in the context page */ 1701 reqp->dr_offset = CTX_MAXLENGTH; 1702 if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset, 1703 reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah, 1704 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1705 &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) { 1706 (void) dca_destroyreq(reqp); 1707 return (NULL); 1708 } 1709 reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr; 1710 /* Skip the space used by the input buffer */ 1711 reqp->dr_offset += DESC_SIZE * n_chain; 1712 1713 if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset, 1714 reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah, 1715 DDI_DMA_READ | DDI_DMA_STREAMING, 1716 &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) { 1717 (void) dca_destroyreq(reqp); 1718 return (NULL); 1719 } 1720 reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr; 1721 /* Skip the space used by the output buffer */ 1722 reqp->dr_offset += DESC_SIZE * n_chain; 1723 1724 DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d", 1725 reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH); 1726 return (reqp); 1727 } 1728 1729 void 1730 dca_destroyreq(dca_request_t *reqp) 1731 { 1732 #if defined(i386) || defined(__i386) 1733 dca_t *dca = reqp->dr_dca; 1734 size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize); 1735 #endif 1736 1737 /* 1738 * Clean up DMA for the context structure. 1739 */ 1740 if (reqp->dr_ctx_paddr) { 1741 (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah); 1742 } 1743 1744 if (reqp->dr_ctx_acch) { 1745 ddi_dma_mem_free(&reqp->dr_ctx_acch); 1746 } 1747 1748 if (reqp->dr_ctx_dmah) { 1749 ddi_dma_free_handle(&reqp->dr_ctx_dmah); 1750 } 1751 1752 /* 1753 * Clean up DMA for the scratch buffer. 1754 */ 1755 #if defined(i386) || defined(__i386) 1756 if (reqp->dr_ibuf_dmah) { 1757 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah); 1758 ddi_dma_free_handle(&reqp->dr_ibuf_dmah); 1759 } 1760 if (reqp->dr_obuf_dmah) { 1761 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah); 1762 ddi_dma_free_handle(&reqp->dr_obuf_dmah); 1763 } 1764 1765 kmem_free(reqp->dr_ibuf_kaddr, size); 1766 kmem_free(reqp->dr_obuf_kaddr, size); 1767 #else 1768 if (reqp->dr_ibuf_paddr) { 1769 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah); 1770 } 1771 if (reqp->dr_obuf_paddr) { 1772 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah); 1773 } 1774 1775 if (reqp->dr_ibuf_acch) { 1776 ddi_dma_mem_free(&reqp->dr_ibuf_acch); 1777 } 1778 if (reqp->dr_obuf_acch) { 1779 ddi_dma_mem_free(&reqp->dr_obuf_acch); 1780 } 1781 1782 if (reqp->dr_ibuf_dmah) { 1783 ddi_dma_free_handle(&reqp->dr_ibuf_dmah); 1784 } 1785 if (reqp->dr_obuf_dmah) { 1786 ddi_dma_free_handle(&reqp->dr_obuf_dmah); 1787 } 1788 #endif 1789 /* 1790 * These two DMA handles should have been unbinded in 1791 * dca_unbindchains() function 1792 */ 1793 if (reqp->dr_chain_in_dmah) { 1794 ddi_dma_free_handle(&reqp->dr_chain_in_dmah); 1795 } 1796 if (reqp->dr_chain_out_dmah) { 1797 ddi_dma_free_handle(&reqp->dr_chain_out_dmah); 1798 } 1799 1800 kmem_free(reqp, sizeof (dca_request_t)); 1801 } 1802 1803 dca_work_t * 1804 dca_getwork(dca_t *dca, int mcr) 1805 { 1806 dca_worklist_t *wlp = WORKLIST(dca, mcr); 1807 dca_work_t *workp; 1808 1809 ASSERT(mutex_owned(&wlp->dwl_lock)); 1810 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework); 1811 if (workp) { 1812 int nreqs; 1813 bzero(workp->dw_mcr_kaddr, 8); 1814 1815 /* clear out old requests */ 1816 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) { 1817 workp->dw_reqs[nreqs] = NULL; 1818 } 1819 } 1820 return (workp); 1821 } 1822 1823 void 1824 dca_freework(dca_work_t *workp) 1825 { 1826 ASSERT(mutex_owned(&workp->dw_wlp->dwl_lock)); 1827 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp); 1828 } 1829 1830 dca_request_t * 1831 dca_getreq(dca_t *dca, int mcr, int tryhard) 1832 { 1833 dca_worklist_t *wlp = WORKLIST(dca, mcr); 1834 dca_request_t *reqp; 1835 1836 mutex_enter(&wlp->dwl_freereqslock); 1837 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs); 1838 mutex_exit(&wlp->dwl_freereqslock); 1839 if (reqp) { 1840 reqp->dr_flags = 0; 1841 reqp->dr_callback = NULL; 1842 } else if (tryhard) { 1843 /* 1844 * failed to get a free one, try an allocation, the hard way. 1845 * XXX: Kstat desired here. 1846 */ 1847 if ((reqp = dca_newreq(dca)) != NULL) { 1848 reqp->dr_wlp = wlp; 1849 reqp->dr_dca = dca; 1850 reqp->dr_flags = 0; 1851 reqp->dr_callback = NULL; 1852 } 1853 } 1854 return (reqp); 1855 } 1856 1857 void 1858 dca_freereq(dca_request_t *reqp) 1859 { 1860 reqp->dr_kcf_req = NULL; 1861 if (!(reqp->dr_flags & DR_NOCACHE)) { 1862 mutex_enter(&reqp->dr_wlp->dwl_freereqslock); 1863 dca_enqueue(&reqp->dr_wlp->dwl_freereqs, 1864 (dca_listnode_t *)reqp); 1865 mutex_exit(&reqp->dr_wlp->dwl_freereqslock); 1866 } 1867 } 1868 1869 /* 1870 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer 1871 * is mapped to a single physicall address. On x86, a user buffer is mapped 1872 * to multiple physically addresses. These phsyical addresses are chained 1873 * using the method specified in Broadcom BCM5820 specification 1874 */ 1875 int 1876 dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt) 1877 { 1878 int rv; 1879 caddr_t kaddr; 1880 uint_t flags; 1881 int n_chain = 0; 1882 1883 if (reqp->dr_flags & DR_INPLACE) { 1884 flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT; 1885 } else { 1886 flags = DDI_DMA_WRITE | DDI_DMA_STREAMING; 1887 } 1888 1889 /* first the input */ 1890 if (incnt) { 1891 if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) { 1892 DBG(NULL, DWARN, "unrecognised crypto data format"); 1893 return (DDI_FAILURE); 1894 } 1895 if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset, 1896 kaddr, reqp->dr_chain_in_dmah, flags, 1897 &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) { 1898 (void) dca_unbindchains(reqp); 1899 return (rv); 1900 } 1901 1902 /* 1903 * The offset and length are altered by the calling routine 1904 * reqp->dr_in->cd_offset += incnt; 1905 * reqp->dr_in->cd_length -= incnt; 1906 */ 1907 /* Save the first one in the chain for MCR */ 1908 reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr; 1909 reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr; 1910 reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length; 1911 } else { 1912 reqp->dr_in_paddr = NULL; 1913 reqp->dr_in_next = 0; 1914 reqp->dr_in_len = 0; 1915 } 1916 1917 if (reqp->dr_flags & DR_INPLACE) { 1918 reqp->dr_out_paddr = reqp->dr_in_paddr; 1919 reqp->dr_out_len = reqp->dr_in_len; 1920 reqp->dr_out_next = reqp->dr_in_next; 1921 return (DDI_SUCCESS); 1922 } 1923 1924 /* then the output */ 1925 if (outcnt) { 1926 flags = DDI_DMA_READ | DDI_DMA_STREAMING; 1927 if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) { 1928 DBG(NULL, DWARN, "unrecognised crypto data format"); 1929 (void) dca_unbindchains(reqp); 1930 return (DDI_FAILURE); 1931 } 1932 rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset + 1933 n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah, 1934 flags, &reqp->dr_chain_out_head, &n_chain); 1935 if (rv != DDI_SUCCESS) { 1936 (void) dca_unbindchains(reqp); 1937 return (DDI_FAILURE); 1938 } 1939 1940 /* Save the first one in the chain for MCR */ 1941 reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr; 1942 reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr; 1943 reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length; 1944 } else { 1945 reqp->dr_out_paddr = NULL; 1946 reqp->dr_out_next = 0; 1947 reqp->dr_out_len = 0; 1948 } 1949 1950 return (DDI_SUCCESS); 1951 } 1952 1953 /* 1954 * Unbind the user buffers from the DMA handles. 1955 */ 1956 int 1957 dca_unbindchains(dca_request_t *reqp) 1958 { 1959 int rv = DDI_SUCCESS; 1960 int rv1 = DDI_SUCCESS; 1961 1962 /* Clear the input chain */ 1963 if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) { 1964 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah); 1965 reqp->dr_chain_in_head.dc_buffer_paddr = 0; 1966 } 1967 1968 /* Clear the output chain */ 1969 if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) { 1970 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah); 1971 reqp->dr_chain_out_head.dc_buffer_paddr = 0; 1972 } 1973 1974 return ((rv != DDI_SUCCESS)? rv : rv1); 1975 } 1976 1977 /* 1978 * Build either input chain or output chain. It is single-item chain for Sparc, 1979 * and possible mutiple-item chain for x86. 1980 */ 1981 static int 1982 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset, 1983 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags, 1984 dca_chain_t *head, int *n_chain) 1985 { 1986 ddi_dma_cookie_t c; 1987 uint_t nc; 1988 int rv; 1989 caddr_t chain_kaddr_pre; 1990 caddr_t chain_kaddr; 1991 uint32_t chain_paddr; 1992 int i; 1993 1994 /* Advance past the context structure to the starting address */ 1995 chain_paddr = reqp->dr_ctx_paddr + dr_offset; 1996 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset; 1997 1998 /* 1999 * Bind the kernel address to the DMA handle. On x86, the actual 2000 * buffer is mapped into multiple physical addresses. On Sparc, 2001 * the actual buffer is mapped into a single address. 2002 */ 2003 rv = ddi_dma_addr_bind_handle(handle, 2004 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc); 2005 if (rv != DDI_DMA_MAPPED) { 2006 return (DDI_FAILURE); 2007 } 2008 2009 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV); 2010 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle, 2011 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) { 2012 reqp->destroy = TRUE; 2013 return (rv); 2014 } 2015 2016 *n_chain = nc; 2017 2018 /* Setup the data buffer chain for DMA transfer */ 2019 chain_kaddr_pre = NULL; 2020 head->dc_buffer_paddr = 0; 2021 head->dc_next_paddr = 0; 2022 head->dc_buffer_length = 0; 2023 for (i = 0; i < nc; i++) { 2024 /* PIO */ 2025 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address); 2026 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0); 2027 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size); 2028 2029 /* Remember the head of the chain */ 2030 if (head->dc_buffer_paddr == 0) { 2031 head->dc_buffer_paddr = c.dmac_address; 2032 head->dc_buffer_length = c.dmac_size; 2033 } 2034 2035 /* Link to the previous one if one exists */ 2036 if (chain_kaddr_pre) { 2037 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 2038 chain_paddr); 2039 if (head->dc_next_paddr == 0) 2040 head->dc_next_paddr = chain_paddr; 2041 } 2042 chain_kaddr_pre = chain_kaddr; 2043 2044 /* Maintain pointers */ 2045 chain_paddr += DESC_SIZE; 2046 chain_kaddr += DESC_SIZE; 2047 2048 /* Retrieve the next cookie if there is one */ 2049 if (i < nc-1) 2050 ddi_dma_nextcookie(handle, &c); 2051 } 2052 2053 /* Set the next pointer in the last entry to NULL */ 2054 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0); 2055 2056 return (DDI_SUCCESS); 2057 } 2058 2059 /* 2060 * Schedule some work. 2061 */ 2062 int 2063 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched) 2064 { 2065 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2066 2067 mutex_enter(&wlp->dwl_lock); 2068 2069 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p", 2070 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr, 2071 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr); 2072 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x", 2073 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr); 2074 /* sync out the entire context and descriptor chains */ 2075 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV); 2076 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah, 2077 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2078 reqp->destroy = TRUE; 2079 mutex_exit(&wlp->dwl_lock); 2080 return (CRYPTO_DEVICE_ERROR); 2081 } 2082 2083 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp); 2084 wlp->dwl_count++; 2085 wlp->dwl_lastsubmit = ddi_get_lbolt(); 2086 reqp->dr_wlp = wlp; 2087 2088 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) { 2089 /* we are fully loaded now, let kCF know */ 2090 2091 wlp->dwl_flowctl++; 2092 wlp->dwl_busy = 1; 2093 2094 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY); 2095 } 2096 2097 if (dosched) { 2098 #ifdef SCHEDDELAY 2099 /* possibly wait for more work to arrive */ 2100 if (wlp->dwl_count >= wlp->dwl_reqspermcr) { 2101 dca_schedule(dca, mcr); 2102 } else if (!wlp->dwl_schedtid) { 2103 /* wait 1 msec for more work before doing it */ 2104 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2105 (void *)wlp, drv_usectohz(MSEC)); 2106 } 2107 #else 2108 dca_schedule(dca, mcr); 2109 #endif 2110 } 2111 mutex_exit(&wlp->dwl_lock); 2112 2113 return (CRYPTO_QUEUED); 2114 } 2115 2116 void 2117 dca_schedule(dca_t *dca, int mcr) 2118 { 2119 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2120 int csr; 2121 int full; 2122 uint32_t status; 2123 2124 ASSERT(mutex_owned(&wlp->dwl_lock)); 2125 /* 2126 * If the card is draining or has an outstanding failure, 2127 * don't schedule any more work on it right now 2128 */ 2129 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) { 2130 return; 2131 } 2132 2133 if (mcr == MCR2) { 2134 csr = CSR_MCR2; 2135 full = DMASTAT_MCR2FULL; 2136 } else { 2137 csr = CSR_MCR1; 2138 full = DMASTAT_MCR1FULL; 2139 } 2140 2141 for (;;) { 2142 dca_work_t *workp; 2143 uint32_t offset; 2144 int nreqs; 2145 2146 status = GETCSR(dca, CSR_DMASTAT); 2147 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 2148 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) 2149 return; 2150 2151 if ((status & full) != 0) 2152 break; 2153 2154 #ifdef SCHEDDELAY 2155 /* if there isn't enough to do, don't bother now */ 2156 if ((wlp->dwl_count < wlp->dwl_reqspermcr) && 2157 (ddi_get_lbolt() < (wlp->dwl_lastsubmit + 2158 drv_usectohz(MSEC)))) { 2159 /* wait a bit longer... */ 2160 if (wlp->dwl_schedtid == 0) { 2161 wlp->dwl_schedtid = timeout(dca_schedtimeout, 2162 (void *)wlp, drv_usectohz(MSEC)); 2163 } 2164 return; 2165 } 2166 #endif 2167 2168 /* grab a work structure */ 2169 workp = dca_getwork(dca, mcr); 2170 2171 if (workp == NULL) { 2172 /* 2173 * There must be work ready to be reclaimed, 2174 * in this case, since the chip can only hold 2175 * less work outstanding than there are total. 2176 */ 2177 dca_reclaim(dca, mcr); 2178 continue; 2179 } 2180 2181 nreqs = 0; 2182 offset = MCR_CTXADDR; 2183 2184 while (nreqs < wlp->dwl_reqspermcr) { 2185 dca_request_t *reqp; 2186 2187 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq); 2188 if (reqp == NULL) { 2189 /* nothing left to process */ 2190 break; 2191 } 2192 /* 2193 * Update flow control. 2194 */ 2195 wlp->dwl_count--; 2196 if ((wlp->dwl_count == wlp->dwl_lowater) && 2197 (wlp->dwl_busy)) { 2198 wlp->dwl_busy = 0; 2199 crypto_prov_notify(wlp->dwl_prov, 2200 CRYPTO_PROVIDER_READY); 2201 } 2202 2203 /* 2204 * Context address. 2205 */ 2206 PUTMCR32(workp, offset, reqp->dr_ctx_paddr); 2207 offset += 4; 2208 2209 /* 2210 * Input chain. 2211 */ 2212 /* input buffer address */ 2213 PUTMCR32(workp, offset, reqp->dr_in_paddr); 2214 offset += 4; 2215 /* next input buffer entry */ 2216 PUTMCR32(workp, offset, reqp->dr_in_next); 2217 offset += 4; 2218 /* input buffer length */ 2219 PUTMCR16(workp, offset, reqp->dr_in_len); 2220 offset += 2; 2221 /* zero the reserved field */ 2222 PUTMCR16(workp, offset, 0); 2223 offset += 2; 2224 2225 /* 2226 * Overall length. 2227 */ 2228 /* reserved field */ 2229 PUTMCR16(workp, offset, 0); 2230 offset += 2; 2231 /* total packet length */ 2232 PUTMCR16(workp, offset, reqp->dr_pkt_length); 2233 offset += 2; 2234 2235 /* 2236 * Output chain. 2237 */ 2238 /* output buffer address */ 2239 PUTMCR32(workp, offset, reqp->dr_out_paddr); 2240 offset += 4; 2241 /* next output buffer entry */ 2242 PUTMCR32(workp, offset, reqp->dr_out_next); 2243 offset += 4; 2244 /* output buffer length */ 2245 PUTMCR16(workp, offset, reqp->dr_out_len); 2246 offset += 2; 2247 /* zero the reserved field */ 2248 PUTMCR16(workp, offset, 0); 2249 offset += 2; 2250 2251 /* 2252 * Note submission. 2253 */ 2254 workp->dw_reqs[nreqs] = reqp; 2255 nreqs++; 2256 } 2257 2258 if (nreqs == 0) { 2259 /* nothing in the queue! */ 2260 dca_freework(workp); 2261 return; 2262 } 2263 2264 wlp->dwl_submit++; 2265 2266 PUTMCR16(workp, MCR_FLAGS, 0); 2267 PUTMCR16(workp, MCR_COUNT, nreqs); 2268 2269 DBG(dca, DCHATTY, 2270 "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d", 2271 workp->dw_mcr_paddr, workp->dw_mcr_kaddr, 2272 nreqs, mcr); 2273 2274 workp->dw_lbolt = ddi_get_lbolt(); 2275 /* Make sure MCR is synced out to device. */ 2276 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0, 2277 DDI_DMA_SYNC_FORDEV); 2278 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah, 2279 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2280 dca_destroywork(workp); 2281 return; 2282 } 2283 2284 PUTCSR(dca, csr, workp->dw_mcr_paddr); 2285 if (dca_check_acc_handle(dca, dca->dca_regs_handle, 2286 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2287 dca_destroywork(workp); 2288 return; 2289 } else { 2290 dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp); 2291 } 2292 2293 DBG(dca, DCHATTY, "posted"); 2294 } 2295 } 2296 2297 /* 2298 * Reclaim completed work, called in interrupt context. 2299 */ 2300 void 2301 dca_reclaim(dca_t *dca, int mcr) 2302 { 2303 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2304 dca_work_t *workp; 2305 ushort_t flags; 2306 int nreclaimed = 0; 2307 int i; 2308 2309 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr); 2310 ASSERT(mutex_owned(&wlp->dwl_lock)); 2311 /* 2312 * For each MCR in the submitted (runq), we check to see if 2313 * it has been processed. If so, then we note each individual 2314 * job in the MCR, and and do the completion processing for 2315 * each of such job. 2316 */ 2317 for (;;) { 2318 2319 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2320 if (workp == NULL) { 2321 break; 2322 } 2323 2324 /* only sync the MCR flags, since that's all we need */ 2325 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4, 2326 DDI_DMA_SYNC_FORKERNEL); 2327 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah, 2328 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) { 2329 dca_rmqueue((dca_listnode_t *)workp); 2330 dca_destroywork(workp); 2331 return; 2332 } 2333 2334 flags = GETMCR16(workp, MCR_FLAGS); 2335 if ((flags & MCRFLAG_FINISHED) == 0) { 2336 /* chip is still working on it */ 2337 DBG(dca, DRECLAIM, 2338 "chip still working on it (MCR%d)", mcr); 2339 break; 2340 } 2341 2342 /* its really for us, so remove it from the queue */ 2343 dca_rmqueue((dca_listnode_t *)workp); 2344 2345 /* if we were draining, signal on the cv */ 2346 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2347 cv_signal(&wlp->dwl_cv); 2348 } 2349 2350 /* update statistics, done under the lock */ 2351 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2352 dca_request_t *reqp = workp->dw_reqs[i]; 2353 if (reqp == NULL) { 2354 continue; 2355 } 2356 if (reqp->dr_byte_stat >= 0) { 2357 dca->dca_stats[reqp->dr_byte_stat] += 2358 reqp->dr_pkt_length; 2359 } 2360 if (reqp->dr_job_stat >= 0) { 2361 dca->dca_stats[reqp->dr_job_stat]++; 2362 } 2363 } 2364 mutex_exit(&wlp->dwl_lock); 2365 2366 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2367 dca_request_t *reqp = workp->dw_reqs[i]; 2368 2369 if (reqp == NULL) { 2370 continue; 2371 } 2372 2373 /* Do the callback. */ 2374 workp->dw_reqs[i] = NULL; 2375 dca_done(reqp, CRYPTO_SUCCESS); 2376 2377 nreclaimed++; 2378 } 2379 2380 mutex_enter(&wlp->dwl_lock); 2381 2382 /* now we can release the work */ 2383 dca_freework(workp); 2384 } 2385 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed); 2386 } 2387 2388 int 2389 dca_length(crypto_data_t *cdata) 2390 { 2391 return (cdata->cd_length); 2392 } 2393 2394 /* 2395 * This is the callback function called from the interrupt when a kCF job 2396 * completes. It does some driver-specific things, and then calls the 2397 * kCF-provided callback. Finally, it cleans up the state for the work 2398 * request and drops the reference count to allow for DR. 2399 */ 2400 void 2401 dca_done(dca_request_t *reqp, int err) 2402 { 2403 uint64_t ena = 0; 2404 2405 /* unbind any chains we were using */ 2406 if (dca_unbindchains(reqp) != DDI_SUCCESS) { 2407 /* DMA failure */ 2408 ena = dca_ena(ena); 2409 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT, 2410 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR, 2411 "fault on buffer DMA handle"); 2412 if (err == CRYPTO_SUCCESS) { 2413 err = CRYPTO_DEVICE_ERROR; 2414 } 2415 } 2416 2417 if (reqp->dr_callback != NULL) { 2418 reqp->dr_callback(reqp, err); 2419 } else { 2420 dca_freereq(reqp); 2421 } 2422 } 2423 2424 /* 2425 * Call this when a failure is detected. It will reset the chip, 2426 * log a message, alert kCF, and mark jobs in the runq as failed. 2427 */ 2428 /* ARGSUSED */ 2429 void 2430 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index, 2431 uint64_t ena, int errno, char *mess, ...) 2432 { 2433 va_list ap; 2434 char buf[256]; 2435 int mcr; 2436 char *eclass; 2437 int have_mutex; 2438 2439 va_start(ap, mess); 2440 (void) vsprintf(buf, mess, ap); 2441 va_end(ap); 2442 2443 eclass = dca_fma_eclass_string(dca->dca_model, index); 2444 2445 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) && 2446 index != DCA_FM_ECLASS_NONE) { 2447 ddi_fm_ereport_post(dca->dca_dip, eclass, ena, 2448 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 2449 FM_EREPORT_VERS0, NULL); 2450 2451 /* Report the impact of the failure to the DDI. */ 2452 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST); 2453 } else { 2454 /* Just log the error string to the message log */ 2455 dca_error(dca, buf); 2456 } 2457 2458 /* 2459 * Indicate a failure (keeps schedule from running). 2460 */ 2461 dca->dca_flags |= DCA_FAILED; 2462 2463 /* 2464 * Reset the chip. This should also have as a side effect, the 2465 * disabling of all interrupts from the device. 2466 */ 2467 (void) dca_reset(dca, 1); 2468 2469 /* 2470 * Report the failure to kCF. 2471 */ 2472 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2473 if (WORKLIST(dca, mcr)->dwl_prov) { 2474 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov, 2475 CRYPTO_PROVIDER_FAILED); 2476 } 2477 } 2478 2479 /* 2480 * Return jobs not sent to hardware back to kCF. 2481 */ 2482 dca_rejectjobs(dca); 2483 2484 /* 2485 * From this point on, no new work should be arriving, and the 2486 * chip should not be doing any active DMA. 2487 */ 2488 2489 /* 2490 * Now find all the work submitted to the device and fail 2491 * them. 2492 */ 2493 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2494 dca_worklist_t *wlp; 2495 int i; 2496 2497 wlp = WORKLIST(dca, mcr); 2498 2499 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2500 continue; 2501 } 2502 for (;;) { 2503 dca_work_t *workp; 2504 2505 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2506 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq); 2507 if (workp == NULL) { 2508 if (have_mutex) 2509 mutex_exit(&wlp->dwl_lock); 2510 break; 2511 } 2512 mutex_exit(&wlp->dwl_lock); 2513 2514 /* 2515 * Free up requests 2516 */ 2517 for (i = 0; i < wlp->dwl_reqspermcr; i++) { 2518 dca_request_t *reqp = workp->dw_reqs[i]; 2519 if (reqp) { 2520 if (reqp->dr_flags & DR_INPLACE) { 2521 dca_done(reqp, errno); 2522 } else { 2523 /* 2524 * cause it to get retried 2525 * elsewhere (software) 2526 */ 2527 dca_done(reqp, CRYPTO_FAILED); 2528 } 2529 workp->dw_reqs[i] = NULL; 2530 } 2531 } 2532 2533 mutex_enter(&wlp->dwl_lock); 2534 /* 2535 * If waiting to drain, signal on the waiter. 2536 */ 2537 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) { 2538 cv_signal(&wlp->dwl_cv); 2539 } 2540 2541 /* 2542 * Return the work and request structures to 2543 * the free pool. 2544 */ 2545 dca_freework(workp); 2546 if (have_mutex) 2547 mutex_exit(&wlp->dwl_lock); 2548 } 2549 } 2550 2551 } 2552 2553 #ifdef SCHEDDELAY 2554 /* 2555 * Reschedule worklist as needed. 2556 */ 2557 void 2558 dca_schedtimeout(void *arg) 2559 { 2560 dca_worklist_t *wlp = (dca_worklist_t *)arg; 2561 mutex_enter(&wlp->dwl_lock); 2562 wlp->dwl_schedtid = 0; 2563 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr); 2564 mutex_exit(&wlp->dwl_lock); 2565 } 2566 #endif 2567 2568 /* 2569 * Check for stalled jobs. 2570 */ 2571 void 2572 dca_jobtimeout(void *arg) 2573 { 2574 int mcr; 2575 dca_t *dca = (dca_t *)arg; 2576 int hung = 0; 2577 2578 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2579 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2580 dca_work_t *workp; 2581 clock_t when; 2582 2583 mutex_enter(&wlp->dwl_lock); 2584 when = ddi_get_lbolt(); 2585 2586 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq); 2587 if (workp == NULL) { 2588 /* nothing sitting in the queue */ 2589 mutex_exit(&wlp->dwl_lock); 2590 continue; 2591 } 2592 2593 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) { 2594 /* request has been queued for less than STALETIME */ 2595 mutex_exit(&wlp->dwl_lock); 2596 continue; 2597 } 2598 2599 /* job has been sitting around for over 1 second, badness */ 2600 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp, 2601 mcr); 2602 2603 /* put it back in the queue, until we reset the chip */ 2604 hung++; 2605 mutex_exit(&wlp->dwl_lock); 2606 } 2607 2608 if (hung) { 2609 dca_failure(dca, DDI_DEVICE_FAULT, 2610 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR, 2611 "timeout processing job.)"); 2612 } 2613 2614 /* reschedule ourself */ 2615 mutex_enter(&dca->dca_intrlock); 2616 if (dca->dca_jobtid == 0) { 2617 /* timeout has been canceled, prior to DR */ 2618 mutex_exit(&dca->dca_intrlock); 2619 return; 2620 } 2621 2622 /* check again in 1 second */ 2623 dca->dca_jobtid = timeout(dca_jobtimeout, arg, 2624 drv_usectohz(SECOND)); 2625 mutex_exit(&dca->dca_intrlock); 2626 } 2627 2628 /* 2629 * This returns all jobs back to kCF. It assumes that processing 2630 * on the worklist has halted. 2631 */ 2632 void 2633 dca_rejectjobs(dca_t *dca) 2634 { 2635 int mcr; 2636 int have_mutex; 2637 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2638 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2639 dca_request_t *reqp; 2640 2641 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) { 2642 continue; 2643 } 2644 have_mutex = mutex_tryenter(&wlp->dwl_lock); 2645 for (;;) { 2646 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq); 2647 if (reqp == NULL) { 2648 break; 2649 } 2650 /* update flow control */ 2651 wlp->dwl_count--; 2652 if ((wlp->dwl_count == wlp->dwl_lowater) && 2653 (wlp->dwl_busy)) { 2654 wlp->dwl_busy = 0; 2655 crypto_prov_notify(wlp->dwl_prov, 2656 CRYPTO_PROVIDER_READY); 2657 } 2658 mutex_exit(&wlp->dwl_lock); 2659 2660 (void) dca_unbindchains(reqp); 2661 reqp->dr_callback(reqp, EAGAIN); 2662 mutex_enter(&wlp->dwl_lock); 2663 } 2664 if (have_mutex) 2665 mutex_exit(&wlp->dwl_lock); 2666 } 2667 } 2668 2669 int 2670 dca_drain(dca_t *dca) 2671 { 2672 int mcr; 2673 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2674 #ifdef SCHEDDELAY 2675 timeout_id_t tid; 2676 #endif 2677 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2678 2679 mutex_enter(&wlp->dwl_lock); 2680 wlp->dwl_drain = 1; 2681 2682 /* give it up to a second to drain from the chip */ 2683 if (!QEMPTY(&wlp->dwl_runq)) { 2684 (void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock, 2685 ddi_get_time() + drv_usectohz(STALETIME)); 2686 2687 if (!QEMPTY(&wlp->dwl_runq)) { 2688 dca_error(dca, "unable to drain device"); 2689 mutex_exit(&wlp->dwl_lock); 2690 dca_undrain(dca); 2691 return (EBUSY); 2692 } 2693 } 2694 2695 #ifdef SCHEDDELAY 2696 tid = wlp->dwl_schedtid; 2697 mutex_exit(&wlp->dwl_lock); 2698 2699 /* 2700 * untimeout outside the lock -- this is safe because we 2701 * have set the drain flag, so dca_schedule() will not 2702 * reschedule another timeout 2703 */ 2704 if (tid) { 2705 untimeout(tid); 2706 } 2707 #else 2708 mutex_exit(&wlp->dwl_lock); 2709 #endif 2710 } 2711 return (0); 2712 } 2713 2714 void 2715 dca_undrain(dca_t *dca) 2716 { 2717 int mcr; 2718 2719 for (mcr = MCR1; mcr <= MCR2; mcr++) { 2720 dca_worklist_t *wlp = WORKLIST(dca, mcr); 2721 mutex_enter(&wlp->dwl_lock); 2722 wlp->dwl_drain = 0; 2723 dca_schedule(dca, mcr); 2724 mutex_exit(&wlp->dwl_lock); 2725 } 2726 } 2727 2728 /* 2729 * Duplicate the crypto_data_t structure, but point to the original 2730 * buffers. 2731 */ 2732 int 2733 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput) 2734 { 2735 ninput->cd_format = input->cd_format; 2736 ninput->cd_offset = input->cd_offset; 2737 ninput->cd_length = input->cd_length; 2738 ninput->cd_miscdata = input->cd_miscdata; 2739 2740 switch (input->cd_format) { 2741 case CRYPTO_DATA_RAW: 2742 ninput->cd_raw.iov_base = input->cd_raw.iov_base; 2743 ninput->cd_raw.iov_len = input->cd_raw.iov_len; 2744 break; 2745 2746 case CRYPTO_DATA_UIO: 2747 ninput->cd_uio = input->cd_uio; 2748 break; 2749 2750 case CRYPTO_DATA_MBLK: 2751 ninput->cd_mp = input->cd_mp; 2752 break; 2753 2754 default: 2755 DBG(NULL, DWARN, 2756 "dca_dupcrypto: unrecognised crypto data format"); 2757 return (CRYPTO_FAILED); 2758 } 2759 2760 return (CRYPTO_SUCCESS); 2761 } 2762 2763 /* 2764 * Performs validation checks on the input and output data structures. 2765 */ 2766 int 2767 dca_verifyio(crypto_data_t *input, crypto_data_t *output) 2768 { 2769 int rv = CRYPTO_SUCCESS; 2770 2771 switch (input->cd_format) { 2772 case CRYPTO_DATA_RAW: 2773 break; 2774 2775 case CRYPTO_DATA_UIO: 2776 /* we support only kernel buffer */ 2777 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) { 2778 DBG(NULL, DWARN, "non kernel input uio buffer"); 2779 rv = CRYPTO_ARGUMENTS_BAD; 2780 } 2781 break; 2782 2783 case CRYPTO_DATA_MBLK: 2784 break; 2785 2786 default: 2787 DBG(NULL, DWARN, "unrecognised input crypto data format"); 2788 rv = CRYPTO_ARGUMENTS_BAD; 2789 } 2790 2791 switch (output->cd_format) { 2792 case CRYPTO_DATA_RAW: 2793 break; 2794 2795 case CRYPTO_DATA_UIO: 2796 /* we support only kernel buffer */ 2797 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) { 2798 DBG(NULL, DWARN, "non kernel output uio buffer"); 2799 rv = CRYPTO_ARGUMENTS_BAD; 2800 } 2801 break; 2802 2803 case CRYPTO_DATA_MBLK: 2804 break; 2805 2806 default: 2807 DBG(NULL, DWARN, "unrecognised output crypto data format"); 2808 rv = CRYPTO_ARGUMENTS_BAD; 2809 } 2810 2811 return (rv); 2812 } 2813 2814 /* 2815 * data: source crypto_data_t struct 2816 * off: offset into the source before commencing copy 2817 * count: the amount of data to copy 2818 * dest: destination buffer 2819 */ 2820 int 2821 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest) 2822 { 2823 int rv = CRYPTO_SUCCESS; 2824 uio_t *uiop; 2825 uint_t vec_idx; 2826 size_t cur_len; 2827 mblk_t *mp; 2828 2829 if (count == 0) { 2830 /* We don't want anything so we're done. */ 2831 return (rv); 2832 } 2833 2834 /* 2835 * Sanity check that we haven't specified a length greater than the 2836 * offset adjusted size of the buffer. 2837 */ 2838 if (count > (data->cd_length - off)) { 2839 return (CRYPTO_DATA_LEN_RANGE); 2840 } 2841 2842 /* Add the internal crypto_data offset to the requested offset. */ 2843 off += data->cd_offset; 2844 2845 switch (data->cd_format) { 2846 case CRYPTO_DATA_RAW: 2847 bcopy(data->cd_raw.iov_base + off, dest, count); 2848 break; 2849 2850 case CRYPTO_DATA_UIO: 2851 /* 2852 * Jump to the first iovec containing data to be 2853 * processed. 2854 */ 2855 uiop = data->cd_uio; 2856 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 2857 off >= uiop->uio_iov[vec_idx].iov_len; 2858 off -= uiop->uio_iov[vec_idx++].iov_len); 2859 if (vec_idx == uiop->uio_iovcnt) { 2860 /* 2861 * The caller specified an offset that is larger than 2862 * the total size of the buffers it provided. 2863 */ 2864 return (CRYPTO_DATA_LEN_RANGE); 2865 } 2866 2867 /* 2868 * Now process the iovecs. 2869 */ 2870 while (vec_idx < uiop->uio_iovcnt && count > 0) { 2871 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 2872 off, count); 2873 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 2874 cur_len); 2875 count -= cur_len; 2876 dest += cur_len; 2877 vec_idx++; 2878 off = 0; 2879 } 2880 2881 if (vec_idx == uiop->uio_iovcnt && count > 0) { 2882 /* 2883 * The end of the specified iovec's was reached but 2884 * the length requested could not be processed 2885 * (requested to digest more data than it provided). 2886 */ 2887 return (CRYPTO_DATA_LEN_RANGE); 2888 } 2889 break; 2890 2891 case CRYPTO_DATA_MBLK: 2892 /* 2893 * Jump to the first mblk_t containing data to be processed. 2894 */ 2895 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp); 2896 off -= MBLKL(mp), mp = mp->b_cont); 2897 if (mp == NULL) { 2898 /* 2899 * The caller specified an offset that is larger than 2900 * the total size of the buffers it provided. 2901 */ 2902 return (CRYPTO_DATA_LEN_RANGE); 2903 } 2904 2905 /* 2906 * Now do the processing on the mblk chain. 2907 */ 2908 while (mp != NULL && count > 0) { 2909 cur_len = min(MBLKL(mp) - off, count); 2910 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 2911 count -= cur_len; 2912 dest += cur_len; 2913 mp = mp->b_cont; 2914 off = 0; 2915 } 2916 2917 if (mp == NULL && count > 0) { 2918 /* 2919 * The end of the mblk was reached but the length 2920 * requested could not be processed, (requested to 2921 * digest more data than it provided). 2922 */ 2923 return (CRYPTO_DATA_LEN_RANGE); 2924 } 2925 break; 2926 2927 default: 2928 DBG(NULL, DWARN, "unrecognised crypto data format"); 2929 rv = CRYPTO_ARGUMENTS_BAD; 2930 } 2931 return (rv); 2932 } 2933 2934 2935 /* 2936 * Performs the input, output or hard scatter/gather checks on the specified 2937 * crypto_data_t struct. Returns true if the data is scatter/gather in nature 2938 * ie fails the test. 2939 */ 2940 int 2941 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val) 2942 { 2943 uio_t *uiop; 2944 mblk_t *mp; 2945 int rv = FALSE; 2946 2947 switch (val) { 2948 case DCA_SG_CONTIG: 2949 /* 2950 * Check for a contiguous data buffer. 2951 */ 2952 switch (data->cd_format) { 2953 case CRYPTO_DATA_RAW: 2954 /* Contiguous in nature */ 2955 break; 2956 2957 case CRYPTO_DATA_UIO: 2958 if (data->cd_uio->uio_iovcnt > 1) 2959 rv = TRUE; 2960 break; 2961 2962 case CRYPTO_DATA_MBLK: 2963 mp = data->cd_mp; 2964 if (mp->b_cont != NULL) 2965 rv = TRUE; 2966 break; 2967 2968 default: 2969 DBG(NULL, DWARN, "unrecognised crypto data format"); 2970 } 2971 break; 2972 2973 case DCA_SG_WALIGN: 2974 /* 2975 * Check for a contiguous data buffer that is 32-bit word 2976 * aligned and is of word multiples in size. 2977 */ 2978 switch (data->cd_format) { 2979 case CRYPTO_DATA_RAW: 2980 if ((data->cd_raw.iov_len % sizeof (uint32_t)) || 2981 ((uintptr_t)data->cd_raw.iov_base % 2982 sizeof (uint32_t))) { 2983 rv = TRUE; 2984 } 2985 break; 2986 2987 case CRYPTO_DATA_UIO: 2988 uiop = data->cd_uio; 2989 if (uiop->uio_iovcnt > 1) { 2990 return (TRUE); 2991 } 2992 /* So there is only one iovec */ 2993 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) || 2994 ((uintptr_t)uiop->uio_iov[0].iov_base % 2995 sizeof (uint32_t))) { 2996 rv = TRUE; 2997 } 2998 break; 2999 3000 case CRYPTO_DATA_MBLK: 3001 mp = data->cd_mp; 3002 if (mp->b_cont != NULL) { 3003 return (TRUE); 3004 } 3005 /* So there is only one mblk in the chain */ 3006 if ((MBLKL(mp) % sizeof (uint32_t)) || 3007 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) { 3008 rv = TRUE; 3009 } 3010 break; 3011 3012 default: 3013 DBG(NULL, DWARN, "unrecognised crypto data format"); 3014 } 3015 break; 3016 3017 case DCA_SG_PALIGN: 3018 /* 3019 * Check that the data buffer is page aligned and is of 3020 * page multiples in size. 3021 */ 3022 switch (data->cd_format) { 3023 case CRYPTO_DATA_RAW: 3024 if ((data->cd_length % dca->dca_pagesize) || 3025 ((uintptr_t)data->cd_raw.iov_base % 3026 dca->dca_pagesize)) { 3027 rv = TRUE; 3028 } 3029 break; 3030 3031 case CRYPTO_DATA_UIO: 3032 uiop = data->cd_uio; 3033 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) || 3034 ((uintptr_t)uiop->uio_iov[0].iov_base % 3035 dca->dca_pagesize)) { 3036 rv = TRUE; 3037 } 3038 break; 3039 3040 case CRYPTO_DATA_MBLK: 3041 mp = data->cd_mp; 3042 if ((MBLKL(mp) % dca->dca_pagesize) || 3043 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) { 3044 rv = TRUE; 3045 } 3046 break; 3047 3048 default: 3049 DBG(NULL, DWARN, "unrecognised crypto data format"); 3050 } 3051 break; 3052 3053 default: 3054 DBG(NULL, DWARN, "unrecognised scatter/gather param type"); 3055 } 3056 3057 return (rv); 3058 } 3059 3060 /* 3061 * Increments the cd_offset and decrements the cd_length as the data is 3062 * gathered from the crypto_data_t struct. 3063 * The data is reverse-copied into the dest buffer if the flag is true. 3064 */ 3065 int 3066 dca_gather(crypto_data_t *in, char *dest, int count, int reverse) 3067 { 3068 int rv = CRYPTO_SUCCESS; 3069 uint_t vec_idx; 3070 uio_t *uiop; 3071 off_t off = in->cd_offset; 3072 size_t cur_len; 3073 mblk_t *mp; 3074 3075 switch (in->cd_format) { 3076 case CRYPTO_DATA_RAW: 3077 if (count > in->cd_length) { 3078 /* 3079 * The caller specified a length greater than the 3080 * size of the buffer. 3081 */ 3082 return (CRYPTO_DATA_LEN_RANGE); 3083 } 3084 if (reverse) 3085 dca_reverse(in->cd_raw.iov_base + off, dest, count, 3086 count); 3087 else 3088 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3089 in->cd_offset += count; 3090 in->cd_length -= count; 3091 break; 3092 3093 case CRYPTO_DATA_UIO: 3094 /* 3095 * Jump to the first iovec containing data to be processed. 3096 */ 3097 uiop = in->cd_uio; 3098 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3099 off >= uiop->uio_iov[vec_idx].iov_len; 3100 off -= uiop->uio_iov[vec_idx++].iov_len); 3101 if (vec_idx == uiop->uio_iovcnt) { 3102 /* 3103 * The caller specified an offset that is larger than 3104 * the total size of the buffers it provided. 3105 */ 3106 return (CRYPTO_DATA_LEN_RANGE); 3107 } 3108 3109 /* 3110 * Now process the iovecs. 3111 */ 3112 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3113 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3114 off, count); 3115 count -= cur_len; 3116 if (reverse) { 3117 /* Fill the dest buffer from the end */ 3118 dca_reverse(uiop->uio_iov[vec_idx].iov_base + 3119 off, dest+count, cur_len, cur_len); 3120 } else { 3121 bcopy(uiop->uio_iov[vec_idx].iov_base + off, 3122 dest, cur_len); 3123 dest += cur_len; 3124 } 3125 in->cd_offset += cur_len; 3126 in->cd_length -= cur_len; 3127 vec_idx++; 3128 off = 0; 3129 } 3130 3131 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3132 /* 3133 * The end of the specified iovec's was reached but 3134 * the length requested could not be processed 3135 * (requested to digest more data than it provided). 3136 */ 3137 return (CRYPTO_DATA_LEN_RANGE); 3138 } 3139 break; 3140 3141 case CRYPTO_DATA_MBLK: 3142 /* 3143 * Jump to the first mblk_t containing data to be processed. 3144 */ 3145 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3146 off -= MBLKL(mp), mp = mp->b_cont); 3147 if (mp == NULL) { 3148 /* 3149 * The caller specified an offset that is larger than 3150 * the total size of the buffers it provided. 3151 */ 3152 return (CRYPTO_DATA_LEN_RANGE); 3153 } 3154 3155 /* 3156 * Now do the processing on the mblk chain. 3157 */ 3158 while (mp != NULL && count > 0) { 3159 cur_len = min(MBLKL(mp) - off, count); 3160 count -= cur_len; 3161 if (reverse) { 3162 /* Fill the dest buffer from the end */ 3163 dca_reverse((char *)(mp->b_rptr + off), 3164 dest+count, cur_len, cur_len); 3165 } else { 3166 bcopy((char *)(mp->b_rptr + off), dest, 3167 cur_len); 3168 dest += cur_len; 3169 } 3170 in->cd_offset += cur_len; 3171 in->cd_length -= cur_len; 3172 mp = mp->b_cont; 3173 off = 0; 3174 } 3175 3176 if (mp == NULL && count > 0) { 3177 /* 3178 * The end of the mblk was reached but the length 3179 * requested could not be processed, (requested to 3180 * digest more data than it provided). 3181 */ 3182 return (CRYPTO_DATA_LEN_RANGE); 3183 } 3184 break; 3185 3186 default: 3187 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format"); 3188 rv = CRYPTO_ARGUMENTS_BAD; 3189 } 3190 return (rv); 3191 } 3192 3193 /* 3194 * Increments the cd_offset and decrements the cd_length as the data is 3195 * gathered from the crypto_data_t struct. 3196 */ 3197 int 3198 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest, 3199 int count) 3200 { 3201 int rv = CRYPTO_SUCCESS; 3202 caddr_t baddr; 3203 uint_t vec_idx; 3204 uio_t *uiop; 3205 off_t off = in->cd_offset; 3206 size_t cur_len; 3207 mblk_t *mp; 3208 3209 /* Process the residual first */ 3210 if (*residlen > 0) { 3211 uint_t num = min(count, *residlen); 3212 bcopy(resid, dest, num); 3213 *residlen -= num; 3214 if (*residlen > 0) { 3215 /* 3216 * Requested amount 'count' is less than what's in 3217 * the residual, so shuffle any remaining resid to 3218 * the front. 3219 */ 3220 baddr = resid + num; 3221 bcopy(baddr, resid, *residlen); 3222 } 3223 dest += num; 3224 count -= num; 3225 } 3226 3227 /* Now process what's in the crypto_data_t structs */ 3228 switch (in->cd_format) { 3229 case CRYPTO_DATA_RAW: 3230 if (count > in->cd_length) { 3231 /* 3232 * The caller specified a length greater than the 3233 * size of the buffer. 3234 */ 3235 return (CRYPTO_DATA_LEN_RANGE); 3236 } 3237 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count); 3238 in->cd_offset += count; 3239 in->cd_length -= count; 3240 break; 3241 3242 case CRYPTO_DATA_UIO: 3243 /* 3244 * Jump to the first iovec containing data to be processed. 3245 */ 3246 uiop = in->cd_uio; 3247 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3248 off >= uiop->uio_iov[vec_idx].iov_len; 3249 off -= uiop->uio_iov[vec_idx++].iov_len); 3250 if (vec_idx == uiop->uio_iovcnt) { 3251 /* 3252 * The caller specified an offset that is larger than 3253 * the total size of the buffers it provided. 3254 */ 3255 return (CRYPTO_DATA_LEN_RANGE); 3256 } 3257 3258 /* 3259 * Now process the iovecs. 3260 */ 3261 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3262 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3263 off, count); 3264 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest, 3265 cur_len); 3266 count -= cur_len; 3267 dest += cur_len; 3268 in->cd_offset += cur_len; 3269 in->cd_length -= cur_len; 3270 vec_idx++; 3271 off = 0; 3272 } 3273 3274 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3275 /* 3276 * The end of the specified iovec's was reached but 3277 * the length requested could not be processed 3278 * (requested to digest more data than it provided). 3279 */ 3280 return (CRYPTO_DATA_LEN_RANGE); 3281 } 3282 break; 3283 3284 case CRYPTO_DATA_MBLK: 3285 /* 3286 * Jump to the first mblk_t containing data to be processed. 3287 */ 3288 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp); 3289 off -= MBLKL(mp), mp = mp->b_cont); 3290 if (mp == NULL) { 3291 /* 3292 * The caller specified an offset that is larger than 3293 * the total size of the buffers it provided. 3294 */ 3295 return (CRYPTO_DATA_LEN_RANGE); 3296 } 3297 3298 /* 3299 * Now do the processing on the mblk chain. 3300 */ 3301 while (mp != NULL && count > 0) { 3302 cur_len = min(MBLKL(mp) - off, count); 3303 bcopy((char *)(mp->b_rptr + off), dest, cur_len); 3304 count -= cur_len; 3305 dest += cur_len; 3306 in->cd_offset += cur_len; 3307 in->cd_length -= cur_len; 3308 mp = mp->b_cont; 3309 off = 0; 3310 } 3311 3312 if (mp == NULL && count > 0) { 3313 /* 3314 * The end of the mblk was reached but the length 3315 * requested could not be processed, (requested to 3316 * digest more data than it provided). 3317 */ 3318 return (CRYPTO_DATA_LEN_RANGE); 3319 } 3320 break; 3321 3322 default: 3323 DBG(NULL, DWARN, 3324 "dca_resid_gather: unrecognised crypto data format"); 3325 rv = CRYPTO_ARGUMENTS_BAD; 3326 } 3327 return (rv); 3328 } 3329 3330 /* 3331 * Appends the data to the crypto_data_t struct increasing cd_length. 3332 * cd_offset is left unchanged. 3333 * Data is reverse-copied if the flag is TRUE. 3334 */ 3335 int 3336 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse) 3337 { 3338 int rv = CRYPTO_SUCCESS; 3339 off_t offset = out->cd_offset + out->cd_length; 3340 uint_t vec_idx; 3341 uio_t *uiop; 3342 size_t cur_len; 3343 mblk_t *mp; 3344 3345 switch (out->cd_format) { 3346 case CRYPTO_DATA_RAW: 3347 if (out->cd_raw.iov_len - offset < count) { 3348 /* Trying to write out more than space available. */ 3349 return (CRYPTO_DATA_LEN_RANGE); 3350 } 3351 if (reverse) 3352 dca_reverse((void*) src, out->cd_raw.iov_base + offset, 3353 count, count); 3354 else 3355 bcopy(src, out->cd_raw.iov_base + offset, count); 3356 out->cd_length += count; 3357 break; 3358 3359 case CRYPTO_DATA_UIO: 3360 /* 3361 * Jump to the first iovec that can be written to. 3362 */ 3363 uiop = out->cd_uio; 3364 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt && 3365 offset >= uiop->uio_iov[vec_idx].iov_len; 3366 offset -= uiop->uio_iov[vec_idx++].iov_len); 3367 if (vec_idx == uiop->uio_iovcnt) { 3368 /* 3369 * The caller specified an offset that is larger than 3370 * the total size of the buffers it provided. 3371 */ 3372 return (CRYPTO_DATA_LEN_RANGE); 3373 } 3374 3375 /* 3376 * Now process the iovecs. 3377 */ 3378 while (vec_idx < uiop->uio_iovcnt && count > 0) { 3379 cur_len = min(uiop->uio_iov[vec_idx].iov_len - 3380 offset, count); 3381 count -= cur_len; 3382 if (reverse) { 3383 dca_reverse((void*) (src+count), 3384 uiop->uio_iov[vec_idx].iov_base + 3385 offset, cur_len, cur_len); 3386 } else { 3387 bcopy(src, uiop->uio_iov[vec_idx].iov_base + 3388 offset, cur_len); 3389 src += cur_len; 3390 } 3391 out->cd_length += cur_len; 3392 vec_idx++; 3393 offset = 0; 3394 } 3395 3396 if (vec_idx == uiop->uio_iovcnt && count > 0) { 3397 /* 3398 * The end of the specified iovec's was reached but 3399 * the length requested could not be processed 3400 * (requested to write more data than space provided). 3401 */ 3402 return (CRYPTO_DATA_LEN_RANGE); 3403 } 3404 break; 3405 3406 case CRYPTO_DATA_MBLK: 3407 /* 3408 * Jump to the first mblk_t that can be written to. 3409 */ 3410 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp); 3411 offset -= MBLKL(mp), mp = mp->b_cont); 3412 if (mp == NULL) { 3413 /* 3414 * The caller specified an offset that is larger than 3415 * the total size of the buffers it provided. 3416 */ 3417 return (CRYPTO_DATA_LEN_RANGE); 3418 } 3419 3420 /* 3421 * Now do the processing on the mblk chain. 3422 */ 3423 while (mp != NULL && count > 0) { 3424 cur_len = min(MBLKL(mp) - offset, count); 3425 count -= cur_len; 3426 if (reverse) { 3427 dca_reverse((void*) (src+count), 3428 (char *)(mp->b_rptr + offset), cur_len, 3429 cur_len); 3430 } else { 3431 bcopy(src, (char *)(mp->b_rptr + offset), 3432 cur_len); 3433 src += cur_len; 3434 } 3435 out->cd_length += cur_len; 3436 mp = mp->b_cont; 3437 offset = 0; 3438 } 3439 3440 if (mp == NULL && count > 0) { 3441 /* 3442 * The end of the mblk was reached but the length 3443 * requested could not be processed, (requested to 3444 * digest more data than it provided). 3445 */ 3446 return (CRYPTO_DATA_LEN_RANGE); 3447 } 3448 break; 3449 3450 default: 3451 DBG(NULL, DWARN, "unrecognised crypto data format"); 3452 rv = CRYPTO_ARGUMENTS_BAD; 3453 } 3454 return (rv); 3455 } 3456 3457 /* 3458 * Compare two byte arrays in reverse order. 3459 * Return 0 if they are identical, 1 otherwise. 3460 */ 3461 int 3462 dca_bcmp_reverse(const void *s1, const void *s2, size_t n) 3463 { 3464 int i; 3465 caddr_t src, dst; 3466 3467 if (!n) 3468 return (0); 3469 3470 src = ((caddr_t)s1) + n - 1; 3471 dst = (caddr_t)s2; 3472 for (i = 0; i < n; i++) { 3473 if (*src != *dst) 3474 return (1); 3475 src--; 3476 dst++; 3477 } 3478 3479 return (0); 3480 } 3481 3482 3483 /* 3484 * This calculates the size of a bignum in bits, specifically not counting 3485 * leading zero bits. This size calculation must be done *before* any 3486 * endian reversal takes place (i.e. the numbers are in absolute big-endian 3487 * order.) 3488 */ 3489 int 3490 dca_bitlen(unsigned char *bignum, int bytelen) 3491 { 3492 unsigned char msbyte; 3493 int i, j; 3494 3495 for (i = 0; i < bytelen - 1; i++) { 3496 if (bignum[i] != 0) { 3497 break; 3498 } 3499 } 3500 msbyte = bignum[i]; 3501 for (j = 8; j > 1; j--) { 3502 if (msbyte & 0x80) { 3503 break; 3504 } 3505 msbyte <<= 1; 3506 } 3507 return ((8 * (bytelen - i - 1)) + j); 3508 } 3509 3510 /* 3511 * This compares to bignums (in big-endian order). It ignores leading 3512 * null bytes. The result semantics follow bcmp, mempcmp, strcmp, etc. 3513 */ 3514 int 3515 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len) 3516 { 3517 while ((n1len > 1) && (*n1 == 0)) { 3518 n1len--; 3519 n1++; 3520 } 3521 while ((n2len > 1) && (*n2 == 0)) { 3522 n2len--; 3523 n2++; 3524 } 3525 if (n1len != n2len) { 3526 return (n1len - n2len); 3527 } 3528 while ((n1len > 1) && (*n1 == *n2)) { 3529 n1++; 3530 n2++; 3531 n1len--; 3532 } 3533 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2)); 3534 } 3535 3536 /* 3537 * Return array of key attributes. 3538 */ 3539 crypto_object_attribute_t * 3540 dca_get_key_attr(crypto_key_t *key) 3541 { 3542 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) || 3543 (key->ck_count == 0)) { 3544 return (NULL); 3545 } 3546 3547 return (key->ck_attrs); 3548 } 3549 3550 /* 3551 * If attribute type exists valp points to it's 32-bit value. 3552 */ 3553 int 3554 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum, 3555 uint64_t atype, uint32_t *valp) 3556 { 3557 crypto_object_attribute_t *bap; 3558 3559 bap = dca_find_attribute(attrp, atnum, atype); 3560 if (bap == NULL) { 3561 return (CRYPTO_ATTRIBUTE_TYPE_INVALID); 3562 } 3563 3564 *valp = *bap->oa_value; 3565 3566 return (CRYPTO_SUCCESS); 3567 } 3568 3569 /* 3570 * If attribute type exists data contains the start address of the value, 3571 * and numelems contains it's length. 3572 */ 3573 int 3574 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum, 3575 uint64_t atype, void **data, unsigned int *numelems) 3576 { 3577 crypto_object_attribute_t *bap; 3578 3579 bap = dca_find_attribute(attrp, atnum, atype); 3580 if (bap == NULL) { 3581 return (CRYPTO_ATTRIBUTE_TYPE_INVALID); 3582 } 3583 3584 *data = bap->oa_value; 3585 *numelems = bap->oa_value_len; 3586 3587 return (CRYPTO_SUCCESS); 3588 } 3589 3590 /* 3591 * Finds entry of specified name. If it is not found dca_find_attribute returns 3592 * NULL. 3593 */ 3594 crypto_object_attribute_t * 3595 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum, 3596 uint64_t atype) 3597 { 3598 while (atnum) { 3599 if (attrp->oa_type == atype) 3600 return (attrp); 3601 atnum--; 3602 attrp++; 3603 } 3604 return (NULL); 3605 } 3606 3607 /* 3608 * Return the address of the first data buffer. If the data format is 3609 * unrecognised return NULL. 3610 */ 3611 caddr_t 3612 dca_bufdaddr(crypto_data_t *data) 3613 { 3614 switch (data->cd_format) { 3615 case CRYPTO_DATA_RAW: 3616 return (data->cd_raw.iov_base + data->cd_offset); 3617 case CRYPTO_DATA_UIO: 3618 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset); 3619 case CRYPTO_DATA_MBLK: 3620 return ((char *)data->cd_mp->b_rptr + data->cd_offset); 3621 default: 3622 DBG(NULL, DWARN, 3623 "dca_bufdaddr: unrecognised crypto data format"); 3624 return (NULL); 3625 } 3626 } 3627 3628 static caddr_t 3629 dca_bufdaddr_out(crypto_data_t *data) 3630 { 3631 size_t offset = data->cd_offset + data->cd_length; 3632 3633 switch (data->cd_format) { 3634 case CRYPTO_DATA_RAW: 3635 return (data->cd_raw.iov_base + offset); 3636 case CRYPTO_DATA_UIO: 3637 return (data->cd_uio->uio_iov[0].iov_base + offset); 3638 case CRYPTO_DATA_MBLK: 3639 return ((char *)data->cd_mp->b_rptr + offset); 3640 default: 3641 DBG(NULL, DWARN, 3642 "dca_bufdaddr_out: unrecognised crypto data format"); 3643 return (NULL); 3644 } 3645 } 3646 3647 /* 3648 * Control entry points. 3649 */ 3650 3651 /* ARGSUSED */ 3652 static void 3653 dca_provider_status(crypto_provider_handle_t provider, uint_t *status) 3654 { 3655 *status = CRYPTO_PROVIDER_READY; 3656 } 3657 3658 /* 3659 * Cipher (encrypt/decrypt) entry points. 3660 */ 3661 3662 /* ARGSUSED */ 3663 static int 3664 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3665 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3666 crypto_req_handle_t req) 3667 { 3668 int error = CRYPTO_FAILED; 3669 dca_t *softc; 3670 /* LINTED E_FUNC_SET_NOT_USED */ 3671 int instance; 3672 3673 /* extract softc and instance number from context */ 3674 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3675 DBG(softc, DENTRY, "dca_encrypt_init: started"); 3676 3677 /* check mechanism */ 3678 switch (mechanism->cm_type) { 3679 case DES_CBC_MECH_INFO_TYPE: 3680 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3681 DR_ENCRYPT); 3682 break; 3683 case DES3_CBC_MECH_INFO_TYPE: 3684 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3685 DR_ENCRYPT | DR_TRIPLE); 3686 break; 3687 case RSA_PKCS_MECH_INFO_TYPE: 3688 case RSA_X_509_MECH_INFO_TYPE: 3689 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3690 break; 3691 default: 3692 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type " 3693 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3694 error = CRYPTO_MECHANISM_INVALID; 3695 } 3696 3697 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error); 3698 3699 if (error == CRYPTO_SUCCESS) 3700 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3701 &softc->dca_ctx_list_lock); 3702 3703 return (error); 3704 } 3705 3706 /* ARGSUSED */ 3707 static int 3708 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3709 crypto_data_t *ciphertext, crypto_req_handle_t req) 3710 { 3711 int error = CRYPTO_FAILED; 3712 dca_t *softc; 3713 /* LINTED E_FUNC_SET_NOT_USED */ 3714 int instance; 3715 3716 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3717 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3718 3719 /* extract softc and instance number from context */ 3720 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3721 DBG(softc, DENTRY, "dca_encrypt: started"); 3722 3723 /* check mechanism */ 3724 switch (DCA_MECH_FROM_CTX(ctx)) { 3725 case DES_CBC_MECH_INFO_TYPE: 3726 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT); 3727 break; 3728 case DES3_CBC_MECH_INFO_TYPE: 3729 error = dca_3des(ctx, plaintext, ciphertext, req, 3730 DR_ENCRYPT | DR_TRIPLE); 3731 break; 3732 case RSA_PKCS_MECH_INFO_TYPE: 3733 case RSA_X_509_MECH_INFO_TYPE: 3734 error = dca_rsastart(ctx, plaintext, ciphertext, req, 3735 DCA_RSA_ENC); 3736 break; 3737 default: 3738 /* Should never reach here */ 3739 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type " 3740 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3741 error = CRYPTO_MECHANISM_INVALID; 3742 } 3743 3744 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 3745 (error != CRYPTO_BUFFER_TOO_SMALL)) { 3746 ciphertext->cd_length = 0; 3747 } 3748 3749 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error); 3750 3751 return (error); 3752 } 3753 3754 /* ARGSUSED */ 3755 static int 3756 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext, 3757 crypto_data_t *ciphertext, crypto_req_handle_t req) 3758 { 3759 int error = CRYPTO_FAILED; 3760 dca_t *softc; 3761 /* LINTED E_FUNC_SET_NOT_USED */ 3762 int instance; 3763 3764 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3765 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3766 3767 /* extract softc and instance number from context */ 3768 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3769 DBG(softc, DENTRY, "dca_encrypt_update: started"); 3770 3771 /* check mechanism */ 3772 switch (DCA_MECH_FROM_CTX(ctx)) { 3773 case DES_CBC_MECH_INFO_TYPE: 3774 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3775 DR_ENCRYPT); 3776 break; 3777 case DES3_CBC_MECH_INFO_TYPE: 3778 error = dca_3desupdate(ctx, plaintext, ciphertext, req, 3779 DR_ENCRYPT | DR_TRIPLE); 3780 break; 3781 default: 3782 /* Should never reach here */ 3783 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type " 3784 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3785 error = CRYPTO_MECHANISM_INVALID; 3786 } 3787 3788 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error); 3789 3790 return (error); 3791 } 3792 3793 /* ARGSUSED */ 3794 static int 3795 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3796 crypto_req_handle_t req) 3797 { 3798 int error = CRYPTO_FAILED; 3799 dca_t *softc; 3800 /* LINTED E_FUNC_SET_NOT_USED */ 3801 int instance; 3802 3803 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3804 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3805 3806 /* extract softc and instance number from context */ 3807 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3808 DBG(softc, DENTRY, "dca_encrypt_final: started"); 3809 3810 /* check mechanism */ 3811 switch (DCA_MECH_FROM_CTX(ctx)) { 3812 case DES_CBC_MECH_INFO_TYPE: 3813 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT); 3814 break; 3815 case DES3_CBC_MECH_INFO_TYPE: 3816 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE); 3817 break; 3818 default: 3819 /* Should never reach here */ 3820 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type " 3821 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3822 error = CRYPTO_MECHANISM_INVALID; 3823 } 3824 3825 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error); 3826 3827 return (error); 3828 } 3829 3830 /* ARGSUSED */ 3831 static int 3832 dca_encrypt_atomic(crypto_provider_handle_t provider, 3833 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 3834 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext, 3835 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 3836 { 3837 int error = CRYPTO_FAILED; 3838 dca_t *softc = (dca_t *)provider; 3839 3840 DBG(softc, DENTRY, "dca_encrypt_atomic: started"); 3841 3842 if (ctx_template != NULL) 3843 return (CRYPTO_ARGUMENTS_BAD); 3844 3845 /* check mechanism */ 3846 switch (mechanism->cm_type) { 3847 case DES_CBC_MECH_INFO_TYPE: 3848 error = dca_3desatomic(provider, session_id, mechanism, key, 3849 plaintext, ciphertext, KM_SLEEP, req, 3850 DR_ENCRYPT | DR_ATOMIC); 3851 break; 3852 case DES3_CBC_MECH_INFO_TYPE: 3853 error = dca_3desatomic(provider, session_id, mechanism, key, 3854 plaintext, ciphertext, KM_SLEEP, req, 3855 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC); 3856 break; 3857 case RSA_PKCS_MECH_INFO_TYPE: 3858 case RSA_X_509_MECH_INFO_TYPE: 3859 error = dca_rsaatomic(provider, session_id, mechanism, key, 3860 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC); 3861 break; 3862 default: 3863 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type " 3864 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3865 error = CRYPTO_MECHANISM_INVALID; 3866 } 3867 3868 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 3869 ciphertext->cd_length = 0; 3870 } 3871 3872 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error); 3873 3874 return (error); 3875 } 3876 3877 /* ARGSUSED */ 3878 static int 3879 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 3880 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 3881 crypto_req_handle_t req) 3882 { 3883 int error = CRYPTO_FAILED; 3884 dca_t *softc; 3885 /* LINTED E_FUNC_SET_NOT_USED */ 3886 int instance; 3887 3888 /* extract softc and instance number from context */ 3889 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3890 DBG(softc, DENTRY, "dca_decrypt_init: started"); 3891 3892 /* check mechanism */ 3893 switch (mechanism->cm_type) { 3894 case DES_CBC_MECH_INFO_TYPE: 3895 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3896 DR_DECRYPT); 3897 break; 3898 case DES3_CBC_MECH_INFO_TYPE: 3899 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP, 3900 DR_DECRYPT | DR_TRIPLE); 3901 break; 3902 case RSA_PKCS_MECH_INFO_TYPE: 3903 case RSA_X_509_MECH_INFO_TYPE: 3904 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 3905 break; 3906 default: 3907 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type " 3908 "0x%llx\n", (unsigned long long)mechanism->cm_type); 3909 error = CRYPTO_MECHANISM_INVALID; 3910 } 3911 3912 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error); 3913 3914 if (error == CRYPTO_SUCCESS) 3915 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 3916 &softc->dca_ctx_list_lock); 3917 3918 return (error); 3919 } 3920 3921 /* ARGSUSED */ 3922 static int 3923 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3924 crypto_data_t *plaintext, crypto_req_handle_t req) 3925 { 3926 int error = CRYPTO_FAILED; 3927 dca_t *softc; 3928 /* LINTED E_FUNC_SET_NOT_USED */ 3929 int instance; 3930 3931 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3932 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3933 3934 /* extract softc and instance number from context */ 3935 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3936 DBG(softc, DENTRY, "dca_decrypt: started"); 3937 3938 /* check mechanism */ 3939 switch (DCA_MECH_FROM_CTX(ctx)) { 3940 case DES_CBC_MECH_INFO_TYPE: 3941 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT); 3942 break; 3943 case DES3_CBC_MECH_INFO_TYPE: 3944 error = dca_3des(ctx, ciphertext, plaintext, req, 3945 DR_DECRYPT | DR_TRIPLE); 3946 break; 3947 case RSA_PKCS_MECH_INFO_TYPE: 3948 case RSA_X_509_MECH_INFO_TYPE: 3949 error = dca_rsastart(ctx, ciphertext, plaintext, req, 3950 DCA_RSA_DEC); 3951 break; 3952 default: 3953 /* Should never reach here */ 3954 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type " 3955 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 3956 error = CRYPTO_MECHANISM_INVALID; 3957 } 3958 3959 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) && 3960 (error != CRYPTO_BUFFER_TOO_SMALL)) { 3961 if (plaintext) 3962 plaintext->cd_length = 0; 3963 } 3964 3965 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error); 3966 3967 return (error); 3968 } 3969 3970 /* ARGSUSED */ 3971 static int 3972 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, 3973 crypto_data_t *plaintext, crypto_req_handle_t req) 3974 { 3975 int error = CRYPTO_FAILED; 3976 dca_t *softc; 3977 /* LINTED E_FUNC_SET_NOT_USED */ 3978 int instance; 3979 3980 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 3981 return (CRYPTO_OPERATION_NOT_INITIALIZED); 3982 3983 /* extract softc and instance number from context */ 3984 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 3985 DBG(softc, DENTRY, "dca_decrypt_update: started"); 3986 3987 /* check mechanism */ 3988 switch (DCA_MECH_FROM_CTX(ctx)) { 3989 case DES_CBC_MECH_INFO_TYPE: 3990 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 3991 DR_DECRYPT); 3992 break; 3993 case DES3_CBC_MECH_INFO_TYPE: 3994 error = dca_3desupdate(ctx, ciphertext, plaintext, req, 3995 DR_DECRYPT | DR_TRIPLE); 3996 break; 3997 default: 3998 /* Should never reach here */ 3999 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type " 4000 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4001 error = CRYPTO_MECHANISM_INVALID; 4002 } 4003 4004 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error); 4005 4006 return (error); 4007 } 4008 4009 /* ARGSUSED */ 4010 static int 4011 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext, 4012 crypto_req_handle_t req) 4013 { 4014 int error = CRYPTO_FAILED; 4015 dca_t *softc; 4016 /* LINTED E_FUNC_SET_NOT_USED */ 4017 int instance; 4018 4019 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4020 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4021 4022 /* extract softc and instance number from context */ 4023 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4024 DBG(softc, DENTRY, "dca_decrypt_final: started"); 4025 4026 /* check mechanism */ 4027 switch (DCA_MECH_FROM_CTX(ctx)) { 4028 case DES_CBC_MECH_INFO_TYPE: 4029 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT); 4030 break; 4031 case DES3_CBC_MECH_INFO_TYPE: 4032 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE); 4033 break; 4034 default: 4035 /* Should never reach here */ 4036 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type " 4037 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4038 error = CRYPTO_MECHANISM_INVALID; 4039 } 4040 4041 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error); 4042 4043 return (error); 4044 } 4045 4046 /* ARGSUSED */ 4047 static int 4048 dca_decrypt_atomic(crypto_provider_handle_t provider, 4049 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4050 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext, 4051 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4052 { 4053 int error = CRYPTO_FAILED; 4054 dca_t *softc = (dca_t *)provider; 4055 4056 DBG(softc, DENTRY, "dca_decrypt_atomic: started"); 4057 4058 if (ctx_template != NULL) 4059 return (CRYPTO_ARGUMENTS_BAD); 4060 4061 /* check mechanism */ 4062 switch (mechanism->cm_type) { 4063 case DES_CBC_MECH_INFO_TYPE: 4064 error = dca_3desatomic(provider, session_id, mechanism, key, 4065 ciphertext, plaintext, KM_SLEEP, req, 4066 DR_DECRYPT | DR_ATOMIC); 4067 break; 4068 case DES3_CBC_MECH_INFO_TYPE: 4069 error = dca_3desatomic(provider, session_id, mechanism, key, 4070 ciphertext, plaintext, KM_SLEEP, req, 4071 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC); 4072 break; 4073 case RSA_PKCS_MECH_INFO_TYPE: 4074 case RSA_X_509_MECH_INFO_TYPE: 4075 error = dca_rsaatomic(provider, session_id, mechanism, key, 4076 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC); 4077 break; 4078 default: 4079 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type " 4080 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4081 error = CRYPTO_MECHANISM_INVALID; 4082 } 4083 4084 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) { 4085 plaintext->cd_length = 0; 4086 } 4087 4088 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error); 4089 4090 return (error); 4091 } 4092 4093 /* 4094 * Sign entry points. 4095 */ 4096 4097 /* ARGSUSED */ 4098 static int 4099 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4100 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4101 crypto_req_handle_t req) 4102 { 4103 int error = CRYPTO_FAILED; 4104 dca_t *softc; 4105 /* LINTED E_FUNC_SET_NOT_USED */ 4106 int instance; 4107 4108 /* extract softc and instance number from context */ 4109 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4110 DBG(softc, DENTRY, "dca_sign_init: started\n"); 4111 4112 if (ctx_template != NULL) 4113 return (CRYPTO_ARGUMENTS_BAD); 4114 4115 /* check mechanism */ 4116 switch (mechanism->cm_type) { 4117 case RSA_PKCS_MECH_INFO_TYPE: 4118 case RSA_X_509_MECH_INFO_TYPE: 4119 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4120 break; 4121 case DSA_MECH_INFO_TYPE: 4122 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP, 4123 DCA_DSA_SIGN); 4124 break; 4125 default: 4126 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type " 4127 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4128 error = CRYPTO_MECHANISM_INVALID; 4129 } 4130 4131 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error); 4132 4133 if (error == CRYPTO_SUCCESS) 4134 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4135 &softc->dca_ctx_list_lock); 4136 4137 return (error); 4138 } 4139 4140 static int 4141 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data, 4142 crypto_data_t *signature, crypto_req_handle_t req) 4143 { 4144 int error = CRYPTO_FAILED; 4145 dca_t *softc; 4146 /* LINTED E_FUNC_SET_NOT_USED */ 4147 int instance; 4148 4149 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4150 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4151 4152 /* extract softc and instance number from context */ 4153 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4154 DBG(softc, DENTRY, "dca_sign: started\n"); 4155 4156 /* check mechanism */ 4157 switch (DCA_MECH_FROM_CTX(ctx)) { 4158 case RSA_PKCS_MECH_INFO_TYPE: 4159 case RSA_X_509_MECH_INFO_TYPE: 4160 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN); 4161 break; 4162 case DSA_MECH_INFO_TYPE: 4163 error = dca_dsa_sign(ctx, data, signature, req); 4164 break; 4165 default: 4166 cmn_err(CE_WARN, "dca_sign: unexpected mech type " 4167 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4168 error = CRYPTO_MECHANISM_INVALID; 4169 } 4170 4171 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error); 4172 4173 return (error); 4174 } 4175 4176 /* ARGSUSED */ 4177 static int 4178 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data, 4179 crypto_req_handle_t req) 4180 { 4181 int error = CRYPTO_MECHANISM_INVALID; 4182 dca_t *softc; 4183 /* LINTED E_FUNC_SET_NOT_USED */ 4184 int instance; 4185 4186 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4187 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4188 4189 /* extract softc and instance number from context */ 4190 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4191 DBG(softc, DENTRY, "dca_sign_update: started\n"); 4192 4193 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type " 4194 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4195 4196 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error); 4197 4198 return (error); 4199 } 4200 4201 /* ARGSUSED */ 4202 static int 4203 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature, 4204 crypto_req_handle_t req) 4205 { 4206 int error = CRYPTO_MECHANISM_INVALID; 4207 dca_t *softc; 4208 /* LINTED E_FUNC_SET_NOT_USED */ 4209 int instance; 4210 4211 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4212 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4213 4214 /* extract softc and instance number from context */ 4215 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4216 DBG(softc, DENTRY, "dca_sign_final: started\n"); 4217 4218 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type " 4219 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4220 4221 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error); 4222 4223 return (error); 4224 } 4225 4226 static int 4227 dca_sign_atomic(crypto_provider_handle_t provider, 4228 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4229 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4230 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4231 { 4232 int error = CRYPTO_FAILED; 4233 dca_t *softc = (dca_t *)provider; 4234 4235 DBG(softc, DENTRY, "dca_sign_atomic: started\n"); 4236 4237 if (ctx_template != NULL) 4238 return (CRYPTO_ARGUMENTS_BAD); 4239 4240 /* check mechanism */ 4241 switch (mechanism->cm_type) { 4242 case RSA_PKCS_MECH_INFO_TYPE: 4243 case RSA_X_509_MECH_INFO_TYPE: 4244 error = dca_rsaatomic(provider, session_id, mechanism, key, 4245 data, signature, KM_SLEEP, req, DCA_RSA_SIGN); 4246 break; 4247 case DSA_MECH_INFO_TYPE: 4248 error = dca_dsaatomic(provider, session_id, mechanism, key, 4249 data, signature, KM_SLEEP, req, DCA_DSA_SIGN); 4250 break; 4251 default: 4252 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type " 4253 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4254 error = CRYPTO_MECHANISM_INVALID; 4255 } 4256 4257 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error); 4258 4259 return (error); 4260 } 4261 4262 /* ARGSUSED */ 4263 static int 4264 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4265 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4266 crypto_req_handle_t req) 4267 { 4268 int error = CRYPTO_FAILED; 4269 dca_t *softc; 4270 /* LINTED E_FUNC_SET_NOT_USED */ 4271 int instance; 4272 4273 /* extract softc and instance number from context */ 4274 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4275 DBG(softc, DENTRY, "dca_sign_recover_init: started\n"); 4276 4277 if (ctx_template != NULL) 4278 return (CRYPTO_ARGUMENTS_BAD); 4279 4280 /* check mechanism */ 4281 switch (mechanism->cm_type) { 4282 case RSA_PKCS_MECH_INFO_TYPE: 4283 case RSA_X_509_MECH_INFO_TYPE: 4284 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4285 break; 4286 default: 4287 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type " 4288 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4289 error = CRYPTO_MECHANISM_INVALID; 4290 } 4291 4292 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error); 4293 4294 if (error == CRYPTO_SUCCESS) 4295 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4296 &softc->dca_ctx_list_lock); 4297 4298 return (error); 4299 } 4300 4301 static int 4302 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data, 4303 crypto_data_t *signature, crypto_req_handle_t req) 4304 { 4305 int error = CRYPTO_FAILED; 4306 dca_t *softc; 4307 /* LINTED E_FUNC_SET_NOT_USED */ 4308 int instance; 4309 4310 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4311 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4312 4313 /* extract softc and instance number from context */ 4314 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4315 DBG(softc, DENTRY, "dca_sign_recover: started\n"); 4316 4317 /* check mechanism */ 4318 switch (DCA_MECH_FROM_CTX(ctx)) { 4319 case RSA_PKCS_MECH_INFO_TYPE: 4320 case RSA_X_509_MECH_INFO_TYPE: 4321 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR); 4322 break; 4323 default: 4324 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type " 4325 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4326 error = CRYPTO_MECHANISM_INVALID; 4327 } 4328 4329 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error); 4330 4331 return (error); 4332 } 4333 4334 static int 4335 dca_sign_recover_atomic(crypto_provider_handle_t provider, 4336 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4337 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4338 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4339 { 4340 int error = CRYPTO_FAILED; 4341 dca_t *softc = (dca_t *)provider; 4342 /* LINTED E_FUNC_SET_NOT_USED */ 4343 int instance; 4344 4345 instance = ddi_get_instance(softc->dca_dip); 4346 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n"); 4347 4348 if (ctx_template != NULL) 4349 return (CRYPTO_ARGUMENTS_BAD); 4350 4351 /* check mechanism */ 4352 switch (mechanism->cm_type) { 4353 case RSA_PKCS_MECH_INFO_TYPE: 4354 case RSA_X_509_MECH_INFO_TYPE: 4355 error = dca_rsaatomic(provider, session_id, mechanism, key, 4356 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR); 4357 break; 4358 default: 4359 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type" 4360 " 0x%llx\n", (unsigned long long)mechanism->cm_type); 4361 error = CRYPTO_MECHANISM_INVALID; 4362 } 4363 4364 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error); 4365 4366 return (error); 4367 } 4368 4369 /* 4370 * Verify entry points. 4371 */ 4372 4373 /* ARGSUSED */ 4374 static int 4375 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4376 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4377 crypto_req_handle_t req) 4378 { 4379 int error = CRYPTO_FAILED; 4380 dca_t *softc; 4381 /* LINTED E_FUNC_SET_NOT_USED */ 4382 int instance; 4383 4384 /* extract softc and instance number from context */ 4385 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4386 DBG(softc, DENTRY, "dca_verify_init: started\n"); 4387 4388 if (ctx_template != NULL) 4389 return (CRYPTO_ARGUMENTS_BAD); 4390 4391 /* check mechanism */ 4392 switch (mechanism->cm_type) { 4393 case RSA_PKCS_MECH_INFO_TYPE: 4394 case RSA_X_509_MECH_INFO_TYPE: 4395 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4396 break; 4397 case DSA_MECH_INFO_TYPE: 4398 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP, 4399 DCA_DSA_VRFY); 4400 break; 4401 default: 4402 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type " 4403 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4404 error = CRYPTO_MECHANISM_INVALID; 4405 } 4406 4407 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error); 4408 4409 if (error == CRYPTO_SUCCESS) 4410 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4411 &softc->dca_ctx_list_lock); 4412 4413 return (error); 4414 } 4415 4416 static int 4417 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature, 4418 crypto_req_handle_t req) 4419 { 4420 int error = CRYPTO_FAILED; 4421 dca_t *softc; 4422 /* LINTED E_FUNC_SET_NOT_USED */ 4423 int instance; 4424 4425 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4426 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4427 4428 /* extract softc and instance number from context */ 4429 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4430 DBG(softc, DENTRY, "dca_verify: started\n"); 4431 4432 /* check mechanism */ 4433 switch (DCA_MECH_FROM_CTX(ctx)) { 4434 case RSA_PKCS_MECH_INFO_TYPE: 4435 case RSA_X_509_MECH_INFO_TYPE: 4436 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY); 4437 break; 4438 case DSA_MECH_INFO_TYPE: 4439 error = dca_dsa_verify(ctx, data, signature, req); 4440 break; 4441 default: 4442 cmn_err(CE_WARN, "dca_verify: unexpected mech type " 4443 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4444 error = CRYPTO_MECHANISM_INVALID; 4445 } 4446 4447 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error); 4448 4449 return (error); 4450 } 4451 4452 /* ARGSUSED */ 4453 static int 4454 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data, 4455 crypto_req_handle_t req) 4456 { 4457 int error = CRYPTO_MECHANISM_INVALID; 4458 dca_t *softc; 4459 /* LINTED E_FUNC_SET_NOT_USED */ 4460 int instance; 4461 4462 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4463 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4464 4465 /* extract softc and instance number from context */ 4466 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4467 DBG(softc, DENTRY, "dca_verify_update: started\n"); 4468 4469 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type " 4470 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4471 4472 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error); 4473 4474 return (error); 4475 } 4476 4477 /* ARGSUSED */ 4478 static int 4479 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature, 4480 crypto_req_handle_t req) 4481 { 4482 int error = CRYPTO_MECHANISM_INVALID; 4483 dca_t *softc; 4484 /* LINTED E_FUNC_SET_NOT_USED */ 4485 int instance; 4486 4487 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4488 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4489 4490 /* extract softc and instance number from context */ 4491 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4492 DBG(softc, DENTRY, "dca_verify_final: started\n"); 4493 4494 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type " 4495 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4496 4497 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error); 4498 4499 return (error); 4500 } 4501 4502 static int 4503 dca_verify_atomic(crypto_provider_handle_t provider, 4504 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4505 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4506 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4507 { 4508 int error = CRYPTO_FAILED; 4509 dca_t *softc = (dca_t *)provider; 4510 4511 DBG(softc, DENTRY, "dca_verify_atomic: started\n"); 4512 4513 if (ctx_template != NULL) 4514 return (CRYPTO_ARGUMENTS_BAD); 4515 4516 /* check mechanism */ 4517 switch (mechanism->cm_type) { 4518 case RSA_PKCS_MECH_INFO_TYPE: 4519 case RSA_X_509_MECH_INFO_TYPE: 4520 error = dca_rsaatomic(provider, session_id, mechanism, key, 4521 signature, data, KM_SLEEP, req, DCA_RSA_VRFY); 4522 break; 4523 case DSA_MECH_INFO_TYPE: 4524 error = dca_dsaatomic(provider, session_id, mechanism, key, 4525 data, signature, KM_SLEEP, req, DCA_DSA_VRFY); 4526 break; 4527 default: 4528 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type " 4529 "0x%llx\n", (unsigned long long)mechanism->cm_type); 4530 error = CRYPTO_MECHANISM_INVALID; 4531 } 4532 4533 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error); 4534 4535 return (error); 4536 } 4537 4538 /* ARGSUSED */ 4539 static int 4540 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, 4541 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, 4542 crypto_req_handle_t req) 4543 { 4544 int error = CRYPTO_MECHANISM_INVALID; 4545 dca_t *softc; 4546 /* LINTED E_FUNC_SET_NOT_USED */ 4547 int instance; 4548 4549 /* extract softc and instance number from context */ 4550 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4551 DBG(softc, DENTRY, "dca_verify_recover_init: started\n"); 4552 4553 if (ctx_template != NULL) 4554 return (CRYPTO_ARGUMENTS_BAD); 4555 4556 /* check mechanism */ 4557 switch (mechanism->cm_type) { 4558 case RSA_PKCS_MECH_INFO_TYPE: 4559 case RSA_X_509_MECH_INFO_TYPE: 4560 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP); 4561 break; 4562 default: 4563 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type" 4564 " 0x%llx\n", (unsigned long long)mechanism->cm_type); 4565 } 4566 4567 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error); 4568 4569 if (error == CRYPTO_SUCCESS) 4570 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private, 4571 &softc->dca_ctx_list_lock); 4572 4573 return (error); 4574 } 4575 4576 static int 4577 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature, 4578 crypto_data_t *data, crypto_req_handle_t req) 4579 { 4580 int error = CRYPTO_MECHANISM_INVALID; 4581 dca_t *softc; 4582 /* LINTED E_FUNC_SET_NOT_USED */ 4583 int instance; 4584 4585 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private) 4586 return (CRYPTO_OPERATION_NOT_INITIALIZED); 4587 4588 /* extract softc and instance number from context */ 4589 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4590 DBG(softc, DENTRY, "dca_verify_recover: started\n"); 4591 4592 /* check mechanism */ 4593 switch (DCA_MECH_FROM_CTX(ctx)) { 4594 case RSA_PKCS_MECH_INFO_TYPE: 4595 case RSA_X_509_MECH_INFO_TYPE: 4596 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR); 4597 break; 4598 default: 4599 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type " 4600 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4601 } 4602 4603 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error); 4604 4605 return (error); 4606 } 4607 4608 static int 4609 dca_verify_recover_atomic(crypto_provider_handle_t provider, 4610 crypto_session_id_t session_id, crypto_mechanism_t *mechanism, 4611 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature, 4612 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) 4613 { 4614 int error = CRYPTO_MECHANISM_INVALID; 4615 dca_t *softc = (dca_t *)provider; 4616 4617 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n"); 4618 4619 if (ctx_template != NULL) 4620 return (CRYPTO_ARGUMENTS_BAD); 4621 4622 /* check mechanism */ 4623 switch (mechanism->cm_type) { 4624 case RSA_PKCS_MECH_INFO_TYPE: 4625 case RSA_X_509_MECH_INFO_TYPE: 4626 error = dca_rsaatomic(provider, session_id, mechanism, key, 4627 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR); 4628 break; 4629 default: 4630 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech " 4631 "type 0x%llx\n", (unsigned long long)mechanism->cm_type); 4632 error = CRYPTO_MECHANISM_INVALID; 4633 } 4634 4635 DBG(softc, DENTRY, 4636 "dca_verify_recover_atomic: done, err = 0x%x", error); 4637 4638 return (error); 4639 } 4640 4641 /* 4642 * Random number entry points. 4643 */ 4644 4645 /* ARGSUSED */ 4646 static int 4647 dca_generate_random(crypto_provider_handle_t provider, 4648 crypto_session_id_t session_id, 4649 uchar_t *buf, size_t len, crypto_req_handle_t req) 4650 { 4651 int error = CRYPTO_FAILED; 4652 dca_t *softc = (dca_t *)provider; 4653 /* LINTED E_FUNC_SET_NOT_USED */ 4654 int instance; 4655 4656 instance = ddi_get_instance(softc->dca_dip); 4657 DBG(softc, DENTRY, "dca_generate_random: started"); 4658 4659 error = dca_rng(softc, buf, len, req); 4660 4661 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error); 4662 4663 return (error); 4664 } 4665 4666 /* 4667 * Context management entry points. 4668 */ 4669 4670 int 4671 dca_free_context(crypto_ctx_t *ctx) 4672 { 4673 int error = CRYPTO_SUCCESS; 4674 dca_t *softc; 4675 /* LINTED E_FUNC_SET_NOT_USED */ 4676 int instance; 4677 4678 /* extract softc and instance number from context */ 4679 DCA_SOFTC_FROM_CTX(ctx, softc, instance); 4680 DBG(softc, DENTRY, "dca_free_context: entered"); 4681 4682 if (ctx->cc_provider_private == NULL) 4683 return (error); 4684 4685 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock); 4686 4687 error = dca_free_context_low(ctx); 4688 4689 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error); 4690 4691 return (error); 4692 } 4693 4694 static int 4695 dca_free_context_low(crypto_ctx_t *ctx) 4696 { 4697 int error = CRYPTO_SUCCESS; 4698 4699 /* check mechanism */ 4700 switch (DCA_MECH_FROM_CTX(ctx)) { 4701 case DES_CBC_MECH_INFO_TYPE: 4702 case DES3_CBC_MECH_INFO_TYPE: 4703 dca_3desctxfree(ctx); 4704 break; 4705 case RSA_PKCS_MECH_INFO_TYPE: 4706 case RSA_X_509_MECH_INFO_TYPE: 4707 dca_rsactxfree(ctx); 4708 break; 4709 case DSA_MECH_INFO_TYPE: 4710 dca_dsactxfree(ctx); 4711 break; 4712 default: 4713 /* Should never reach here */ 4714 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type " 4715 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx)); 4716 error = CRYPTO_MECHANISM_INVALID; 4717 } 4718 4719 return (error); 4720 } 4721 4722 4723 /* Free any unfreed private context. It is called in detach. */ 4724 static void 4725 dca_free_context_list(dca_t *dca) 4726 { 4727 dca_listnode_t *node; 4728 crypto_ctx_t ctx; 4729 4730 (void) memset(&ctx, 0, sizeof (ctx)); 4731 ctx.cc_provider = dca; 4732 4733 while ((node = dca_delist2(&dca->dca_ctx_list, 4734 &dca->dca_ctx_list_lock)) != NULL) { 4735 ctx.cc_provider_private = node; 4736 (void) dca_free_context_low(&ctx); 4737 } 4738 } 4739 4740 static int 4741 ext_info_sym(crypto_provider_handle_t prov, 4742 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq) 4743 { 4744 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM)); 4745 } 4746 4747 static int 4748 ext_info_asym(crypto_provider_handle_t prov, 4749 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq) 4750 { 4751 int rv; 4752 4753 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM); 4754 /* The asymmetric cipher slot supports random */ 4755 ext_info->ei_flags |= CRYPTO_EXTF_RNG; 4756 4757 return (rv); 4758 } 4759 4760 /* ARGSUSED */ 4761 static int 4762 ext_info_base(crypto_provider_handle_t prov, 4763 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id) 4764 { 4765 dca_t *dca = (dca_t *)prov; 4766 int len; 4767 4768 /* Label */ 4769 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s", 4770 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id); 4771 len = strlen((char *)ext_info->ei_label); 4772 (void) memset(ext_info->ei_label + len, ' ', 4773 CRYPTO_EXT_SIZE_LABEL - len); 4774 4775 /* Manufacturer ID */ 4776 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s", 4777 DCA_MANUFACTURER_ID); 4778 len = strlen((char *)ext_info->ei_manufacturerID); 4779 (void) memset(ext_info->ei_manufacturerID + len, ' ', 4780 CRYPTO_EXT_SIZE_MANUF - len); 4781 4782 /* Model */ 4783 (void) sprintf((char *)ext_info->ei_model, dca->dca_model); 4784 4785 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model); 4786 4787 len = strlen((char *)ext_info->ei_model); 4788 (void) memset(ext_info->ei_model + len, ' ', 4789 CRYPTO_EXT_SIZE_MODEL - len); 4790 4791 /* Serial Number. Blank for Deimos */ 4792 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL); 4793 4794 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED; 4795 4796 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO; 4797 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO; 4798 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO; 4799 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO; 4800 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO; 4801 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO; 4802 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO; 4803 ext_info->ei_hardware_version.cv_major = 0; 4804 ext_info->ei_hardware_version.cv_minor = 0; 4805 ext_info->ei_firmware_version.cv_major = 0; 4806 ext_info->ei_firmware_version.cv_minor = 0; 4807 4808 /* Time. No need to be supplied for token without a clock */ 4809 ext_info->ei_time[0] = '\000'; 4810 4811 return (CRYPTO_SUCCESS); 4812 } 4813 4814 static void 4815 dca_fma_init(dca_t *dca) 4816 { 4817 ddi_iblock_cookie_t fm_ibc; 4818 int fm_capabilities = DDI_FM_EREPORT_CAPABLE | 4819 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE | 4820 DDI_FM_ERRCB_CAPABLE; 4821 4822 /* Read FMA capabilities from dca.conf file (if present) */ 4823 dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip, 4824 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 4825 fm_capabilities); 4826 4827 DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities); 4828 4829 /* Only register with IO Fault Services if we have some capability */ 4830 if (dca->fm_capabilities) { 4831 dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC; 4832 dca_devattr.devacc_attr_access = DDI_FLAGERR_ACC; 4833 dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR; 4834 4835 /* Register capabilities with IO Fault Services */ 4836 ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc); 4837 DBG(dca, DWARN, "fm_capable() = 0x%x", 4838 ddi_fm_capable(dca->dca_dip)); 4839 4840 /* 4841 * Initialize pci ereport capabilities if ereport capable 4842 */ 4843 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) || 4844 DDI_FM_ERRCB_CAP(dca->fm_capabilities)) 4845 pci_ereport_setup(dca->dca_dip); 4846 4847 /* 4848 * Initialize callback mutex and register error callback if 4849 * error callback capable. 4850 */ 4851 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) { 4852 ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb, 4853 (void *)dca); 4854 } 4855 } else { 4856 /* 4857 * These fields have to be cleared of FMA if there are no 4858 * FMA capabilities at runtime. 4859 */ 4860 dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC; 4861 dca_devattr.devacc_attr_access = DDI_DEFAULT_ACC; 4862 dca_dmaattr.dma_attr_flags = 0; 4863 } 4864 } 4865 4866 4867 static void 4868 dca_fma_fini(dca_t *dca) 4869 { 4870 /* Only unregister FMA capabilities if we registered some */ 4871 if (dca->fm_capabilities) { 4872 4873 /* 4874 * Release any resources allocated by pci_ereport_setup() 4875 */ 4876 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) || 4877 DDI_FM_ERRCB_CAP(dca->fm_capabilities)) { 4878 pci_ereport_teardown(dca->dca_dip); 4879 } 4880 4881 /* 4882 * Free callback mutex and un-register error callback if 4883 * error callback capable. 4884 */ 4885 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) { 4886 ddi_fm_handler_unregister(dca->dca_dip); 4887 } 4888 4889 /* Unregister from IO Fault Services */ 4890 ddi_fm_fini(dca->dca_dip); 4891 DBG(dca, DWARN, "fm_capable() = 0x%x", 4892 ddi_fm_capable(dca->dca_dip)); 4893 } 4894 } 4895 4896 4897 /* 4898 * The IO fault service error handling callback function 4899 */ 4900 /*ARGSUSED*/ 4901 static int 4902 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4903 { 4904 dca_t *dca = (dca_t *)impl_data; 4905 4906 pci_ereport_post(dip, err, NULL); 4907 if (err->fme_status == DDI_FM_FATAL) { 4908 dca_failure(dca, DDI_DATAPATH_FAULT, 4909 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR, 4910 "fault PCI in FMA callback."); 4911 } 4912 return (err->fme_status); 4913 } 4914 4915 4916 static int 4917 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle, 4918 dca_fma_eclass_t eclass_index) 4919 { 4920 ddi_fm_error_t de; 4921 int version = 0; 4922 4923 ddi_fm_acc_err_get(handle, &de, version); 4924 if (de.fme_status != DDI_FM_OK) { 4925 dca_failure(dca, DDI_DATAPATH_FAULT, 4926 eclass_index, fm_ena_increment(de.fme_ena), 4927 CRYPTO_DEVICE_ERROR, ""); 4928 return (DDI_FAILURE); 4929 } 4930 4931 return (DDI_SUCCESS); 4932 } 4933 4934 int 4935 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle, 4936 dca_fma_eclass_t eclass_index) 4937 { 4938 ddi_fm_error_t de; 4939 int version = 0; 4940 4941 ddi_fm_dma_err_get(handle, &de, version); 4942 if (de.fme_status != DDI_FM_OK) { 4943 dca_failure(dca, DDI_DATAPATH_FAULT, 4944 eclass_index, fm_ena_increment(de.fme_ena), 4945 CRYPTO_DEVICE_ERROR, ""); 4946 return (DDI_FAILURE); 4947 } 4948 return (DDI_SUCCESS); 4949 } 4950 4951 static uint64_t 4952 dca_ena(uint64_t ena) 4953 { 4954 if (ena == 0) 4955 ena = fm_ena_generate(0, FM_ENA_FMT1); 4956 else 4957 ena = fm_ena_increment(ena); 4958 return (ena); 4959 } 4960 4961 static char * 4962 dca_fma_eclass_string(char *model, dca_fma_eclass_t index) 4963 { 4964 if (strstr(model, "500")) 4965 return (dca_fma_eclass_sca500[index]); 4966 else 4967 return (dca_fma_eclass_sca1000[index]); 4968 } 4969