/*
 * mr_sas.c: source for mr_sas driver
 *
 * MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2010, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *		Arun Chandrashekhar
 *		Manju R
 *		Rajesh Prabhakaran
 *		Seokmann Ju
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the author nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/pci.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/signal.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>
#include <sys/fs/dv_node.h>	/* devfs_clean */

#include "mr_sas.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * Local static data
 */
/* Soft-state anchor for all mrsas_instance structures (see _init/_fini). */
static void *mrsas_state = NULL;
/* When set, DDI_DMA_RELAXED_ORDERING is OR-ed into the generic DMA attrs. */
static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
/* Global con_log() verbosity; CL_NONE silences debug output. */
static volatile int 	debug_level_g = CL_NONE;
/* Tunables: prefer MSI/MSI-X interrupts, and enable CTIO, by default. */
static volatile int 	msi_enable = 1;
static volatile int 	ctio_enable = 1;

/* Default Timeout value to issue online controller reset (0x12C = 300s) */
static volatile int  debug_timeout_g  = 0x12C;
/* Simulate consecutive firmware fault */
static volatile int  debug_fw_faults_after_ocr_g  = 0;

#ifdef OCRDEBUG
/* Simulate three consecutive timeout for an IO */
static volatile int  debug_consecutive_timeout_after_ocr_g  = 0;
#endif

/*
 * These scsi_hba symbols are weak so the driver can load even when the
 * framework does not provide them; callers must NULL-check before use.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl

/* Generic DMA attributes; cloned per-instance with dma_attr_sgllen adjusted. */
static ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};

/* Cap on a single transfer (16 MB). */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * cb_ops contains base level routines
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab  */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * dev_ops contains configuration routines
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef	__sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&modldrv,	/* ml_linkage */
	NULL		/* end of driver linkage */
};

/* Little-endian, strictly ordered register access attributes. */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * ************************************************************************** *
 *                                                                            *
 *         common entry points - for loadable kernel modules                  *
 *                                                                            *
 *
 ************************************************************************** *
 */

/*
 * _init - module load entry point.  Initializes the soft-state allocator
 * and the SCSI HBA framework, then installs the module; unwinds both on
 * mod_install failure.
 */
int
_init(void)
{
	int ret;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ret = ddi_soft_state_init(&mrsas_state,
	    sizeof (struct mrsas_instance), 0);

	if (ret != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state"));
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba"));
		ddi_soft_state_fini(&mrsas_state);
		return (ret);
	}

	ret = mod_install(&modlinkage);

	if (ret != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed"));
		/* undo scsi_hba_init and soft-state init on failure */
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&mrsas_state);
	}

	return (ret);
}

/*
 * _info - report module information via the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini - module unload entry point; refuses to unload while busy
 * (mod_remove failure), otherwise tears down HBA and soft state.
 */
int
_fini(void)
{
	int ret;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
		return (ret);

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&mrsas_state);

	return (ret);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *           common entry points - for autoconfiguration                      *
 *                                                                            *
 * ************************************************************************** *
 */

/*
 * mrsas_attach - attach(9E) entry point.  For DDI_ATTACH it allocates the
 * per-instance soft state, maps PCI config/register space, selects the
 * 2108V/DE function-pointer table, sets up interrupts, the MFI layer,
 * the SCSI transport and minor nodes, and finally starts AEN delivery.
 * DDI_PM_RESUME/DDI_RESUME are accepted but currently no-ops.
 */
static int
mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance_no;
	int		nregs;
	/* cleanup flags: record what succeeded so fail_attach can unwind */
	uint8_t		added_isr_f = 0;
	uint8_t		added_soft_isr_f = 0;
	uint8_t		create_devctl_node_f = 0;
	uint8_t		create_scsi_node_f = 0;
	uint8_t		create_ioc_node_f = 0;
	uint8_t		tran_alloc_f = 0;
	uint8_t		irq;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subsysvid;
	uint16_t	subsysid;
	uint16_t	command;
	off_t		reglength = 0;
	int		intr_types = 0;
	char		*data;

	scsi_hba_tran_t		*tran;
	ddi_dma_attr_t	tran_dma_attr;
	struct mrsas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/*
	 * check to see whether this device is in a DMA-capable slot.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Device in slave-only slot, unused",
		    instance_no));
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_ATTACH:
		con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH"));
		/* allocate the soft state for the instance */
		if (ddi_soft_state_zalloc(mrsas_state, instance_no)
		    != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas%d: Failed to allocate soft state",
			    instance_no));

			return (DDI_FAILURE);
		}

		instance = (struct mrsas_instance *)ddi_get_soft_state
		    (mrsas_state, instance_no);

		if (instance == NULL) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas%d: Bad soft state", instance_no));

			ddi_soft_state_free(mrsas_state, instance_no);

			return (DDI_FAILURE);
		}

		/*
		 * NOTE(review): redundant — ddi_soft_state_zalloc already
		 * returns zeroed memory.
		 */
		bzero((caddr_t)instance,
		    sizeof (struct mrsas_instance));

		instance->func_ptr = kmem_zalloc(
		    sizeof (struct mrsas_func_ptr), KM_SLEEP);
		ASSERT(instance->func_ptr);

		/* Setup the PCI configuration space handles */
		if (pci_config_setup(dip, &instance->pci_handle) !=
		    DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas%d: pci config setup failed ",
			    instance_no));

			kmem_free(instance->func_ptr,
			    sizeof (struct mrsas_func_ptr));
			ddi_soft_state_free(mrsas_state, instance_no);

			return (DDI_FAILURE);
		}

		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to get registers."));

			pci_config_teardown(&instance->pci_handle);
			kmem_free(instance->func_ptr,
			    sizeof (struct mrsas_func_ptr));
			ddi_soft_state_free(mrsas_state, instance_no);

			return (DDI_FAILURE);
		}

		/* read device identity from PCI config space */
		vendor_id = pci_config_get16(instance->pci_handle,
		    PCI_CONF_VENID);
		device_id = pci_config_get16(instance->pci_handle,
		    PCI_CONF_DEVID);

		subsysvid = pci_config_get16(instance->pci_handle,
		    PCI_CONF_SUBVENID);
		subsysid = pci_config_get16(instance->pci_handle,
		    PCI_CONF_SUBSYSID);

		pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
		    (pci_config_get16(instance->pci_handle,
		    PCI_CONF_COMM) | PCI_COMM_ME));
		irq = pci_config_get8(instance->pci_handle,
		    PCI_CONF_ILINE);

		con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
		    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
		    instance_no, vendor_id, device_id, subsysvid,
		    subsysid, irq, MRSAS_VERSION));

		/*
		 * enable bus-mastering
		 * NOTE(review): PCI_COMM_ME was already OR-ed in above, so
		 * this branch appears to always take the "already set" arm.
		 */
		command = pci_config_get16(instance->pci_handle,
		    PCI_CONF_COMM);

		if (!(command & PCI_COMM_ME)) {
			command |= PCI_COMM_ME;

			pci_config_put16(instance->pci_handle,
			    PCI_CONF_COMM, command);

			con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
			    "enable bus-mastering", instance_no));
		} else {
			con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
			    "bus-mastering already set", instance_no));
		}

		/* initialize function pointers; only 2108V/DE is supported */
		if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) ||
		    (device_id == PCI_DEVICE_ID_LSI_2108V)) {
			con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
			    "2108V/DE detected", instance_no));
			instance->func_ptr->read_fw_status_reg =
			    read_fw_status_reg_ppc;
			instance->func_ptr->issue_cmd = issue_cmd_ppc;
			instance->func_ptr->issue_cmd_in_sync_mode =
			    issue_cmd_in_sync_mode_ppc;
			instance->func_ptr->issue_cmd_in_poll_mode =
			    issue_cmd_in_poll_mode_ppc;
			instance->func_ptr->enable_intr =
			    enable_intr_ppc;
			instance->func_ptr->disable_intr =
			    disable_intr_ppc;
			instance->func_ptr->intr_ack = intr_ack_ppc;
		} else {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: Invalid device detected"));

			pci_config_teardown(&instance->pci_handle);
			kmem_free(instance->func_ptr,
			    sizeof (struct mrsas_func_ptr));
			ddi_soft_state_free(mrsas_state, instance_no);

			return (DDI_FAILURE);
		}

		instance->baseaddress = pci_config_get32(
		    instance->pci_handle, PCI_CONF_BASE0);
		/* keep only the I/O-space base bits of BAR0 */
		instance->baseaddress &= 0x0fffc;

		instance->dip		= dip;
		instance->vendor_id	= vendor_id;
		instance->device_id	= device_id;
		instance->subsysvid	= subsysvid;
		instance->subsysid	= subsysid;
		instance->instance	= instance_no;

		/* Initialize FMA */
		instance->fm_capabilities = ddi_prop_get_int(
		    DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
		    "fm-capable", DDI_FM_EREPORT_CAPABLE |
		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
		    | DDI_FM_ERRCB_CAPABLE);

		mrsas_fm_init(instance);

		/*
		 * Initialize Interrupts
		 * NOTE(review): this early return leaks the pci handle,
		 * func_ptr, FM state and the soft state — presumably it
		 * should goto fail_attach instead; verify before changing.
		 */
		if ((ddi_dev_regsize(instance->dip,
		    REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
		    reglength < MINIMUM_MFI_MEM_SZ) {
			return (DDI_FAILURE);
		}
		if (reglength > DEFAULT_MFI_MEM_SZ) {
			reglength = DEFAULT_MFI_MEM_SZ;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "mr_sas: register length to map is "
			    "0x%lx bytes", reglength));
		}
		if (ddi_regs_map_setup(instance->dip,
		    REGISTER_SET_IO_2108, &instance->regmap, 0,
		    reglength, &endian_attr, &instance->regmap_handle)
		    != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: couldn't map control registers"));
			goto fail_attach;
		}

		/*
		 * Disable Interrupt Now.
		 * Setup Software interrupt
		 */
		instance->func_ptr->disable_intr(instance);

		/* "mrsas-enable-msi" = "no" in mr_sas.conf disables MSI/X */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "mrsas-enable-msi", &data) == DDI_SUCCESS) {
			if (strncmp(data, "no", 3) == 0) {
				msi_enable = 0;
				con_log(CL_ANN1, (CE_WARN,
				    "msi_enable = %d disabled",
				    msi_enable));
			}
			ddi_prop_free(data);
		}

		con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d",
		    msi_enable));

		/* Check for all supported interrupt types */
		if (ddi_intr_get_supported_types(
		    dip, &intr_types) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "ddi_intr_get_supported_types() failed"));
			goto fail_attach;
		}

		con_log(CL_DLEVEL1, (CE_NOTE,
		    "ddi_intr_get_supported_types() ret: 0x%x",
		    intr_types));

		/*
		 * Initialize and Setup Interrupt handler.
		 * Preference order: MSI-X, then MSI, then FIXED.
		 */
		if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
			if (mrsas_add_intrs(instance,
			    DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "MSIX interrupt query failed"));
				goto fail_attach;
			}
			instance->intr_type = DDI_INTR_TYPE_MSIX;
		} else if (msi_enable && (intr_types &
		    DDI_INTR_TYPE_MSI)) {
			if (mrsas_add_intrs(instance,
			    DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "MSI interrupt query failed"));
				goto fail_attach;
			}
			instance->intr_type = DDI_INTR_TYPE_MSI;
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			msi_enable = 0;
			if (mrsas_add_intrs(instance,
			    DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "FIXED interrupt query failed"));
				goto fail_attach;
			}
			instance->intr_type = DDI_INTR_TYPE_FIXED;
		} else {
			con_log(CL_ANN, (CE_WARN, "Device cannot "
			    "suppport either FIXED or MSI/X "
			    "interrupts"));
			goto fail_attach;
		}

		added_isr_f = 1;

		/* "mrsas-enable-ctio" = "no" in mr_sas.conf disables CTIO */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
			if (strncmp(data, "no", 3) == 0) {
				ctio_enable = 0;
				con_log(CL_ANN1, (CE_WARN,
				    "ctio_enable = %d disabled",
				    ctio_enable));
			}
			ddi_prop_free(data);
		}

		con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d",
		    ctio_enable));

		/* setup the mfi based low level driver */
		if (init_mfi(instance) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN, "mr_sas: "
			    "could not initialize the low level driver"));

			goto fail_attach;
		}

		/*
		 * Initialize all Mutex — interrupt priority must be known,
		 * so this happens after interrupt setup.
		 */
		INIT_LIST_HEAD(&instance->completed_pool_list);
		mutex_init(&instance->completed_pool_mtx,
		    "completed_pool_mtx", MUTEX_DRIVER,
		    DDI_INTR_PRI(instance->intr_pri));

		mutex_init(&instance->app_cmd_pool_mtx,
		    "app_cmd_pool_mtx", MUTEX_DRIVER,
		    DDI_INTR_PRI(instance->intr_pri));

		mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

		mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

		mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
		cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);

		mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

		/* -1 means "no timeout outstanding" (checked in detach) */
		instance->timeout_id = (timeout_id_t)-1;

		/* Register our soft-isr for highlevel interrupts. */
		instance->isr_level = instance->intr_pri;
		if (instance->isr_level == HIGH_LEVEL_INTR) {
			if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
			    &instance->soft_intr_id, NULL, NULL,
			    mrsas_softintr, (caddr_t)instance) !=
			    DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    " Software ISR did not register"));

				goto fail_attach;
			}

			added_soft_isr_f = 1;
		}

		/* Allocate a transport structure */
		tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

		if (tran == NULL) {
			con_log(CL_ANN, (CE_WARN,
			    "scsi_hba_tran_alloc failed"));
			goto fail_attach;
		}

		tran_alloc_f = 1;

		instance->tran = tran;

		/* wire up the tran(9E) entry points for this HBA */
		tran->tran_hba_private	= instance;
		tran->tran_tgt_init	= mrsas_tran_tgt_init;
		tran->tran_tgt_probe	= scsi_hba_probe;
		tran->tran_tgt_free	= mrsas_tran_tgt_free;
		tran->tran_init_pkt	= mrsas_tran_init_pkt;
		tran->tran_start	= mrsas_tran_start;
		tran->tran_abort	= mrsas_tran_abort;
		tran->tran_reset	= mrsas_tran_reset;
		tran->tran_getcap	= mrsas_tran_getcap;
		tran->tran_setcap	= mrsas_tran_setcap;
		tran->tran_destroy_pkt	= mrsas_tran_destroy_pkt;
		tran->tran_dmafree	= mrsas_tran_dmafree;
		tran->tran_sync_pkt	= mrsas_tran_sync_pkt;
		tran->tran_bus_config	= mrsas_tran_bus_config;

		if (mrsas_relaxed_ordering)
			mrsas_generic_dma_attr.dma_attr_flags |=
			    DDI_DMA_RELAXED_ORDERING;


		/* per-instance copy with the firmware-reported SGE limit */
		tran_dma_attr = mrsas_generic_dma_attr;
		tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;

		/* Attach this instance of the hba */
		if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
		    != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "scsi_hba_attach failed"));

			goto fail_attach;
		}

		/* create devctl node for cfgadm command */
		if (ddi_create_minor_node(dip, "devctl",
		    S_IFCHR, INST2DEVCTL(instance_no),
		    DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to create devctl node."));

			goto fail_attach;
		}

		create_devctl_node_f = 1;

		/* create scsi node for cfgadm command */
		if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
		    INST2SCSI(instance_no),
		    DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to create scsi node."));

			goto fail_attach;
		}

		create_scsi_node_f = 1;

		(void) sprintf(instance->iocnode, "%d:lsirdctl",
		    instance_no);

		/*
		 * Create a node for applications
		 * for issuing ioctl to the driver.
		 */
		if (ddi_create_minor_node(dip, instance->iocnode,
		    S_IFCHR, INST2LSIRDCTL(instance_no),
		    DDI_PSEUDO, 0) == DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to create ioctl node."));

			goto fail_attach;
		}

		create_ioc_node_f = 1;

		/* Create a taskq to handle dr events */
		if ((instance->taskq = ddi_taskq_create(dip,
		    "mrsas_dr_taskq", 1,
		    TASKQ_DEFAULTPRI, 0)) == NULL) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to create taskq "));
			instance->taskq = NULL;
			goto fail_attach;
		}

		/* enable interrupt */
		instance->func_ptr->enable_intr(instance);

		/* initiate AEN */
		if (start_mfi_aen(instance)) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas: failed to initiate AEN."));
			goto fail_initiate_aen;
		}

		con_log(CL_DLEVEL1, (CE_NOTE,
		    "AEN started for instance %d.", instance_no));

		/* Finally! We are on the air. */
		ddi_report_dev(dip);

		/* FMA: verify the register/config handles are healthy */
		if (mrsas_check_acc_handle(instance->regmap_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		if (mrsas_check_acc_handle(instance->pci_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		/* logical-drive dip cache, used by tran_tgt_init/free */
		instance->mr_ld_list =
		    kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
		    KM_SLEEP);
		break;
	case DDI_PM_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "mr_sas: DDI_PM_RESUME"));
		break;
	case DDI_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "mr_sas: DDI_RESUME"));
		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: invalid attach cmd=%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

	/*
	 * Unified failure path: undo, in reverse order, everything the
	 * *_f flags recorded as done.
	 * NOTE(review): instance->regmap_handle is never released here
	 * (no ddi_regs_map_free) and instance->func_ptr is not freed —
	 * apparent leaks; confirm against the platform's detach rules.
	 */
fail_initiate_aen:
fail_attach:
	if (create_devctl_node_f) {
		ddi_remove_minor_node(dip, "devctl");
	}

	if (create_scsi_node_f) {
		ddi_remove_minor_node(dip, "scsi");
	}

	if (create_ioc_node_f) {
		ddi_remove_minor_node(dip, instance->iocnode);
	}

	if (tran_alloc_f) {
		scsi_hba_tran_free(tran);
	}


	if (added_soft_isr_f) {
		ddi_remove_softintr(instance->soft_intr_id);
	}

	if (added_isr_f) {
		mrsas_rem_intrs(instance);
	}

	/*
	 * NOTE(review): instance was already dereferenced above, so this
	 * NULL check is ineffective/misleading.
	 */
	if (instance && instance->taskq) {
		ddi_taskq_destroy(instance->taskq);
	}

	mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

	mrsas_fm_fini(instance);

	pci_config_teardown(&instance->pci_handle);

	ddi_soft_state_free(mrsas_state, instance_no);

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas: return failure from mrsas_attach"));

	return (DDI_FAILURE);
}

/*
 * mrsas_getinfo - getinfo(9E) entry point; maps a dev_t to its dip or
 * instance number for the DDI framework.
 */
/*ARGSUSED*/
static int
mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,  void *arg, void **resultp)
{
	int	rval;
	int	mrsas_minor = getminor((dev_t)arg);

	struct mrsas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		instance = (struct mrsas_instance *)
		    ddi_get_soft_state(mrsas_state,
		    MINOR2INST(mrsas_minor));

		if (instance == NULL) {
			*resultp = NULL;
			rval = DDI_FAILURE;
		} else {
			*resultp = instance->dip;
			rval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(intptr_t)
		    (MINOR2INST(getminor((dev_t)arg)));
		rval = DDI_SUCCESS;
		break;
	default:
		*resultp = NULL;
		rval = DDI_FAILURE;
	}

	return (rval);
}

/*
 * mrsas_detach - detach(9E) entry point.  For DDI_DETACH it tears down
 * the SCSI HBA, aborts the outstanding AEN command, quiesces interrupts,
 * and frees everything attach allocated.  DDI_PM_SUSPEND/DDI_SUSPEND are
 * accepted but currently no-ops.
 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no));

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mr_sas:%d failed to detach",
			    instance_no));

			return (DDI_FAILURE);
		}

		scsi_hba_tran_free(instance->tran);

		/* ask firmware to flush its caches before we go away */
		flush_cache(instance);

		if (abort_aen_cmd(instance, instance->aen_cmd)) {
			con_log(CL_ANN, (CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command"));

			return (DDI_FAILURE);
		}

		instance->func_ptr->disable_intr(instance);

		if (instance->isr_level == HIGH_LEVEL_INTR) {
			ddi_remove_softintr(instance->soft_intr_id);
		}

		mrsas_rem_intrs(instance);

		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
		}
		kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
		    * sizeof (struct mrsas_ld));
		free_space_for_mfi(instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);

		kmem_free(instance->func_ptr,
		    sizeof (struct mrsas_func_ptr));

		/* cancel any pending timeout before the state goes away */
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
		}
		ddi_soft_state_free(mrsas_state, instance_no);
		break;
	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *           common entry points - for character driver types                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * mrsas_open - open(9E) for the ioctl minor nodes.  Requires root
 * privileges and a character open; delegates to scsi_hba_open when the
 * (weak) framework symbol is present.
 */
static  int
mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
{
	int	rval = 0;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Check root permissions */
	if (drv_priv(credp) != 0) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: Non-root ioctl access denied!"));
		return (EPERM);
	}

	/* Verify we are being opened as a character device */
	if (otyp != OTYP_CHR) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: ioctl node must be a char node"));
		return (EINVAL);
	}

	if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
	    == NULL) {
		return (ENXIO);
	}

	/* weak symbol: may be absent, see #pragma weak above */
	if (scsi_hba_open) {
		rval = scsi_hba_open(dev, openflags, otyp, credp);
	}

	return (rval);
}

/*
 * mrsas_close - close(9E); delegates to the (weak) scsi_hba_close when
 * present, otherwise a no-op.
 */
static  int
mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
{
	int	rval = 0;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* no need for locks! */

	if (scsi_hba_close) {
		rval = scsi_hba_close(dev, openflags, otyp, credp);
	}

	return (rval);
}

/*
 * mrsas_ioctl - ioctl(9E) entry point.  Handles the driver's firmware
 * pass-through (MRSAS_IOCTL_FIRMWARE) and AEN registration
 * (MRSAS_IOCTL_AEN) commands; everything else is forwarded to
 * scsi_hba_ioctl.  Returns 0 on success or an errno value.
 */
static int
mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int	rval = 0;

	struct mrsas_instance	*instance;
	struct mrsas_ioctl	*ioctl;
	struct mrsas_aen	aen;
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));

	if (instance == NULL) {
		/* invalid minor number */
		con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
		return (ENXIO);
	}

	/* heap-allocated: struct mrsas_ioctl is too large for the stack */
	ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
	    KM_SLEEP);
	ASSERT(ioctl);

	switch ((uint_t)cmd) {
	case MRSAS_IOCTL_FIRMWARE:
		if (ddi_copyin((void *)arg, ioctl,
		    sizeof (struct mrsas_ioctl), mode)) {
			con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
			    "ERROR IOCTL copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
			rval = handle_drv_ioctl(instance, ioctl, mode);
		} else {
			rval = handle_mfi_ioctl(instance, ioctl, mode);
		}

		/* size - 1: the trailing byte is not copied back out */
		if (ddi_copyout((void *)ioctl, (void *)arg,
		    (sizeof (struct mrsas_ioctl) - 1), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	case MRSAS_IOCTL_AEN:
		if (ddi_copyin((void *) arg, &aen,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: ERROR AEN copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		rval = handle_mfi_aen(instance, &aen);

		if (ddi_copyout((void *) &aen, (void *)arg,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	default:
		rval = scsi_hba_ioctl(dev, cmd, arg,
		    mode, credp, rvalp);

		con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
		    "scsi_hba_ioctl called, ret = %x.", rval));
	}

	kmem_free(ioctl, sizeof (struct mrsas_ioctl));
	return (rval);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for block driver types                 *
 *                                                                            *
 * ************************************************************************** *
 */
#ifdef __sparc
/*
 * mrsas_reset - reset(9E) entry point (sparc only): disable interrupts
 * and flush the controller cache before a system reset.
 */
/*ARGSUSED*/
static int
mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in reset", instance_no));
		return (DDI_FAILURE);
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	return (DDI_SUCCESS);
}
#else /* __sparc */
/*
 * mrsas_quiesce - quiesce(9E) entry point (non-sparc): stop the adapter
 * for fast reboot.  Fails when the adapter is dead or mid-OCR; otherwise
 * aborts the AEN command, disables interrupts, flushes the cache and
 * waits for outstanding commands to drain.
 */
/*ARGSUSED*/
static int
mrsas_quiesce(dev_info_t *dip)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in quiesce", instance_no));
		return (DDI_FAILURE);
	}
	if (instance->deadadapter || instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
		    "healthy state", instance_no));
		return (DDI_FAILURE);
	}

	/* best-effort: quiesce continues even if the abort fails */
	if (abort_aen_cmd(instance, instance->aen_cmd)) {
		con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
		    "failed to abort prevous AEN command QUIESCE"));
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	if (wait_for_outstanding(instance)) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
#endif /* __sparc */

/*
 * ************************************************************************** *
 *                                                                            *
 *                          entry points (SCSI HBA)                           *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * mrsas_tran_tgt_init - tran_tgt_init(9E): validate the target address
 * and, for LUN 0 logical drives claimed by "sd", cache the child dip in
 * mr_ld_list.  Non-persistent nodes are merged and rejected.
 */
/*ARGSUSED*/
static int
mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	uint16_t tgt = sd->sd_address.a_target;
	uint8_t lun = sd->sd_address.a_lun;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
	    tgt, lun));

	instance = ADDR2MR(&sd->sd_address);

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mrsas_name_node);
		ddi_set_name_addr(tgt_dip, NULL);

		con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in "
		    "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d",
		    tgt, lun));
		return (DDI_FAILURE);
	}

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
	    (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));

	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if
 (instance->mr_ld_list[tgt].dip == NULL &&
		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
			/* first "sd" child for this LD: remember its dip */
			instance->mr_ld_list[tgt].dip = tgt_dip;
			instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
		}
	}
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_tgt_free - SCSA tran_tgt_free(9E) entry point.
 * Drops the cached child dip for the target, if it is the one going away.
 */
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			instance->mr_ld_list[tgt].dip = NULL;
		}
	}
}

/*
 * mrsas_find_child - find the child devinfo node whose unit address
 * matches "tgt,lun" (hex), or NULL if no such child exists.
 */
static dev_info_t *
mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
{
	dev_info_t *child = NULL;
	char addr[SCSI_MAXNAMELEN];
	char tmp[MAXNAMELEN];

	(void) sprintf(addr, "%x,%x", tgt, lun);
	for (child = ddi_get_child(instance->dip); child;
	    child = ddi_get_next_sibling(child)) {

		/* skip children with no target/lun properties */
		if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (strcmp(addr, tmp) == 0) {
			break;
		}
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p",
	    (void *)child));
	return (child);
}

/*
 * mrsas_name_node - build the "tgt,lun" unit-address string for a child
 * node from its "target" and "lun" properties.  Returns DDI_FAILURE if
 * either property is missing.
 */
static int
mrsas_name_node(dev_info_t *dip, char *name, int len)
{
	int tgt, lun;

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "target", -1);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
	if (tgt == -1) {
		return (DDI_FAILURE);
	}
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "lun", -1);
	con_log(CL_ANN1,
	    (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
	if (lun == -1) {
		return (DDI_FAILURE);
	}
	(void) snprintf(name, len, "%x,%x", tgt, lun);
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_init_pkt - SCSA tran_init_pkt(9E) entry point.
 *
 * Allocates (or reuses) a scsi_pkt plus its per-command scsa_cmd, then
 * sets up DMA resources for the buf: a fresh bind on first use, a window
 * move on reuse.  On a DMA failure a pkt allocated here is freed again;
 * a caller-supplied pkt is left intact.
 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		/* remember we own this pkt so error paths can free it */
		new_pkt = pkt;
	} else {
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* handle already bound: advance to next DMA window */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * mrsas_tran_start - SCSA tran_start(9E) entry point.
 *
 * Submits one scsi_pkt to the firmware.  Rejects work while the adapter
 * is dead (TRAN_FATAL_ERROR) or an online controller reset is in progress
 * (TRAN_BUSY).  Commands completed synthetically by build_cmd() are
 * completed here without touching the firmware.  FLAG_NOINTR packets are
 * issued in polled mode and completed inline; all others are fired
 * asynchronously and completed from the ISR path.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* no free MFI command slot right now; ask SCSA to retry */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN1, (CE_NOTE, "Push SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* FLAG_NOINTR: poll the firmware and complete inline */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MRSAS_TRUE;

		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* translate the MFI completion status into SCSA terms */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - SCSA tran_abort(9E) entry point.
 * The firmware provides no per-command abort; always fails.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - SCSA tran_reset(9E) entry point.
 * Target/bus reset is not supported by the firmware; always fails.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* reset command not supported by H/W */

	return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - SCSA tran_getcap(9E) entry point.
 * Reports fixed capability values; -1 means "capability unknown/unsupported".
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int rval = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = mrsas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		/* let the target driver compute geometry itself */
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_setcap - SCSA tran_setcap(9E) entry point.
 *
 * Returns 1 for capabilities that are accepted (or silently fixed),
 * -1 for unknown capabilities or attempts to set for all targets
 * (whom == 0).  Note: the fixed capabilities fall through to the
 * default rval of 1, i.e. the "set" is reported as accepted but has
 * no effect.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_destroy_pkt - SCSA tran_destroy_pkt(9E) entry point.
 * Releases DMA resources still bound to the command, then frees the pkt.
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - SCSA tran_dmafree(9E) entry point.
 * Releases the command's DMA resources without freeing the pkt itself.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * mrsas_tran_sync_pkt - SCSA tran_sync_pkt(9E) entry point.
 * Syncs the data DMA in the direction recorded at bind time:
 * FORDEV for writes (CFLAG_DMASEND), FORCPU for reads.
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;

	struct mrsas_cmd *cmd;
	struct mrsas_header *hdr;
	struct scsi_pkt *pkt;

	ASSERT(instance);
	/* with fixed interrupts the line may be shared; ack or disclaim */
	if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
	    !instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1, (CE_WARN,
		    "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
		/*
		 * NOTE(review): the log text says UNCLAIMED but the code
		 * returns DDI_INTR_CLAIMED -- confirm which is intended.
		 */
		return (DDI_INTR_CLAIMED);
	}
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN1, (CE_NOTE, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* nothing new in the reply queue */
		con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
		DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
		    uint32_t, consumer);
		return (DDI_INTR_CLAIMED);
	}

#ifdef OCRDEBUG
	if (debug_consecutive_timeout_after_ocr_g == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "simulating consecutive timeout after ocr"));
		return (DDI_INTR_CLAIMED);
	}
#endif

	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		con_log(CL_ANN1, (CE_NOTE, "Fw Fault State Detected "));
		/* no timeout armed yet: start the OCR watchdog ourselves */
		if (instance->timeout_id == (timeout_id_t)-1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Trigger timeout in NON IO Case"));
			instance->timeout_id =
			    timeout(io_timeout_checker, (void *)instance,
			    drv_usectohz(MRSAS_1_SECOND));
		}
		return (DDI_INTR_CLAIMED);
	}

	mutex_enter(&instance->completed_pool_mtx);
	mutex_enter(&instance->cmd_pend_mtx);

	/* drain the reply ring: move each finished cmd to completed pool */
	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr) {
				mlist_del_init(&cmd->list);
			}
		} else {
			pkt = cmd->pkt;
			if (pkt) {
				mlist_del_init(&cmd->list);
			}
		}

		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* ring wraps after max_fw_cmds + 1 entries */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);
	mutex_exit(&instance->completed_pool_mtx);

	/* publish the new consumer index back to the shared area */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                 libraries                                  *
 *                                                                            *
 *
 ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 */
static struct mrsas_cmd *
get_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL) {
		/* reset per-use state before handing the cmd out */
		cmd->pkt = NULL;
		cmd->retry_count_for_ocr = 0;
		cmd->drv_pkt_time = 0;
	}
	mutex_exit(&instance->cmd_pool_mtx);

	return (cmd);
}

/*
 * get_mfi_app_pkt : Get a command from the reserved application pool
 * (ioctl/DCMD traffic); NULL if the pool is empty.  Same frame-clearing
 * contract as get_mfi_pkt() applies to the caller.
 */
static struct mrsas_cmd *
get_mfi_app_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->app_cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->app_cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->app_cmd_pool_mtx);

	return (cmd);
}
/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}

/*
 * return_mfi_app_pkt : Return a cmd to the reserved application pool.
 */
static void
return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->app_cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->app_cmd_pool_list);

	mutex_exit(&instance->app_cmd_pool_mtx);
}
/*
 * push_pending_mfi_pkt : Move a just-issued cmd onto the pending list so
 * io_timeout_checker() can watch it, stamping its timeout (debug_timeout_g
 * seconds) and arming the 1-second watchdog timeout if none is active.
 */
static void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_ANN1, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	ASSERT(mutex_owned(&instance->cmd_pend_mtx));
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* internal/DCMD command: timeout lives in the MFI header */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			hdr->timeout = (unsigned int)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    hdr->timeout));
		}
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		/* SCSA I/O command: timeout tracked in the driver cmd */
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);
}

/*
 * mrsas_print_pending_cmds : Walk the pending list, logging each command
 * and refreshing its timeout so pending work is not declared dead during
 * an online controller reset.  Always returns DDI_SUCCESS.
 */
static int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;

	struct scsi_pkt *pkt;
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_print_pending_cmds(): Called"));
	while (flag) {
		/* list is only walked under cmd_pend_mtx, one hop at a time */
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* refresh the firmware-side timeout */
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					if (hdr) {
						hdr->timeout =
						    (unsigned int)debug_timeout_g;
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index %x hdr %p",
						    (void *)cmd, cmd->index,
						    (void *)hdr));
					}
				} else {
					/* refresh the driver-side timeout */
					pkt = cmd->pkt;
					if (pkt) {
						cmd->drv_pkt_time =
						    (uint16_t)debug_timeout_g;
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index %x "
						    "pkt %p", (void *)cmd, cmd->index,
						    (void *)pkt));
					}
				}
			}
		}
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_print_pending_cmds(): Done\n"));
	return (DDI_SUCCESS);
}


/*
 * mrsas_complete_pending_cmds : Fail every command still on the pending
 * list (used when the adapter is declared dead): I/O packets complete to
 * SCSA with CMD_DEV_GONE, synchronous DCMDs complete to their waiters
 * with MFI_STAT_INVALID_STATUS.  Always returns DDI_SUCCESS.
 */
static int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_NOTE,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_NOTE,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}


/*
 * mrsas_issue_pending_cmds : Re-issue every pending command to the
 * firmware after an online controller reset.  Each retry bumps
 * retry_count_for_ocr; exceeding IO_RETRY_COUNT kills the adapter and
 * returns DDI_FAILURE.
 */
static int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd:%p\n", (void *)cmd));
			cmd->retry_count_for_ocr++;
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds(): "
			    "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr));
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				con_log(CL_ANN1, (CE_NOTE,
				    "mrsas_issue_pending_cmds():"
				    "Calling Kill Adapter\n"));
				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_NOTE,
				    "PENDING ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			}
			if (cmd->sync_cmd == MRSAS_TRUE) {
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}

/*
 * destroy_mfi_frame_pool
 * Frees the per-command frame DMA objects allocated by
 * create_mfi_frame_pool() (max_fw_cmds + 1 slots, incl. flush_cache).
 */
static void
destroy_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	struct mrsas_cmd *cmd;

	/* return all frames to pool */
	for (i = 0; i < max_cmd+1; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);

		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool
 * Allocates, for every command slot, a single contiguous DMA object that
 * holds the MFI frame, its IEEE SGL area and a trailing sense buffer, and
 * writes the command index into the frame context.
 */
static int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz = sizeof (struct mrsas_sge_ieee);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x",
 sgl_sz, tot_frame_size));

	while (i < max_cmd+1) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		/* frames must be physically contiguous and 64-byte aligned */
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer lives at the tail of the same DMA object */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		/*
		 * NOTE(review): this check can never fire (both derive from
		 * the buffer validated above), and ENOMEM is inconsistent
		 * with the DDI_SUCCESS/DDI_FAILURE convention used elsewhere
		 * -- harmless since callers only test for nonzero.
		 */
		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: pci_pool_alloc failed"));

			return (ENOMEM);
		}

		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}

/*
 * free_additional_dma_buffer
 * Releases the shared reply-queue/internal buffer and the event-detail
 * buffer allocated by alloc_additional_dma_buffer().
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer
 * Allocates one two-page DMA object laid out as:
 *   [0..3]   producer index
 *   [4..7]   consumer index
 *   [8..]    reply queue (max_fw_cmds + 1 entries)
 *   [tail]   internal scratch buffer (used e.g. by get_ctrl_info)
 * plus a separate DMA object for firmware AEN event details.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: could not alloc reply queue"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the single allocation into its fixed-offset regions */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi
 * Tears down everything alloc_space_for_mfi() built: the additional DMA
 * buffers, the frame pool, every mrsas_cmd, and the cmd_list array;
 * then reinitializes the (now empty) command list heads.
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds+1; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct mrsas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct mrsas_cmd *) * (max_cmd+1));

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->app_cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
}

/*
 * alloc_space_for_mfi
 * Allocates the command array and partitions it into the application
 * pool, the abort_aen_cmd slot, the regular I/O pool and the dedicated
 * flush_cache slot, then builds the frame pool and the additional DMA
 * buffers (continues in the next chunk).
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t
 max_cmd;
	uint32_t reserve_cmd;
	size_t sz;

	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/* reserve 1 more slot for flush_cache */
	sz = sizeof (struct mrsas_cmd *) * (max_cmd+1);

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	ASSERT(instance->cmd_list);

	for (i = 0; i < max_cmd+1; i++) {
		instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd),
		    KM_SLEEP);
		ASSERT(instance->cmd_list[i]);
	}

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	/* add all the commands to command pool (instance->cmd_pool) */
	reserve_cmd = APP_RESERVE_CMDS;
	INIT_LIST_HEAD(&instance->app_cmd_pool_list);
	/* slots [0 .. APP_RESERVE_CMDS-2] go to the application pool */
	for (i = 0; i < reserve_cmd-1; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
	}
	/*
	 * reserve slot instance->cmd_list[APP_RESERVE_CMDS-1]
	 * for abort_aen_cmd
	 */
	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	/* single slot for flush_cache won't be added in command pool */
	cmd = instance->cmd_list[max_cmd];
	cmd->index = i;

	/* create a frame pool and assign one frame to each cmd */
	if (create_mfi_frame_pool(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	/* create a frame pool and assign one frame to each cmd */
	if (alloc_additional_dma_buffer(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * get_ctrl_info
 * Issues the MR_DCMD_CTRL_GET_INFO DCMD in polled mode, using the shared
 * internal_buf as the DMA target, and copies selected fields (max request
 * size, LD count, product name) into *ctrl_info with endian-safe ddi_get
 * accessors.  Returns 0 on success, -1 on firmware/FMA failure,
 * DDI_FAILURE if no command slot is available.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_ctrl_info *ci;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* firmware writes the controller info into the shared scratch area */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* build the DCMD frame through the DMA access handle (LE layout) */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;

		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);

		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);

		/*
		 * ctrl_info->properties.on_off_properties.disable_online_ctrl_reset =
		 * ci->properties.on_off_properties.disable_online_ctrl_reset;
		 */
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
		ret = -1;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}
	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * abort_aen_cmd
 * Aborts the outstanding AEN command using the dedicated reserved slot
 * cmd_list[APP_RESERVE_CMDS-1] (function continues past this chunk).
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	/* use the slot reserved at alloc_space_for_mfi() time */
	cmd = instance->cmd_list[APP_RESERVE_CMDS-1];

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
		DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/*
prepare and issue the abort frame */ 2501 ddi_put8(cmd->frame_dma_obj.acc_handle, 2502 &abort_fr->cmd, MFI_CMD_OP_ABORT); 2503 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status, 2504 MFI_CMD_STATUS_SYNC_MODE); 2505 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0); 2506 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context, 2507 cmd_to_abort->index); 2508 ddi_put32(cmd->frame_dma_obj.acc_handle, 2509 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr); 2510 ddi_put32(cmd->frame_dma_obj.acc_handle, 2511 &abort_fr->abort_mfi_phys_addr_hi, 0); 2512 2513 instance->aen_cmd->abort_aen = 1; 2514 2515 cmd->sync_cmd = MRSAS_TRUE; 2516 cmd->frame_count = 1; 2517 2518 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2519 con_log(CL_ANN1, (CE_WARN, 2520 "abort_aen_cmd: issue_cmd_in_poll_mode failed")); 2521 ret = -1; 2522 } else { 2523 ret = 0; 2524 } 2525 2526 instance->aen_cmd->abort_aen = 1; 2527 instance->aen_cmd = 0; 2528 2529 atomic_add_16(&instance->fw_outstanding, (-1)); 2530 2531 return (ret); 2532 } 2533 2534 2535 /* 2536 * init_mfi 2537 */ 2538 static int 2539 init_mfi(struct mrsas_instance *instance) 2540 { 2541 struct mrsas_cmd *cmd; 2542 struct mrsas_ctrl_info ctrl_info; 2543 struct mrsas_init_frame *init_frame; 2544 struct mrsas_init_queue_info *initq_info; 2545 2546 /* we expect the FW state to be READY */ 2547 if (mfi_state_transition_to_ready(instance)) { 2548 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); 2549 goto fail_ready_state; 2550 } 2551 2552 /* get various operational parameters from status register */ 2553 instance->max_num_sge = 2554 (instance->func_ptr->read_fw_status_reg(instance) & 2555 0xFF0000) >> 0x10; 2556 /* 2557 * Reduce the max supported cmds by 1. 
This is to ensure that the 2558 * reply_q_sz (1 more than the max cmd that driver may send) 2559 * does not exceed max cmds that the FW can support 2560 */ 2561 instance->max_fw_cmds = 2562 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 2563 instance->max_fw_cmds = instance->max_fw_cmds - 1; 2564 2565 instance->max_num_sge = 2566 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? 2567 MRSAS_MAX_SGE_CNT : instance->max_num_sge; 2568 2569 /* create a pool of commands */ 2570 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) 2571 goto fail_alloc_fw_space; 2572 2573 /* 2574 * Prepare a init frame. Note the init frame points to queue info 2575 * structure. Each frame has SGL allocated after first 64 bytes. For 2576 * this frame - since we don't need any SGL - we use SGL's space as 2577 * queue info structure 2578 */ 2579 cmd = get_mfi_pkt(instance); 2580 cmd->retry_count_for_ocr = 0; 2581 2582 /* Clear the frame buffer and assign back the context id */ 2583 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2584 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2585 cmd->index); 2586 2587 init_frame = (struct mrsas_init_frame *)cmd->frame; 2588 initq_info = (struct mrsas_init_queue_info *) 2589 ((unsigned long)init_frame + 64); 2590 2591 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 2592 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 2593 2594 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 2595 2596 ddi_put32(cmd->frame_dma_obj.acc_handle, 2597 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 2598 2599 ddi_put32(cmd->frame_dma_obj.acc_handle, 2600 &initq_info->producer_index_phys_addr_hi, 0); 2601 ddi_put32(cmd->frame_dma_obj.acc_handle, 2602 &initq_info->producer_index_phys_addr_lo, 2603 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 2604 2605 ddi_put32(cmd->frame_dma_obj.acc_handle, 2606 &initq_info->consumer_index_phys_addr_hi, 0); 2607 
ddi_put32(cmd->frame_dma_obj.acc_handle, 2608 &initq_info->consumer_index_phys_addr_lo, 2609 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 2610 2611 ddi_put32(cmd->frame_dma_obj.acc_handle, 2612 &initq_info->reply_queue_start_phys_addr_hi, 0); 2613 ddi_put32(cmd->frame_dma_obj.acc_handle, 2614 &initq_info->reply_queue_start_phys_addr_lo, 2615 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 2616 2617 ddi_put8(cmd->frame_dma_obj.acc_handle, 2618 &init_frame->cmd, MFI_CMD_OP_INIT); 2619 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 2620 MFI_CMD_STATUS_POLL_MODE); 2621 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 2622 ddi_put32(cmd->frame_dma_obj.acc_handle, 2623 &init_frame->queue_info_new_phys_addr_lo, 2624 cmd->frame_phys_addr + 64); 2625 ddi_put32(cmd->frame_dma_obj.acc_handle, 2626 &init_frame->queue_info_new_phys_addr_hi, 0); 2627 2628 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 2629 sizeof (struct mrsas_init_queue_info)); 2630 2631 cmd->frame_count = 1; 2632 2633 /* issue the init frame in polled mode */ 2634 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2635 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 2636 return_mfi_pkt(instance, cmd); 2637 goto fail_fw_init; 2638 } 2639 2640 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2641 return_mfi_pkt(instance, cmd); 2642 goto fail_fw_init; 2643 } 2644 return_mfi_pkt(instance, cmd); 2645 2646 if (ctio_enable && 2647 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) { 2648 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported")); 2649 instance->flag_ieee = 1; 2650 } else { 2651 instance->flag_ieee = 0; 2652 } 2653 2654 instance->disable_online_ctrl_reset = 0; 2655 /* gather misc FW related information */ 2656 if (!get_ctrl_info(instance, &ctrl_info)) { 2657 instance->max_sectors_per_req = ctrl_info.max_request_size; 2658 con_log(CL_ANN1, (CE_NOTE, 2659 "product name %s 
ld present %d", 2660 ctrl_info.product_name, ctrl_info.ld_present_count)); 2661 } else { 2662 instance->max_sectors_per_req = instance->max_num_sge * 2663 PAGESIZE / 512; 2664 } 2665 /* 2666 * instance->disable_online_ctrl_reset = 2667 * ctrl_info.properties.on_off_properties.disable_online_ctrl_reset; 2668 */ 2669 return (DDI_SUCCESS); 2670 2671 fail_fw_init: 2672 fail_alloc_fw_space: 2673 2674 free_space_for_mfi(instance); 2675 2676 fail_ready_state: 2677 ddi_regs_map_free(&instance->regmap_handle); 2678 2679 fail_mfi_reg_setup: 2680 return (DDI_FAILURE); 2681 } 2682 2683 2684 2685 2686 2687 2688 static int 2689 mrsas_issue_init_mfi(struct mrsas_instance *instance) 2690 { 2691 struct mrsas_cmd *cmd; 2692 struct mrsas_init_frame *init_frame; 2693 struct mrsas_init_queue_info *initq_info; 2694 2695 /* 2696 * Prepare a init frame. Note the init frame points to queue info 2697 * structure. Each frame has SGL allocated after first 64 bytes. For 2698 * this frame - since we don't need any SGL - we use SGL's space as 2699 * queue info structure 2700 */ 2701 con_log(CL_ANN1, (CE_NOTE, 2702 "mrsas_issue_init_mfi: entry\n")); 2703 cmd = get_mfi_app_pkt(instance); 2704 2705 if (!cmd) { 2706 con_log(CL_ANN1, (CE_NOTE, 2707 "mrsas_issue_init_mfi: get_pkt failed\n")); 2708 return (DDI_FAILURE); 2709 } 2710 2711 /* Clear the frame buffer and assign back the context id */ 2712 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2713 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2714 cmd->index); 2715 2716 init_frame = (struct mrsas_init_frame *)cmd->frame; 2717 initq_info = (struct mrsas_init_queue_info *) 2718 ((unsigned long)init_frame + 64); 2719 2720 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 2721 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 2722 2723 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 2724 2725 ddi_put32(cmd->frame_dma_obj.acc_handle, 2726 &initq_info->reply_queue_entries, 
instance->max_fw_cmds + 1); 2727 ddi_put32(cmd->frame_dma_obj.acc_handle, 2728 &initq_info->producer_index_phys_addr_hi, 0); 2729 ddi_put32(cmd->frame_dma_obj.acc_handle, 2730 &initq_info->producer_index_phys_addr_lo, 2731 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 2732 ddi_put32(cmd->frame_dma_obj.acc_handle, 2733 &initq_info->consumer_index_phys_addr_hi, 0); 2734 ddi_put32(cmd->frame_dma_obj.acc_handle, 2735 &initq_info->consumer_index_phys_addr_lo, 2736 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 2737 2738 ddi_put32(cmd->frame_dma_obj.acc_handle, 2739 &initq_info->reply_queue_start_phys_addr_hi, 0); 2740 ddi_put32(cmd->frame_dma_obj.acc_handle, 2741 &initq_info->reply_queue_start_phys_addr_lo, 2742 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 2743 2744 ddi_put8(cmd->frame_dma_obj.acc_handle, 2745 &init_frame->cmd, MFI_CMD_OP_INIT); 2746 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 2747 MFI_CMD_STATUS_POLL_MODE); 2748 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 2749 ddi_put32(cmd->frame_dma_obj.acc_handle, 2750 &init_frame->queue_info_new_phys_addr_lo, 2751 cmd->frame_phys_addr + 64); 2752 ddi_put32(cmd->frame_dma_obj.acc_handle, 2753 &init_frame->queue_info_new_phys_addr_hi, 0); 2754 2755 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 2756 sizeof (struct mrsas_init_queue_info)); 2757 2758 cmd->frame_count = 1; 2759 2760 /* issue the init frame in polled mode */ 2761 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2762 con_log(CL_ANN1, (CE_WARN, 2763 "mrsas_issue_init_mfi():failed to " 2764 "init firmware")); 2765 return_mfi_app_pkt(instance, cmd); 2766 return (DDI_FAILURE); 2767 } 2768 return_mfi_app_pkt(instance, cmd); 2769 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_init_mfi: Done")); 2770 return (DDI_SUCCESS); 2771 } 2772 /* 2773 * mfi_state_transition_to_ready : Move the FW to READY state 2774 * 2775 * @reg_set : MFI register 
set 2776 */ 2777 static int 2778 mfi_state_transition_to_ready(struct mrsas_instance *instance) 2779 { 2780 int i; 2781 uint8_t max_wait; 2782 uint32_t fw_ctrl; 2783 uint32_t fw_state; 2784 uint32_t cur_state; 2785 uint32_t cur_abs_reg_val; 2786 uint32_t prev_abs_reg_val; 2787 2788 cur_abs_reg_val = 2789 instance->func_ptr->read_fw_status_reg(instance); 2790 fw_state = 2791 cur_abs_reg_val & MFI_STATE_MASK; 2792 con_log(CL_ANN1, (CE_NOTE, 2793 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2794 2795 while (fw_state != MFI_STATE_READY) { 2796 con_log(CL_ANN, (CE_NOTE, 2797 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2798 2799 switch (fw_state) { 2800 case MFI_STATE_FAULT: 2801 con_log(CL_ANN1, (CE_NOTE, 2802 "mr_sas: FW in FAULT state!!")); 2803 2804 return (ENODEV); 2805 case MFI_STATE_WAIT_HANDSHAKE: 2806 /* set the CLR bit in IMR0 */ 2807 con_log(CL_ANN1, (CE_NOTE, 2808 "mr_sas: FW waiting for HANDSHAKE")); 2809 /* 2810 * PCI_Hot Plug: MFI F/W requires 2811 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2812 * to be set 2813 */ 2814 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 2815 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 2816 MFI_INIT_HOTPLUG, instance); 2817 2818 max_wait = 2; 2819 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2820 break; 2821 case MFI_STATE_BOOT_MESSAGE_PENDING: 2822 /* set the CLR bit in IMR0 */ 2823 con_log(CL_ANN1, (CE_NOTE, 2824 "mr_sas: FW state boot message pending")); 2825 /* 2826 * PCI_Hot Plug: MFI F/W requires 2827 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2828 * to be set 2829 */ 2830 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 2831 2832 max_wait = 10; 2833 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2834 break; 2835 case MFI_STATE_OPERATIONAL: 2836 /* bring it to READY state; assuming max wait 2 secs */ 2837 instance->func_ptr->disable_intr(instance); 2838 con_log(CL_ANN1, (CE_NOTE, 2839 "mr_sas: FW in OPERATIONAL state")); 2840 /* 2841 * PCI_Hot Plug: MFI F/W requires 2842 * (MFI_INIT_READY | 
MFI_INIT_MFIMODE | MFI_INIT_ABORT) 2843 * to be set 2844 */ 2845 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 2846 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 2847 2848 max_wait = 10; 2849 cur_state = MFI_STATE_OPERATIONAL; 2850 break; 2851 case MFI_STATE_UNDEFINED: 2852 /* this state should not last for more than 2 seconds */ 2853 con_log(CL_ANN1, (CE_NOTE, "FW state undefined")); 2854 2855 max_wait = 2; 2856 cur_state = MFI_STATE_UNDEFINED; 2857 break; 2858 case MFI_STATE_BB_INIT: 2859 max_wait = 2; 2860 cur_state = MFI_STATE_BB_INIT; 2861 break; 2862 case MFI_STATE_FW_INIT: 2863 max_wait = 2; 2864 cur_state = MFI_STATE_FW_INIT; 2865 break; 2866 case MFI_STATE_DEVICE_SCAN: 2867 max_wait = 180; 2868 cur_state = MFI_STATE_DEVICE_SCAN; 2869 prev_abs_reg_val = cur_abs_reg_val; 2870 con_log(CL_NONE, (CE_NOTE, 2871 "Device scan in progress ...\n")); 2872 break; 2873 default: 2874 con_log(CL_ANN1, (CE_NOTE, 2875 "mr_sas: Unknown state 0x%x", fw_state)); 2876 return (ENODEV); 2877 } 2878 2879 /* the cur_state should not last for more than max_wait secs */ 2880 for (i = 0; i < (max_wait * MILLISEC); i++) { 2881 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 2882 cur_abs_reg_val = 2883 instance->func_ptr->read_fw_status_reg(instance); 2884 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 2885 2886 if (fw_state == cur_state) { 2887 delay(1 * drv_usectohz(MILLISEC)); 2888 } else { 2889 break; 2890 } 2891 } 2892 if (fw_state == MFI_STATE_DEVICE_SCAN) { 2893 if (prev_abs_reg_val != cur_abs_reg_val) { 2894 continue; 2895 } 2896 } 2897 2898 /* return error if fw_state hasn't changed after max_wait */ 2899 if (fw_state == cur_state) { 2900 con_log(CL_ANN1, (CE_NOTE, 2901 "FW state hasn't changed in %d secs", max_wait)); 2902 return (ENODEV); 2903 } 2904 }; 2905 2906 fw_ctrl = RD_IB_DOORBELL(instance); 2907 2908 con_log(CL_ANN1, (CE_NOTE, 2909 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2910 2911 /* 2912 * Write 0xF to the doorbell register to do the 
following. 2913 * - Abort all outstanding commands (bit 0). 2914 * - Transition from OPERATIONAL to READY state (bit 1). 2915 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2916 * - Set to release FW to continue running (i.e. BIOS handshake 2917 * (bit 3). 2918 */ 2919 WR_IB_DOORBELL(0xF, instance); 2920 2921 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2922 return (ENODEV); 2923 } 2924 return (DDI_SUCCESS); 2925 } 2926 2927 /* 2928 * get_seq_num 2929 */ 2930 static int 2931 get_seq_num(struct mrsas_instance *instance, 2932 struct mrsas_evt_log_info *eli) 2933 { 2934 int ret = DDI_SUCCESS; 2935 2936 dma_obj_t dcmd_dma_obj; 2937 struct mrsas_cmd *cmd; 2938 struct mrsas_dcmd_frame *dcmd; 2939 struct mrsas_evt_log_info *eli_tmp; 2940 cmd = get_mfi_pkt(instance); 2941 2942 if (!cmd) { 2943 cmn_err(CE_WARN, "mr_sas: failed to get a cmd"); 2944 DTRACE_PROBE2(seq_num_mfi_err, uint16_t, 2945 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 2946 return (ENOMEM); 2947 } 2948 cmd->retry_count_for_ocr = 0; 2949 /* Clear the frame buffer and assign back the context id */ 2950 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2951 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2952 cmd->index); 2953 2954 dcmd = &cmd->frame->dcmd; 2955 2956 /* allocate the data transfer buffer */ 2957 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); 2958 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 2959 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2960 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2961 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2962 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2963 2964 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 2965 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 2966 con_log(CL_ANN, (CE_WARN, 2967 "get_seq_num: could not allocate data transfer buffer.")); 2968 return (DDI_FAILURE); 2969 } 2970 2971 (void) memset(dcmd_dma_obj.buffer, 0, 2972 sizeof 
(struct mrsas_evt_log_info)); 2973 2974 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2975 2976 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 2977 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); 2978 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 2979 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 2980 MFI_FRAME_DIR_READ); 2981 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 2982 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 2983 sizeof (struct mrsas_evt_log_info)); 2984 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 2985 MR_DCMD_CTRL_EVENT_GET_INFO); 2986 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 2987 sizeof (struct mrsas_evt_log_info)); 2988 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 2989 dcmd_dma_obj.dma_cookie[0].dmac_address); 2990 2991 cmd->sync_cmd = MRSAS_TRUE; 2992 cmd->frame_count = 1; 2993 2994 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2995 cmn_err(CE_WARN, "get_seq_num: " 2996 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); 2997 ret = DDI_FAILURE; 2998 } else { 2999 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer; 3000 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle, 3001 &eli_tmp->newest_seq_num); 3002 ret = DDI_SUCCESS; 3003 } 3004 3005 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3006 ret = DDI_FAILURE; 3007 3008 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 3009 ret = DDI_FAILURE; 3010 } 3011 3012 return_mfi_pkt(instance, cmd); 3013 3014 return (ret); 3015 } 3016 3017 /* 3018 * start_mfi_aen 3019 */ 3020 static int 3021 start_mfi_aen(struct mrsas_instance *instance) 3022 { 3023 int ret = 0; 3024 3025 struct mrsas_evt_log_info eli; 3026 union mrsas_evt_class_locale class_locale; 3027 3028 /* get the latest sequence number from FW */ 3029 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info)); 3030 3031 if 
(get_seq_num(instance, &eli)) { 3032 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num"); 3033 return (-1); 3034 } 3035 3036 /* register AEN with FW for latest sequence number plus 1 */ 3037 class_locale.members.reserved = 0; 3038 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL); 3039 class_locale.members.class = MR_EVT_CLASS_INFO; 3040 class_locale.word = LE_32(class_locale.word); 3041 ret = register_mfi_aen(instance, eli.newest_seq_num + 1, 3042 class_locale.word); 3043 3044 if (ret) { 3045 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed"); 3046 return (-1); 3047 } 3048 3049 return (ret); 3050 } 3051 3052 /* 3053 * flush_cache 3054 */ 3055 static void 3056 flush_cache(struct mrsas_instance *instance) 3057 { 3058 struct mrsas_cmd *cmd = NULL; 3059 struct mrsas_dcmd_frame *dcmd; 3060 uint32_t max_cmd = instance->max_fw_cmds; 3061 3062 cmd = instance->cmd_list[max_cmd]; 3063 3064 if (!cmd) { 3065 con_log(CL_ANN1, (CE_WARN, 3066 "flush_cache():Failed to get a cmd for flush_cache")); 3067 DTRACE_PROBE2(flush_cache_err, uint16_t, 3068 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3069 return; 3070 } 3071 dcmd = &cmd->frame->dcmd; 3072 3073 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 3074 3075 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 3076 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 3077 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0); 3078 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 3079 MFI_FRAME_DIR_NONE); 3080 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 3081 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0); 3082 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 3083 MR_DCMD_CTRL_CACHE_FLUSH); 3084 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0], 3085 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE); 3086 3087 cmd->frame_count = 1; 3088 3089 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 3090 
con_log(CL_ANN1, (CE_WARN, 3091 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH")); 3092 } 3093 con_log(CL_ANN1, (CE_NOTE, "flush_cache done")); 3094 } 3095 3096 /* 3097 * service_mfi_aen- Completes an AEN command 3098 * @instance: Adapter soft state 3099 * @cmd: Command to be completed 3100 * 3101 */ 3102 static void 3103 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 3104 { 3105 uint32_t seq_num; 3106 struct mrsas_evt_detail *evt_detail = 3107 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer; 3108 int rval = 0; 3109 int tgt = 0; 3110 ddi_acc_handle_t acc_handle; 3111 3112 acc_handle = cmd->frame_dma_obj.acc_handle; 3113 3114 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status); 3115 3116 if (cmd->cmd_status == ENODATA) { 3117 cmd->cmd_status = 0; 3118 } 3119 3120 /* 3121 * log the MFI AEN event to the sysevent queue so that 3122 * application will get noticed 3123 */ 3124 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS", 3125 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) { 3126 int instance_no = ddi_get_instance(instance->dip); 3127 con_log(CL_ANN, (CE_WARN, 3128 "mr_sas%d: Failed to log AEN event", instance_no)); 3129 } 3130 /* 3131 * Check for any ld devices that has changed state. i.e. online 3132 * or offline. 
3133 */ 3134 con_log(CL_ANN1, (CE_NOTE, 3135 "AEN: code = %x class = %x locale = %x args = %x", 3136 ddi_get32(acc_handle, &evt_detail->code), 3137 evt_detail->cl.members.class, 3138 ddi_get16(acc_handle, &evt_detail->cl.members.locale), 3139 ddi_get8(acc_handle, &evt_detail->arg_type))); 3140 3141 switch (ddi_get32(acc_handle, &evt_detail->code)) { 3142 case MR_EVT_CFG_CLEARED: { 3143 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 3144 if (instance->mr_ld_list[tgt].dip != NULL) { 3145 rval = mrsas_service_evt(instance, tgt, 0, 3146 MRSAS_EVT_UNCONFIG_TGT, NULL); 3147 con_log(CL_ANN1, (CE_WARN, 3148 "mr_sas: CFG CLEARED AEN rval = %d " 3149 "tgt id = %d", rval, tgt)); 3150 } 3151 } 3152 break; 3153 } 3154 3155 case MR_EVT_LD_DELETED: { 3156 rval = mrsas_service_evt(instance, 3157 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 3158 MRSAS_EVT_UNCONFIG_TGT, NULL); 3159 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d " 3160 "tgt id = %d index = %d", rval, 3161 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 3162 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 3163 break; 3164 } /* End of MR_EVT_LD_DELETED */ 3165 3166 case MR_EVT_LD_CREATED: { 3167 rval = mrsas_service_evt(instance, 3168 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 3169 MRSAS_EVT_CONFIG_TGT, NULL); 3170 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d " 3171 "tgt id = %d index = %d", rval, 3172 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 3173 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 3174 break; 3175 } /* End of MR_EVT_LD_CREATED */ 3176 } /* End of Main Switch */ 3177 3178 /* get copy of seq_num and class/locale for re-registration */ 3179 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num); 3180 seq_num++; 3181 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 3182 sizeof (struct mrsas_evt_detail)); 3183 3184 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0); 3185 ddi_put32(acc_handle, 
&cmd->frame->dcmd.mbox.w[0], seq_num); 3186 3187 instance->aen_seq_num = seq_num; 3188 3189 cmd->frame_count = 1; 3190 3191 /* Issue the aen registration frame */ 3192 instance->func_ptr->issue_cmd(cmd, instance); 3193 } 3194 3195 /* 3196 * complete_cmd_in_sync_mode - Completes an internal command 3197 * @instance: Adapter soft state 3198 * @cmd: Command to be completed 3199 * 3200 * The issue_cmd_in_sync_mode() function waits for a command to complete 3201 * after it issues a command. This function wakes up that waiting routine by 3202 * calling wake_up() on the wait queue. 3203 */ 3204 static void 3205 complete_cmd_in_sync_mode(struct mrsas_instance *instance, 3206 struct mrsas_cmd *cmd) 3207 { 3208 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle, 3209 &cmd->frame->io.cmd_status); 3210 3211 cmd->sync_cmd = MRSAS_FALSE; 3212 3213 if (cmd->cmd_status == ENODATA) { 3214 cmd->cmd_status = 0; 3215 } 3216 3217 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n", 3218 (void *)cmd)); 3219 3220 cv_broadcast(&instance->int_cmd_cv); 3221 } 3222 3223 /* 3224 * Call this function inside mrsas_softintr. 
3225 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty 3226 * @instance: Adapter soft state 3227 */ 3228 3229 static uint32_t 3230 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance) 3231 { 3232 uint32_t cur_abs_reg_val; 3233 uint32_t fw_state; 3234 3235 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance); 3236 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 3237 if (fw_state == MFI_STATE_FAULT) { 3238 3239 if (instance->disable_online_ctrl_reset == 1) { 3240 con_log(CL_ANN1, (CE_NOTE, 3241 "mrsas_initiate_ocr_if_fw_is_faulty: " 3242 "FW in Fault state, detected in ISR: " 3243 "FW doesn't support ocr ")); 3244 return (ADAPTER_RESET_NOT_REQUIRED); 3245 } else { 3246 con_log(CL_ANN1, (CE_NOTE, 3247 "mrsas_initiate_ocr_if_fw_is_faulty: " 3248 "FW in Fault state, detected in ISR: FW supports ocr ")); 3249 return (ADAPTER_RESET_REQUIRED); 3250 } 3251 } 3252 return (ADAPTER_RESET_NOT_REQUIRED); 3253 } 3254 3255 /* 3256 * mrsas_softintr - The Software ISR 3257 * @param arg : HBA soft state 3258 * 3259 * called from high-level interrupt if hi-level interrupt are not there, 3260 * otherwise triggered as a soft interrupt 3261 */ 3262 static uint_t 3263 mrsas_softintr(struct mrsas_instance *instance) 3264 { 3265 struct scsi_pkt *pkt; 3266 struct scsa_cmd *acmd; 3267 struct mrsas_cmd *cmd; 3268 struct mlist_head *pos, *next; 3269 mlist_t process_list; 3270 struct mrsas_header *hdr; 3271 struct scsi_arq_status *arqstat; 3272 3273 con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called")); 3274 3275 ASSERT(instance); 3276 3277 mutex_enter(&instance->completed_pool_mtx); 3278 3279 if (mlist_empty(&instance->completed_pool_list)) { 3280 mutex_exit(&instance->completed_pool_mtx); 3281 return (DDI_INTR_CLAIMED); 3282 } 3283 3284 instance->softint_running = 1; 3285 3286 INIT_LIST_HEAD(&process_list); 3287 mlist_splice(&instance->completed_pool_list, &process_list); 3288 INIT_LIST_HEAD(&instance->completed_pool_list); 3289 3290 
mutex_exit(&instance->completed_pool_mtx); 3291 3292 /* perform all callbacks first, before releasing the SCBs */ 3293 mlist_for_each_safe(pos, next, &process_list) { 3294 cmd = mlist_entry(pos, struct mrsas_cmd, list); 3295 3296 /* syncronize the Cmd frame for the controller */ 3297 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 3298 0, 0, DDI_DMA_SYNC_FORCPU); 3299 3300 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 3301 DDI_SUCCESS) { 3302 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 3303 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3304 con_log(CL_ANN1, (CE_WARN, 3305 "mrsas_softintr: " 3306 "FMA check reports DMA handle failure")); 3307 return (DDI_INTR_CLAIMED); 3308 } 3309 3310 hdr = &cmd->frame->hdr; 3311 3312 /* remove the internal command from the process list */ 3313 mlist_del_init(&cmd->list); 3314 3315 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 3316 case MFI_CMD_OP_PD_SCSI: 3317 case MFI_CMD_OP_LD_SCSI: 3318 case MFI_CMD_OP_LD_READ: 3319 case MFI_CMD_OP_LD_WRITE: 3320 /* 3321 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI 3322 * could have been issued either through an 3323 * IO path or an IOCTL path. If it was via IOCTL, 3324 * we will send it to internal completion. 
3325 */ 3326 if (cmd->sync_cmd == MRSAS_TRUE) { 3327 complete_cmd_in_sync_mode(instance, cmd); 3328 break; 3329 } 3330 3331 /* regular commands */ 3332 acmd = cmd->cmd; 3333 pkt = CMD2PKT(acmd); 3334 3335 if (acmd->cmd_flags & CFLAG_DMAVALID) { 3336 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3337 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3338 acmd->cmd_dma_offset, 3339 acmd->cmd_dma_len, 3340 DDI_DMA_SYNC_FORCPU); 3341 } 3342 } 3343 3344 pkt->pkt_reason = CMD_CMPLT; 3345 pkt->pkt_statistics = 0; 3346 pkt->pkt_state = STATE_GOT_BUS 3347 | STATE_GOT_TARGET | STATE_SENT_CMD 3348 | STATE_XFERRED_DATA | STATE_GOT_STATUS; 3349 3350 con_log(CL_ANN1, (CE_CONT, 3351 "CDB[0] = %x completed for %s: size %lx context %x", 3352 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"), 3353 acmd->cmd_dmacount, hdr->context)); 3354 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0], 3355 uint_t, acmd->cmd_cdblen, ulong_t, 3356 acmd->cmd_dmacount); 3357 3358 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { 3359 struct scsi_inquiry *inq; 3360 3361 if (acmd->cmd_dmacount != 0) { 3362 bp_mapin(acmd->cmd_buf); 3363 inq = (struct scsi_inquiry *) 3364 acmd->cmd_buf->b_un.b_addr; 3365 3366 /* don't expose physical drives to OS */ 3367 if (acmd->islogical && 3368 (hdr->cmd_status == MFI_STAT_OK)) { 3369 display_scsi_inquiry( 3370 (caddr_t)inq); 3371 } else if ((hdr->cmd_status == 3372 MFI_STAT_OK) && inq->inq_dtype == 3373 DTYPE_DIRECT) { 3374 3375 display_scsi_inquiry( 3376 (caddr_t)inq); 3377 3378 /* for physical disk */ 3379 hdr->cmd_status = 3380 MFI_STAT_DEVICE_NOT_FOUND; 3381 } 3382 } 3383 } 3384 3385 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd, 3386 uint8_t, hdr->cmd_status); 3387 3388 switch (hdr->cmd_status) { 3389 case MFI_STAT_OK: 3390 pkt->pkt_scbp[0] = STATUS_GOOD; 3391 break; 3392 case MFI_STAT_LD_CC_IN_PROGRESS: 3393 case MFI_STAT_LD_RECON_IN_PROGRESS: 3394 pkt->pkt_scbp[0] = STATUS_GOOD; 3395 break; 3396 case MFI_STAT_LD_INIT_IN_PROGRESS: 3397 con_log(CL_ANN, 3398 (CE_WARN, 
"Initialization in Progress")); 3399 pkt->pkt_reason = CMD_TRAN_ERR; 3400 3401 break; 3402 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3403 con_log(CL_ANN1, (CE_CONT, "scsi_done error")); 3404 3405 pkt->pkt_reason = CMD_CMPLT; 3406 ((struct scsi_status *) 3407 pkt->pkt_scbp)->sts_chk = 1; 3408 3409 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { 3410 3411 con_log(CL_ANN, 3412 (CE_WARN, "TEST_UNIT_READY fail")); 3413 3414 } else { 3415 pkt->pkt_state |= STATE_ARQ_DONE; 3416 arqstat = (void *)(pkt->pkt_scbp); 3417 arqstat->sts_rqpkt_reason = CMD_CMPLT; 3418 arqstat->sts_rqpkt_resid = 0; 3419 arqstat->sts_rqpkt_state |= 3420 STATE_GOT_BUS | STATE_GOT_TARGET 3421 | STATE_SENT_CMD 3422 | STATE_XFERRED_DATA; 3423 *(uint8_t *)&arqstat->sts_rqpkt_status = 3424 STATUS_GOOD; 3425 ddi_rep_get8( 3426 cmd->frame_dma_obj.acc_handle, 3427 (uint8_t *) 3428 &(arqstat->sts_sensedata), 3429 cmd->sense, 3430 acmd->cmd_scblen - 3431 offsetof(struct scsi_arq_status, 3432 sts_sensedata), DDI_DEV_AUTOINCR); 3433 } 3434 break; 3435 case MFI_STAT_LD_OFFLINE: 3436 case MFI_STAT_DEVICE_NOT_FOUND: 3437 con_log(CL_ANN1, (CE_CONT, 3438 "mrsas_softintr:device not found error")); 3439 pkt->pkt_reason = CMD_DEV_GONE; 3440 pkt->pkt_statistics = STAT_DISCON; 3441 break; 3442 case MFI_STAT_LD_LBA_OUT_OF_RANGE: 3443 pkt->pkt_state |= STATE_ARQ_DONE; 3444 pkt->pkt_reason = CMD_CMPLT; 3445 ((struct scsi_status *) 3446 pkt->pkt_scbp)->sts_chk = 1; 3447 3448 arqstat = (void *)(pkt->pkt_scbp); 3449 arqstat->sts_rqpkt_reason = CMD_CMPLT; 3450 arqstat->sts_rqpkt_resid = 0; 3451 arqstat->sts_rqpkt_state |= STATE_GOT_BUS 3452 | STATE_GOT_TARGET | STATE_SENT_CMD 3453 | STATE_XFERRED_DATA; 3454 *(uint8_t *)&arqstat->sts_rqpkt_status = 3455 STATUS_GOOD; 3456 3457 arqstat->sts_sensedata.es_valid = 1; 3458 arqstat->sts_sensedata.es_key = 3459 KEY_ILLEGAL_REQUEST; 3460 arqstat->sts_sensedata.es_class = 3461 CLASS_EXTENDED_SENSE; 3462 3463 /* 3464 * LOGICAL BLOCK ADDRESS OUT OF RANGE: 3465 * ASC: 0x21h; ASCQ: 0x00h; 3466 */ 
3467 arqstat->sts_sensedata.es_add_code = 0x21; 3468 arqstat->sts_sensedata.es_qual_code = 0x00; 3469 3470 break; 3471 3472 default: 3473 con_log(CL_ANN, (CE_CONT, "Unknown status!")); 3474 pkt->pkt_reason = CMD_TRAN_ERR; 3475 3476 break; 3477 } 3478 3479 atomic_add_16(&instance->fw_outstanding, (-1)); 3480 3481 (void) mrsas_common_check(instance, cmd); 3482 3483 if (acmd->cmd_dmahandle) { 3484 if (mrsas_check_dma_handle( 3485 acmd->cmd_dmahandle) != DDI_SUCCESS) { 3486 ddi_fm_service_impact(instance->dip, 3487 DDI_SERVICE_UNAFFECTED); 3488 pkt->pkt_reason = CMD_TRAN_ERR; 3489 pkt->pkt_statistics = 0; 3490 } 3491 } 3492 3493 /* Call the callback routine */ 3494 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 3495 pkt->pkt_comp) { 3496 3497 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr: " 3498 "posting to scsa cmd %p index %x pkt %p " 3499 "time %llx", (void *)cmd, cmd->index, 3500 (void *)pkt, gethrtime())); 3501 (*pkt->pkt_comp)(pkt); 3502 3503 } 3504 return_mfi_pkt(instance, cmd); 3505 break; 3506 case MFI_CMD_OP_SMP: 3507 case MFI_CMD_OP_STP: 3508 complete_cmd_in_sync_mode(instance, cmd); 3509 break; 3510 case MFI_CMD_OP_DCMD: 3511 /* see if got an event notification */ 3512 if (ddi_get32(cmd->frame_dma_obj.acc_handle, 3513 &cmd->frame->dcmd.opcode) == 3514 MR_DCMD_CTRL_EVENT_WAIT) { 3515 if ((instance->aen_cmd == cmd) && 3516 (instance->aen_cmd->abort_aen)) { 3517 con_log(CL_ANN, (CE_WARN, 3518 "mrsas_softintr: " 3519 "aborted_aen returned")); 3520 } else { 3521 atomic_add_16(&instance->fw_outstanding, 3522 (-1)); 3523 service_mfi_aen(instance, cmd); 3524 } 3525 } else { 3526 complete_cmd_in_sync_mode(instance, cmd); 3527 } 3528 3529 break; 3530 case MFI_CMD_OP_ABORT: 3531 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete")); 3532 /* 3533 * MFI_CMD_OP_ABORT successfully completed 3534 * in the synchronous mode 3535 */ 3536 complete_cmd_in_sync_mode(instance, cmd); 3537 break; 3538 default: 3539 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 3540 
ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3541 3542 if (cmd->pkt != NULL) { 3543 pkt = cmd->pkt; 3544 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 3545 pkt->pkt_comp) { 3546 3547 con_log(CL_ANN1, (CE_CONT, "posting to " 3548 "scsa cmd %p index %x pkt %p" 3549 "time %llx, default ", (void *)cmd, 3550 cmd->index, (void *)pkt, 3551 gethrtime())); 3552 3553 (*pkt->pkt_comp)(pkt); 3554 3555 } 3556 } 3557 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !")); 3558 break; 3559 } 3560 } 3561 3562 instance->softint_running = 0; 3563 3564 return (DDI_INTR_CLAIMED); 3565 } 3566 3567 /* 3568 * mrsas_alloc_dma_obj 3569 * 3570 * Allocate the memory and other resources for an dma object. 3571 */ 3572 static int 3573 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj, 3574 uchar_t endian_flags) 3575 { 3576 int i; 3577 size_t alen = 0; 3578 uint_t cookie_cnt; 3579 struct ddi_device_acc_attr tmp_endian_attr; 3580 3581 tmp_endian_attr = endian_attr; 3582 tmp_endian_attr.devacc_attr_endian_flags = endian_flags; 3583 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3584 3585 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr, 3586 DDI_DMA_SLEEP, NULL, &obj->dma_handle); 3587 if (i != DDI_SUCCESS) { 3588 3589 switch (i) { 3590 case DDI_DMA_BADATTR : 3591 con_log(CL_ANN, (CE_WARN, 3592 "Failed ddi_dma_alloc_handle- Bad attribute")); 3593 break; 3594 case DDI_DMA_NORESOURCES : 3595 con_log(CL_ANN, (CE_WARN, 3596 "Failed ddi_dma_alloc_handle- No Resources")); 3597 break; 3598 default : 3599 con_log(CL_ANN, (CE_WARN, 3600 "Failed ddi_dma_alloc_handle: " 3601 "unknown status %d", i)); 3602 break; 3603 } 3604 3605 return (-1); 3606 } 3607 3608 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr, 3609 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 3610 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) || 3611 alen < obj->size) { 3612 3613 ddi_dma_free_handle(&obj->dma_handle); 3614 3615 con_log(CL_ANN, (CE_WARN, "Failed : 
ddi_dma_mem_alloc")); 3616 3617 return (-1); 3618 } 3619 3620 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer, 3621 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 3622 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) { 3623 3624 ddi_dma_mem_free(&obj->acc_handle); 3625 ddi_dma_free_handle(&obj->dma_handle); 3626 3627 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle")); 3628 3629 return (-1); 3630 } 3631 3632 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) { 3633 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3634 return (-1); 3635 } 3636 3637 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) { 3638 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3639 return (-1); 3640 } 3641 3642 return (cookie_cnt); 3643 } 3644 3645 /* 3646 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t) 3647 * 3648 * De-allocate the memory and other resources for an dma object, which must 3649 * have been alloated by a previous call to mrsas_alloc_dma_obj() 3650 */ 3651 static int 3652 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj) 3653 { 3654 3655 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) { 3656 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3657 return (DDI_FAILURE); 3658 } 3659 3660 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) { 3661 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3662 return (DDI_FAILURE); 3663 } 3664 3665 (void) ddi_dma_unbind_handle(obj.dma_handle); 3666 ddi_dma_mem_free(&obj.acc_handle); 3667 ddi_dma_free_handle(&obj.dma_handle); 3668 3669 return (DDI_SUCCESS); 3670 } 3671 3672 /* 3673 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *, 3674 * int, int (*)()) 3675 * 3676 * Allocate dma resources for a new scsi command 3677 */ 3678 static int 3679 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt, 3680 struct buf *bp, int flags, int (*callback)()) 3681 { 3682 int 
dma_flags; 3683 int (*cb)(caddr_t); 3684 int i; 3685 3686 ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr; 3687 struct scsa_cmd *acmd = PKT2CMD(pkt); 3688 3689 acmd->cmd_buf = bp; 3690 3691 if (bp->b_flags & B_READ) { 3692 acmd->cmd_flags &= ~CFLAG_DMASEND; 3693 dma_flags = DDI_DMA_READ; 3694 } else { 3695 acmd->cmd_flags |= CFLAG_DMASEND; 3696 dma_flags = DDI_DMA_WRITE; 3697 } 3698 3699 if (flags & PKT_CONSISTENT) { 3700 acmd->cmd_flags |= CFLAG_CONSISTENT; 3701 dma_flags |= DDI_DMA_CONSISTENT; 3702 } 3703 3704 if (flags & PKT_DMA_PARTIAL) { 3705 dma_flags |= DDI_DMA_PARTIAL; 3706 } 3707 3708 dma_flags |= DDI_DMA_REDZONE; 3709 3710 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP; 3711 3712 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge; 3713 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 3714 3715 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr, 3716 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) { 3717 switch (i) { 3718 case DDI_DMA_BADATTR: 3719 bioerror(bp, EFAULT); 3720 return (DDI_FAILURE); 3721 3722 case DDI_DMA_NORESOURCES: 3723 bioerror(bp, 0); 3724 return (DDI_FAILURE); 3725 3726 default: 3727 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: " 3728 "impossible result (0x%x)", i)); 3729 bioerror(bp, EFAULT); 3730 return (DDI_FAILURE); 3731 } 3732 } 3733 3734 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags, 3735 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies); 3736 3737 switch (i) { 3738 case DDI_DMA_PARTIAL_MAP: 3739 if ((dma_flags & DDI_DMA_PARTIAL) == 0) { 3740 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 3741 "DDI_DMA_PARTIAL_MAP impossible")); 3742 goto no_dma_cookies; 3743 } 3744 3745 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) == 3746 DDI_FAILURE) { 3747 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed")); 3748 goto no_dma_cookies; 3749 } 3750 3751 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3752 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3753 
&acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3754 DDI_FAILURE) { 3755 3756 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed")); 3757 goto no_dma_cookies; 3758 } 3759 3760 goto get_dma_cookies; 3761 case DDI_DMA_MAPPED: 3762 acmd->cmd_nwin = 1; 3763 acmd->cmd_dma_len = 0; 3764 acmd->cmd_dma_offset = 0; 3765 3766 get_dma_cookies: 3767 i = 0; 3768 acmd->cmd_dmacount = 0; 3769 for (;;) { 3770 acmd->cmd_dmacount += 3771 acmd->cmd_dmacookies[i++].dmac_size; 3772 3773 if (i == instance->max_num_sge || 3774 i == acmd->cmd_ncookies) 3775 break; 3776 3777 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3778 &acmd->cmd_dmacookies[i]); 3779 } 3780 3781 acmd->cmd_cookie = i; 3782 acmd->cmd_cookiecnt = i; 3783 3784 acmd->cmd_flags |= CFLAG_DMAVALID; 3785 3786 if (bp->b_bcount >= acmd->cmd_dmacount) { 3787 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3788 } else { 3789 pkt->pkt_resid = 0; 3790 } 3791 3792 return (DDI_SUCCESS); 3793 case DDI_DMA_NORESOURCES: 3794 bioerror(bp, 0); 3795 break; 3796 case DDI_DMA_NOMAPPING: 3797 bioerror(bp, EFAULT); 3798 break; 3799 case DDI_DMA_TOOBIG: 3800 bioerror(bp, EINVAL); 3801 break; 3802 case DDI_DMA_INUSE: 3803 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:" 3804 " DDI_DMA_INUSE impossible")); 3805 break; 3806 default: 3807 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 3808 "impossible result (0x%x)", i)); 3809 break; 3810 } 3811 3812 no_dma_cookies: 3813 ddi_dma_free_handle(&acmd->cmd_dmahandle); 3814 acmd->cmd_dmahandle = NULL; 3815 acmd->cmd_flags &= ~CFLAG_DMAVALID; 3816 return (DDI_FAILURE); 3817 } 3818 3819 /* 3820 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *) 3821 * 3822 * move dma resources to next dma window 3823 * 3824 */ 3825 static int 3826 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt, 3827 struct buf *bp) 3828 { 3829 int i = 0; 3830 3831 struct scsa_cmd *acmd = PKT2CMD(pkt); 3832 3833 /* 3834 * If there are no more cookies remaining in this window, 3835 * 
must move to the next window first. 3836 */ 3837 if (acmd->cmd_cookie == acmd->cmd_ncookies) { 3838 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) { 3839 return (DDI_SUCCESS); 3840 } 3841 3842 /* at last window, cannot move */ 3843 if (++acmd->cmd_curwin >= acmd->cmd_nwin) { 3844 return (DDI_FAILURE); 3845 } 3846 3847 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3848 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3849 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3850 DDI_FAILURE) { 3851 return (DDI_FAILURE); 3852 } 3853 3854 acmd->cmd_cookie = 0; 3855 } else { 3856 /* still more cookies in this window - get the next one */ 3857 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3858 &acmd->cmd_dmacookies[0]); 3859 } 3860 3861 /* get remaining cookies in this window, up to our maximum */ 3862 for (;;) { 3863 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size; 3864 acmd->cmd_cookie++; 3865 3866 if (i == instance->max_num_sge || 3867 acmd->cmd_cookie == acmd->cmd_ncookies) { 3868 break; 3869 } 3870 3871 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3872 &acmd->cmd_dmacookies[i]); 3873 } 3874 3875 acmd->cmd_cookiecnt = i; 3876 3877 if (bp->b_bcount >= acmd->cmd_dmacount) { 3878 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3879 } else { 3880 pkt->pkt_resid = 0; 3881 } 3882 3883 return (DDI_SUCCESS); 3884 } 3885 3886 /* 3887 * build_cmd 3888 */ 3889 static struct mrsas_cmd * 3890 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap, 3891 struct scsi_pkt *pkt, uchar_t *cmd_done) 3892 { 3893 uint16_t flags = 0; 3894 uint32_t i; 3895 uint32_t context; 3896 uint32_t sge_bytes; 3897 ddi_acc_handle_t acc_handle; 3898 struct mrsas_cmd *cmd; 3899 struct mrsas_sge64 *mfi_sgl; 3900 struct mrsas_sge_ieee *mfi_sgl_ieee; 3901 struct scsa_cmd *acmd = PKT2CMD(pkt); 3902 struct mrsas_pthru_frame *pthru; 3903 struct mrsas_io_frame *ldio; 3904 3905 /* find out if this is logical or physical drive command. 
*/ 3906 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3907 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3908 *cmd_done = 0; 3909 3910 /* get the command packet */ 3911 if (!(cmd = get_mfi_pkt(instance))) { 3912 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t, 3913 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3914 return (NULL); 3915 } 3916 3917 cmd->retry_count_for_ocr = 0; 3918 3919 acc_handle = cmd->frame_dma_obj.acc_handle; 3920 3921 /* Clear the frame buffer and assign back the context id */ 3922 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3923 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3924 3925 cmd->pkt = pkt; 3926 cmd->cmd = acmd; 3927 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0], 3928 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len); 3929 3930 /* lets get the command directions */ 3931 if (acmd->cmd_flags & CFLAG_DMASEND) { 3932 flags = MFI_FRAME_DIR_WRITE; 3933 3934 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3935 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3936 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3937 DDI_DMA_SYNC_FORDEV); 3938 } 3939 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3940 flags = MFI_FRAME_DIR_READ; 3941 3942 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3943 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3944 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3945 DDI_DMA_SYNC_FORCPU); 3946 } 3947 } else { 3948 flags = MFI_FRAME_DIR_NONE; 3949 } 3950 3951 if (instance->flag_ieee) { 3952 flags |= MFI_FRAME_IEEE; 3953 } 3954 flags |= MFI_FRAME_SGL64; 3955 3956 switch (pkt->pkt_cdbp[0]) { 3957 3958 /* 3959 * case SCMD_SYNCHRONIZE_CACHE: 3960 * flush_cache(instance); 3961 * return_mfi_pkt(instance, cmd); 3962 * *cmd_done = 1; 3963 * 3964 * return (NULL); 3965 */ 3966 3967 case SCMD_READ: 3968 case SCMD_WRITE: 3969 case SCMD_READ_G1: 3970 case SCMD_WRITE_G1: 3971 if (acmd->islogical) { 3972 ldio = (struct mrsas_io_frame *)cmd->frame; 3973 3974 /* 3975 * preare the Logical IO frame: 3976 * 2nd bit is 
zero for all read cmds 3977 */ 3978 ddi_put8(acc_handle, &ldio->cmd, 3979 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE 3980 : MFI_CMD_OP_LD_READ); 3981 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3982 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3983 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3984 ddi_put16(acc_handle, &ldio->timeout, 0); 3985 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3986 ddi_put16(acc_handle, &ldio->pad_0, 0); 3987 ddi_put16(acc_handle, &ldio->flags, flags); 3988 3989 /* Initialize sense Information */ 3990 bzero(cmd->sense, SENSE_LENGTH); 3991 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3992 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3993 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3994 cmd->sense_phys_addr); 3995 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3996 ddi_put8(acc_handle, &ldio->access_byte, 3997 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 3998 ddi_put8(acc_handle, &ldio->sge_count, 3999 acmd->cmd_cookiecnt); 4000 if (instance->flag_ieee) { 4001 mfi_sgl_ieee = 4002 (struct mrsas_sge_ieee *)&ldio->sgl; 4003 } else { 4004 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 4005 } 4006 4007 context = ddi_get32(acc_handle, &ldio->context); 4008 4009 if (acmd->cmd_cdblen == CDB_GROUP0) { 4010 ddi_put32(acc_handle, &ldio->lba_count, ( 4011 (uint16_t)(pkt->pkt_cdbp[4]))); 4012 4013 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4014 ((uint32_t)(pkt->pkt_cdbp[3])) | 4015 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 4016 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 4017 << 16))); 4018 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 4019 ddi_put32(acc_handle, &ldio->lba_count, ( 4020 ((uint16_t)(pkt->pkt_cdbp[8])) | 4021 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 4022 4023 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4024 ((uint32_t)(pkt->pkt_cdbp[5])) | 4025 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4026 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4027 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4028 } else if 
(acmd->cmd_cdblen == CDB_GROUP2) { 4029 ddi_put32(acc_handle, &ldio->lba_count, ( 4030 ((uint16_t)(pkt->pkt_cdbp[9])) | 4031 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 4032 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 4033 ((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 4034 4035 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4036 ((uint32_t)(pkt->pkt_cdbp[5])) | 4037 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4038 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4039 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4040 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 4041 ddi_put32(acc_handle, &ldio->lba_count, ( 4042 ((uint16_t)(pkt->pkt_cdbp[13])) | 4043 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 4044 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 4045 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 4046 4047 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4048 ((uint32_t)(pkt->pkt_cdbp[9])) | 4049 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 4050 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 4051 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 4052 4053 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 4054 ((uint32_t)(pkt->pkt_cdbp[5])) | 4055 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 4056 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 4057 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 4058 } 4059 4060 break; 4061 } 4062 /* fall through For all non-rd/wr cmds */ 4063 default: 4064 4065 switch (pkt->pkt_cdbp[0]) { 4066 case SCMD_MODE_SENSE: 4067 case SCMD_MODE_SENSE_G1: { 4068 union scsi_cdb *cdbp; 4069 uint16_t page_code; 4070 4071 cdbp = (void *)pkt->pkt_cdbp; 4072 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 4073 switch (page_code) { 4074 case 0x3: 4075 case 0x4: 4076 (void) mrsas_mode_sense_build(pkt); 4077 return_mfi_pkt(instance, cmd); 4078 *cmd_done = 1; 4079 return (NULL); 4080 } 4081 break; 4082 } 4083 default: 4084 break; 4085 } 4086 4087 pthru = (struct mrsas_pthru_frame *)cmd->frame; 4088 4089 /* prepare the DCDB frame */ 4090 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
4091 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 4092 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 4093 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 4094 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 4095 ddi_put8(acc_handle, &pthru->lun, 0); 4096 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 4097 ddi_put16(acc_handle, &pthru->timeout, 0); 4098 ddi_put16(acc_handle, &pthru->flags, flags); 4099 ddi_put32(acc_handle, &pthru->data_xfer_len, 4100 acmd->cmd_dmacount); 4101 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 4102 if (instance->flag_ieee) { 4103 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl; 4104 } else { 4105 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 4106 } 4107 4108 bzero(cmd->sense, SENSE_LENGTH); 4109 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 4110 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 4111 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 4112 cmd->sense_phys_addr); 4113 4114 context = ddi_get32(acc_handle, &pthru->context); 4115 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 4116 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 4117 4118 break; 4119 } 4120 #ifdef lint 4121 context = context; 4122 #endif 4123 /* prepare the scatter-gather list for the firmware */ 4124 if (instance->flag_ieee) { 4125 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) { 4126 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr, 4127 acmd->cmd_dmacookies[i].dmac_laddress); 4128 ddi_put32(acc_handle, &mfi_sgl_ieee->length, 4129 acmd->cmd_dmacookies[i].dmac_size); 4130 } 4131 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt; 4132 } else { 4133 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 4134 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 4135 acmd->cmd_dmacookies[i].dmac_laddress); 4136 ddi_put32(acc_handle, &mfi_sgl->length, 4137 acmd->cmd_dmacookies[i].dmac_size); 4138 } 4139 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 4140 } 4141 4142 
cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 4143 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1; 4144 4145 if (cmd->frame_count >= 8) { 4146 cmd->frame_count = 8; 4147 } 4148 4149 return (cmd); 4150 } 4151 #ifndef __sparc 4152 static int 4153 wait_for_outstanding(struct mrsas_instance *instance) 4154 { 4155 int i; 4156 uint32_t wait_time = 90; 4157 4158 for (i = 0; i < wait_time; i++) { 4159 if (!instance->fw_outstanding) { 4160 break; 4161 } 4162 drv_usecwait(MILLISEC); /* wait for 1000 usecs */; 4163 } 4164 4165 if (instance->fw_outstanding) { 4166 return (1); 4167 } 4168 4169 return (0); 4170 } 4171 #endif /* __sparc */ 4172 /* 4173 * issue_mfi_pthru 4174 */ 4175 static int 4176 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4177 struct mrsas_cmd *cmd, int mode) 4178 { 4179 void *ubuf; 4180 uint32_t kphys_addr = 0; 4181 uint32_t xferlen = 0; 4182 uint_t model; 4183 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4184 dma_obj_t pthru_dma_obj; 4185 struct mrsas_pthru_frame *kpthru; 4186 struct mrsas_pthru_frame *pthru; 4187 int i; 4188 pthru = &cmd->frame->pthru; 4189 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 4190 4191 if (instance->adapterresetinprogress) { 4192 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: Reset flag set, " 4193 "returning mfi_pkt and setting TRAN_BUSY\n")); 4194 return (DDI_FAILURE); 4195 } 4196 model = ddi_model_convert_from(mode & FMODELS); 4197 if (model == DDI_MODEL_ILP32) { 4198 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 4199 4200 xferlen = kpthru->sgl.sge32[0].length; 4201 4202 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 4203 } else { 4204 #ifdef _ILP32 4205 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 4206 xferlen = kpthru->sgl.sge32[0].length; 4207 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 4208 #else 4209 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 4210 xferlen = kpthru->sgl.sge64[0].length; 
4211 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 4212 #endif 4213 } 4214 4215 if (xferlen) { 4216 /* means IOCTL requires DMA */ 4217 /* allocate the data transfer buffer */ 4218 pthru_dma_obj.size = xferlen; 4219 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 4220 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4221 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4222 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 4223 pthru_dma_obj.dma_attr.dma_attr_align = 1; 4224 4225 /* allocate kernel buffer for DMA */ 4226 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 4227 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4228 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 4229 "could not allocate data transfer buffer.")); 4230 return (DDI_FAILURE); 4231 } 4232 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 4233 4234 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4235 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 4236 for (i = 0; i < xferlen; i++) { 4237 if (ddi_copyin((uint8_t *)ubuf+i, 4238 (uint8_t *)pthru_dma_obj.buffer+i, 4239 1, mode)) { 4240 con_log(CL_ANN, (CE_WARN, 4241 "issue_mfi_pthru : " 4242 "copy from user space failed")); 4243 return (DDI_FAILURE); 4244 } 4245 } 4246 } 4247 4248 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 4249 } 4250 4251 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 4252 ddi_put8(acc_handle, &pthru->sense_len, 0); 4253 ddi_put8(acc_handle, &pthru->cmd_status, 0); 4254 ddi_put8(acc_handle, &pthru->scsi_status, 0); 4255 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 4256 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 4257 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 4258 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 4259 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 4260 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 4261 4262 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 4263 /* pthru->sense_buf_phys_addr_lo = 
cmd->sense_phys_addr; */ 4264 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 4265 4266 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 4267 pthru->cdb_len, DDI_DEV_AUTOINCR); 4268 4269 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 4270 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 4271 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 4272 4273 cmd->sync_cmd = MRSAS_TRUE; 4274 cmd->frame_count = 1; 4275 4276 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4277 con_log(CL_ANN, (CE_WARN, 4278 "issue_mfi_pthru: fw_ioctl failed")); 4279 } else { 4280 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 4281 for (i = 0; i < xferlen; i++) { 4282 if (ddi_copyout( 4283 (uint8_t *)pthru_dma_obj.buffer+i, 4284 (uint8_t *)ubuf+i, 1, mode)) { 4285 con_log(CL_ANN, (CE_WARN, 4286 "issue_mfi_pthru : " 4287 "copy to user space failed")); 4288 return (DDI_FAILURE); 4289 } 4290 } 4291 } 4292 } 4293 4294 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 4295 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 4296 4297 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 4298 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 4299 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, 4300 kpthru->cmd_status, uint8_t, kpthru->scsi_status); 4301 4302 if (xferlen) { 4303 /* free kernel buffer */ 4304 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 4305 return (DDI_FAILURE); 4306 } 4307 4308 return (DDI_SUCCESS); 4309 } 4310 4311 /* 4312 * issue_mfi_dcmd 4313 */ 4314 static int 4315 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4316 struct mrsas_cmd *cmd, int mode) 4317 { 4318 void *ubuf; 4319 uint32_t kphys_addr = 0; 4320 uint32_t xferlen = 0; 4321 uint32_t model; 4322 dma_obj_t dcmd_dma_obj; 4323 struct mrsas_dcmd_frame *kdcmd; 4324 struct mrsas_dcmd_frame *dcmd; 4325 ddi_acc_handle_t 
acc_handle = cmd->frame_dma_obj.acc_handle; 4326 int i; 4327 dcmd = &cmd->frame->dcmd; 4328 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4329 if (instance->adapterresetinprogress) { 4330 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " 4331 "returning mfi_pkt and setting TRAN_BUSY\n")); 4332 return (DDI_FAILURE); 4333 } 4334 model = ddi_model_convert_from(mode & FMODELS); 4335 if (model == DDI_MODEL_ILP32) { 4336 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 4337 4338 xferlen = kdcmd->sgl.sge32[0].length; 4339 4340 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4341 } else { 4342 #ifdef _ILP32 4343 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 4344 xferlen = kdcmd->sgl.sge32[0].length; 4345 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4346 #else 4347 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 4348 xferlen = kdcmd->sgl.sge64[0].length; 4349 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4350 #endif 4351 } 4352 if (xferlen) { 4353 /* means IOCTL requires DMA */ 4354 /* allocate the data transfer buffer */ 4355 dcmd_dma_obj.size = xferlen; 4356 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 4357 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4358 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4359 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 4360 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 4361 4362 /* allocate kernel buffer for DMA */ 4363 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 4364 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4365 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 4366 "could not allocate data transfer buffer.")); 4367 return (DDI_FAILURE); 4368 } 4369 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 4370 4371 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4372 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 4373 for (i = 0; i < xferlen; i++) { 4374 if (ddi_copyin((uint8_t *)ubuf + i, 4375 (uint8_t *)dcmd_dma_obj.buffer + i, 4376 1, mode)) { 
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_dcmd : "
					    "copy from user space failed"));
					/*
					 * NOTE(review): dcmd_dma_obj is not
					 * freed on this early return — looks
					 * like a DMA buffer leak; confirm.
					 */
					return (DDI_FAILURE);
				}
			}
		}

		kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
	}

	/* Build the DCMD frame in the FW-visible frame via the acc handle */
	ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
	ddi_put8(acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
	ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
	ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
	ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);

	ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
	    (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);

	/* force 32-bit SGL: clear the SGL64 flag from the user frame */
	ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
	ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
	ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
	} else {
		if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
			for (i = 0; i < xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)dcmd_dma_obj.buffer + i,
				    (uint8_t *)ubuf + i,
				    1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_dcmd : "
					    "copy to user space failed"));
					return (DDI_FAILURE);
				}
			}
		}
	}

	/* reflect FW completion status back into the user's frame copy */
	kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
	DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
	    kdcmd->cmd, uint8_t, kdcmd->cmd_status);

	if (xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * issue_mfi_smp
 *
 * Build and issue an MFI SMP pass-through frame on behalf of a user
 * ioctl.  The user frame carries two SGEs: [0] = response buffer,
 * [1] = request buffer.  Both are bounce-buffered through kernel DMA
 * objects and copied in/out with ddi_copyin/ddi_copyout.
 */
static int
issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    struct mrsas_cmd *cmd, int mode)
{
	void *request_ubuf;
	void *response_ubuf;
	uint32_t request_xferlen = 0;
	uint32_t response_xferlen = 0;
	uint_t model;
	dma_obj_t request_dma_obj;
	dma_obj_t response_dma_obj;
	ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
	struct mrsas_smp_frame *ksmp;	/* user's copy of the frame */
	struct mrsas_smp_frame *smp;	/* FW-visible frame */
	struct mrsas_sge32 *sge32;
#ifndef _ILP32
	struct mrsas_sge64 *sge64;
#endif
	int i;
	uint64_t tmp_sas_addr;

	smp = &cmd->frame->smp;
	ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];

	/* refuse new sync commands while an online controller reset runs */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (DDI_FAILURE);
	}
	/* pick 32- or 64-bit SGE layout based on the caller's data model */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));

		sge64 = &ksmp->sgl[0].sge64[0];
		response_xferlen = sge64[0].length;
		request_xferlen = sge64[1].length;

		response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
#endif
	}
	if (request_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		request_dma_obj.size = request_xferlen;
		request_dma_obj.dma_attr = mrsas_generic_dma_attr;
		request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		request_dma_obj.dma_attr.dma_attr_sgllen = 1;
		request_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(request_dma_obj.buffer, 0, request_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < request_xferlen; i++) {
			if (ddi_copyin((uint8_t *)request_ubuf + i,
			    (uint8_t *)request_dma_obj.buffer + i,
			    1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): request_dma_obj is leaked on
				 * this early return — confirm.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	if (response_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		response_dma_obj.size = response_xferlen;
		response_dma_obj.dma_attr = mrsas_generic_dma_attr;
		response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		response_dma_obj.dma_attr.dma_attr_sgllen = 1;
		response_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(response_dma_obj.buffer, 0, response_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < response_xferlen; i++) {
			if (ddi_copyin((uint8_t *)response_ubuf + i,
			    (uint8_t *)response_dma_obj.buffer + i,
			    1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy from user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	/* build the FW-visible SMP frame */
	ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
	ddi_put8(acc_handle, &smp->cmd_status, 0);
	ddi_put8(acc_handle, &smp->connection_status, 0);
	ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
	/* smp->context = ksmp->context; */
	ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
	ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);

	/* bcopy avoids an unaligned 64-bit read of the user frame field */
	bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
	    sizeof (uint64_t));
	ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);

	ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_ILP32"));

		/*
		 * NOTE(review): if a xferlen is 0 the matching dma_obj was
		 * never allocated, yet its dma_cookie is read below —
		 * uninitialized read; confirm callers always set both SGEs.
		 */
		sge32 = &smp->sgl[0].sge32[0];
		ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
		ddi_put32(acc_handle, &sge32[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
		ddi_put32(acc_handle, &sge32[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_ILP32"));
		sge32 = &smp->sgl[0].sge32[0];
		ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
		ddi_put32(acc_handle, &sge32[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
		ddi_put32(acc_handle, &sge32[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_LP64"));
		sge64 = &smp->sgl[0].sge64[0];
		ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
		ddi_put64(acc_handle, &sge64[0].phys_addr,
		    response_dma_obj.dma_cookie[0].dmac_address);
		ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
		ddi_put64(acc_handle, &sge64[1].phys_addr,
		    request_dma_obj.dma_cookie[0].dmac_address);
#endif
	}
	/*
	 * NOTE(review): this debug trace reads sge32 even on LP64 builds,
	 * where the branch above only initialized sge64 — sge32 may be
	 * uninitialized here; confirm.
	 */
	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : "
	    "smp->response_xferlen = %d, smp->request_xferlen = %d "
	    "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
	    ddi_get32(acc_handle, &sge32[1].length),
	    ddi_get32(acc_handle, &smp->data_xfer_len)));

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "issue_mfi_smp: fw_ioctl failed"));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: copy to user space"));

		if (request_xferlen) {
			for (i = 0; i < request_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)request_dma_obj.buffer +
				    i, (uint8_t *)request_ubuf + i,
				    1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_smp : copy to user space"
					    " failed"));
					return (DDI_FAILURE);
				}
			}
		}

		if (response_xferlen) {
			for (i = 0; i < response_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)response_dma_obj.buffer
				    + i, (uint8_t *)response_ubuf
				    + i, 1, mode)) {
					con_log(CL_ANN, (CE_WARN,
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));

		fis_xferlen = kstp->sgl.sge64[0].length;
		data_xferlen = kstp->sgl.sge64[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
#endif
	}


	if (fis_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		fis_dma_obj.size = fis_xferlen;
		fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
		fis_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(fis_dma_obj.buffer, 0, fis_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < fis_xferlen; i++) {
			if (ddi_copyin((uint8_t *)fis_ubuf + i,
			    (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				/*
				 * NOTE(review): fis_dma_obj is leaked on this
				 * early return — confirm.
				 */
				return (DDI_FAILURE);
			}
		}
	}

	if (data_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
		    "data_xferlen = %x", data_ubuf, data_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		data_dma_obj.size = data_xferlen;
		data_dma_obj.dma_attr = mrsas_generic_dma_attr;
		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
		data_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(data_dma_obj.buffer, 0, data_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < data_xferlen; i++) {
			if (ddi_copyin((uint8_t *)data_ubuf + i,
			    (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	/* build the FW-visible STP frame */
	ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
	ddi_put8(acc_handle, &stp->cmd_status, 0);
	ddi_put8(acc_handle, &stp->connection_status, 0);
	ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
	ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);

	ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
	ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);

	/* copy the 10-byte FIS from the user frame into the FW frame */
	ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
	    DDI_DEV_AUTOINCR);

	ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
	ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
	ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
	ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
	    fis_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
	ddi_put32(acc_handle,
	    &stp->sgl.sge32[1].phys_addr,
	    data_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
	} else {

		if (fis_xferlen) {
			for (i = 0; i < fis_xferlen; i++) {
				if (ddi_copyout(
				    (uint8_t *)fis_dma_obj.buffer + i,
				    (uint8_t *)fis_ubuf + i, 1, mode)) {
					con_log(CL_ANN, (CE_WARN,
					    "issue_mfi_stp : copy to "
					    "user space failed"));
					return (DDI_FAILURE);
				}
			}
		}
	}
	/*
	 * NOTE(review): this data copyout sits OUTSIDE the success branch
	 * above, so it runs even when fw_ioctl failed — unlike the fis
	 * copyout.  Looks unintentional; confirm against upstream.
	 */
	if (data_xferlen) {
		for (i = 0; i < data_xferlen; i++) {
			if (ddi_copyout(
			    (uint8_t *)data_dma_obj.buffer + i,
			    (uint8_t *)data_ubuf + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN,
				    "issue_mfi_stp : copy to"
				    " user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	/* reflect FW completion status back into the user's frame copy */
	kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
	DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);

	if (fis_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	if (data_xferlen) {
		/* free kernel buffer */
		if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * fill_up_drv_ver
 *
 * Populate the driver-version structure returned by the
 * MRSAS_DRIVER_IOCTL_DRIVER_VERSION ioctl.  Fields are zeroed first;
 * memcpy with strlen() deliberately omits NUL terminators (fixed-width
 * fields in the ioctl ABI).
 */
static void
fill_up_drv_ver(struct mrsas_drv_ver *dv)
{
	(void) memset(dv, 0, sizeof (struct mrsas_drv_ver));

	(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
	(void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
	(void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
	(void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
	(void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
	    strlen(MRSAS_RELDATE));
}

/*
 * handle_drv_ioctl
 *
 * Handle driver-private (non-firmware) ioctls: report the driver
 * version or the controller's PCI address/config header.  Returns
 * DDI_SUCCESS/DDI_FAILURE (or 1 on copyout failure) and sets
 * kdcmd->cmd_status in the user's frame.
 */
static int
handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int i;
	int rval = DDI_SUCCESS;
	int *props = NULL;
	void *ubuf;

	uint8_t *pci_conf_buf;
	uint32_t xferlen;
	uint32_t num_props;
	uint_t model;
	struct mrsas_dcmd_frame *kdcmd;
	struct mrsas_drv_ver dv;
	struct mrsas_pci_information pi;

	kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];

	/* user buffer address/length come from SGE[0] of the user frame */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));

		xferlen = kdcmd->sgl.sge32[0].length;

		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
		xferlen = kdcmd->sgl.sge32[0].length;
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_LP64"));
		xferlen = kdcmd->sgl.sge64[0].length;
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
#endif
	}
	con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
	    "dataBuf=%p size=%d bytes", ubuf, xferlen));

	switch (kdcmd->opcode) {
	case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));

		fill_up_drv_ver(&dv);

		if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
			    "copy to user space failed"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));

		/* decode bus/dev/func from the first "reg" property word */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
		    0, "reg", &props, &num_props)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
			    "ddi_prop_look_int_array failed"));
			rval = DDI_FAILURE;
		} else {

			pi.busNumber = (props[0] >> 16) & 0xFF;
			pi.deviceNumber = (props[0] >> 11) & 0x1f;
			pi.functionNumber = (props[0] >> 8) & 0x7;
			ddi_prop_free((void *)props);
		}

		/*
		 * NOTE(review): on lookup failure pi's address fields are
		 * left uninitialized, yet pi is still copied out below —
		 * confirm whether the copyout should be skipped.
		 */
		pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;

		for (i = 0; i < (sizeof (struct mrsas_pci_information) -
		    offsetof(struct mrsas_pci_information, pciHeaderInfo));
		    i++) {
			pci_conf_buf[i] =
			    pci_config_get8(instance->pci_handle, i);
		}

		if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
			    "copy to user space failed"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
		    "invalid driver specific IOCTL opcode = 0x%x",
		    kdcmd->opcode));
		kdcmd->cmd_status = 1;
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}

/*
 * handle_mfi_ioctl
 *
 * Dispatch a firmware (MFI) ioctl frame to the matching issue_mfi_*
 * handler based on the command opcode in the user frame header.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int rval = DDI_SUCCESS;

	struct mrsas_header *hdr;
	struct mrsas_cmd *cmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	hdr = (struct mrsas_header *)&ioctl->frame[0];

	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* FMA access check on the command's DMA/register handles */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	return_mfi_pkt(instance, cmd);

	return (rval);
}

/*
 * AEN
 */
static int
handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
{
	int rval = 0;

	/* (re)register for async event notification at the current seq no */
	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}

static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* read back the class/locale the pending AEN registered */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* build the superset of old and new subscriptions */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}
	cmd->retry_count_for_ocr = 0;
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* event detail lands in the pre-allocated mfi_evt_detail_obj buffer */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
curr_aen.members.locale = LE_16(curr_aen.members.locale); 5194 curr_aen.word = LE_32(curr_aen.word); 5195 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1], 5196 curr_aen.word); 5197 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 5198 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 5199 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 5200 sizeof (struct mrsas_evt_detail)); 5201 5202 instance->aen_seq_num = seq_num; 5203 5204 5205 /* 5206 * Store reference to the cmd used to register for AEN. When an 5207 * application wants us to register for AEN, we have to abort this 5208 * cmd and re-register with a new EVENT LOCALE supplied by that app 5209 */ 5210 instance->aen_cmd = cmd; 5211 5212 cmd->frame_count = 1; 5213 5214 /* Issue the aen registration frame */ 5215 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 5216 instance->func_ptr->issue_cmd(cmd, instance); 5217 5218 return (0); 5219 } 5220 5221 static void 5222 display_scsi_inquiry(caddr_t scsi_inq) 5223 { 5224 #define MAX_SCSI_DEVICE_CODE 14 5225 int i; 5226 char inquiry_buf[256] = {0}; 5227 int len; 5228 const char *const scsi_device_types[] = { 5229 "Direct-Access ", 5230 "Sequential-Access", 5231 "Printer ", 5232 "Processor ", 5233 "WORM ", 5234 "CD-ROM ", 5235 "Scanner ", 5236 "Optical Device ", 5237 "Medium Changer ", 5238 "Communications ", 5239 "Unknown ", 5240 "Unknown ", 5241 "Unknown ", 5242 "Enclosure ", 5243 }; 5244 5245 len = 0; 5246 5247 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 5248 for (i = 8; i < 16; i++) { 5249 len += snprintf(inquiry_buf + len, 265 - len, "%c", 5250 scsi_inq[i]); 5251 } 5252 5253 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 5254 5255 for (i = 16; i < 32; i++) { 5256 len += snprintf(inquiry_buf + len, 265 - len, "%c", 5257 scsi_inq[i]); 5258 } 5259 5260 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 5261 5262 for (i = 32; i < 36; i++) { 5263 len += snprintf(inquiry_buf 
+ len, 265 - len, "%c", 5264 scsi_inq[i]); 5265 } 5266 5267 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 5268 5269 5270 i = scsi_inq[0] & 0x1f; 5271 5272 5273 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 5274 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : 5275 "Unknown "); 5276 5277 5278 len += snprintf(inquiry_buf + len, 265 - len, 5279 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 5280 5281 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 5282 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 5283 } else { 5284 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 5285 } 5286 5287 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 5288 } 5289 5290 static void 5291 io_timeout_checker(void *arg) 5292 { 5293 struct scsi_pkt *pkt; 5294 struct mrsas_instance *instance = arg; 5295 struct mrsas_cmd *cmd = NULL; 5296 struct mrsas_header *hdr; 5297 int time = 0; 5298 int counter = 0; 5299 struct mlist_head *pos, *next; 5300 mlist_t process_list; 5301 5302 instance->timeout_id = (timeout_id_t)-1; 5303 if (instance->adapterresetinprogress == 1) { 5304 con_log(CL_ANN1, (CE_NOTE, "io_timeout_checker" 5305 " reset in progress")); 5306 instance->timeout_id = timeout(io_timeout_checker, 5307 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 5308 return; 5309 } 5310 5311 /* See if this check needs to be in the beginning or last in ISR */ 5312 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) { 5313 con_log(CL_ANN1, (CE_NOTE, 5314 "Fw Fault state Handling in io_timeout_checker")); 5315 if (instance->adapterresetinprogress == 0) { 5316 (void) mrsas_reset_ppc(instance); 5317 } 5318 instance->timeout_id = timeout(io_timeout_checker, 5319 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 5320 return; 5321 } 5322 5323 INIT_LIST_HEAD(&process_list); 5324 5325 mutex_enter(&instance->cmd_pend_mtx); 5326 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) { 5327 cmd = mlist_entry(pos, struct mrsas_cmd, list); 5328 5329 if (cmd == NULL) { 
			continue;
		}

		/* decrement the per-command countdown; sync cmds count in
		 * the frame header, normal I/O in drv_pkt_time */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --hdr->timeout;
		} else {
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			con_log(CL_ANN1, (CE_NOTE, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt "
			    ": %p, cmd %p", gethrtime(), (void *)pkt,
			    (void *)cmd));
			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	if (counter) {
		con_log(CL_ANN1, (CE_NOTE,
		    "io_timeout_checker "
		    "cmd->retrycount_for_ocr %d, "
		    "cmd index %d , cmd address %p ",
		    cmd->retry_count_for_ocr+1, cmd->index, (void *)cmd));

		if (instance->disable_online_ctrl_reset == 1) {
			con_log(CL_ANN1, (CE_NOTE, "mrsas: "
			    "OCR is not supported by the Firmware "
			    "Failing all the queued packets \n"));

			(void) mrsas_kill_adapter(instance);
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					con_log(CL_ANN1, (CE_NOTE, "mrsas: "
					    "OCR is supported by FW "
					    "triggering mrsas_reset_ppc"));
					(void) mrsas_reset_ppc(instance);
				}
			} else {
				con_log(CL_ANN1, (CE_NOTE,
				    "io_timeout_checker:"
				    " cmdindex: %d,cmd address: %p "
				    "timed out even after 3 resets: "
				    "so kill adapter", cmd->index,
				    (void *)cmd));
				(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}


	/* re-arm the 1-second watchdog while work remains queued */
	if (!mlist_empty(&instance->cmd_pend_list)) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas: "
		    "schedule next timeout check: "
		    "do timeout \n"));
		if (instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id =
			    timeout(io_timeout_checker, (void *)instance,
			    drv_usectohz(MRSAS_1_SECOND));
		}
	}

}

/* Read the firmware state from outbound scratch pad 0 */
static int
read_fw_status_reg_ppc(struct mrsas_instance *instance)
{
	return ((int)RD_OB_SCRATCH_PAD_0(instance));
}

/*
 * issue_cmd_ppc
 *
 * Fire-and-forget submission of an MFI frame to the 2108 inbound
 * queue port; tracks the command on the pending list unless an online
 * controller reset is in progress.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		/*
		 * NOTE(review): both branches assign the same drv_pkt_time;
		 * only the pending-list push differs — confirm intent.
		 */
		if (instance->adapterresetinprogress) {
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			push_pending_mfi_pkt(instance, cmd);
		}

		/* (redundant re-check of pkt — already non-NULL here) */
		if (pkt) {
			con_log(CL_ANN1, (CE_NOTE,
			    "TO ISSUE: cmd %p index %x "
			    "pkt %p time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
		}
	} else {
		con_log(CL_ANN1, (CE_CONT, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}

/*
 * issue_cmd_in_sync_mode
 *
 * Submit an MFI frame and block on int_cmd_cv until the interrupt
 * path updates cmd->cmd_status (initialized to ENODATA) or the wait
 * budget is exhausted.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);
		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	/*
	 * NOTE(review): i counts cv_wait() wakeups, not elapsed
	 * milliseconds, so "msecs" is really a wakeup budget — confirm.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}

/*
 * issue_cmd_in_poll_mode
 *
 * Submit an MFI frame flagged to bypass the reply queue and busy-wait
 * for the firmware to overwrite the poisoned cmd_status value.
 */
static int
issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint16_t flags;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));

	frame_hdr = (struct mrsas_header *)cmd->frame;
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	/* issue the frame using inbound queue port */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/* Unmask the 2108 reply-message interrupt and clear stale doorbells */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/* WR_OB_INTR_MASK(~0x80000000, instance); */
	WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}

/* Mask all outbound interrupts from the controller */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

/* Claim or reject an interrupt based on the outbound status register */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED)
{ 5602 return (ret); 5603 } 5604 /* clear the interrupt by writing back the same value */ 5605 WR_OB_DOORBELL_CLEAR(status, instance); 5606 5607 /* dummy READ */ 5608 status = RD_OB_INTR_STATUS(instance); 5609 5610 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared")); 5611 5612 return (ret); 5613 } 5614 5615 /* 5616 * Marks HBA as bad. This will be called either when an 5617 * IO packet times out even after 3 FW resets 5618 * or FW is found to be fault even after 3 continuous resets. 5619 */ 5620 5621 static int 5622 mrsas_kill_adapter(struct mrsas_instance *instance) 5623 { 5624 if (instance->deadadapter == 1) 5625 return (DDI_FAILURE); 5626 5627 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: " 5628 "Writing to doorbell with MFI_STOP_ADP ")); 5629 mutex_enter(&instance->ocr_flags_mtx); 5630 instance->deadadapter = 1; 5631 mutex_exit(&instance->ocr_flags_mtx); 5632 instance->func_ptr->disable_intr(instance); 5633 WR_IB_DOORBELL(MFI_STOP_ADP, instance); 5634 (void) mrsas_complete_pending_cmds(instance); 5635 return (DDI_SUCCESS); 5636 } 5637 5638 5639 static int 5640 mrsas_reset_ppc(struct mrsas_instance *instance) 5641 { 5642 uint32_t status; 5643 uint32_t retry = 0; 5644 uint32_t cur_abs_reg_val; 5645 uint32_t fw_state; 5646 5647 if (instance->deadadapter == 1) { 5648 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5649 "no more resets as HBA has been marked dead ")); 5650 return (DDI_FAILURE); 5651 } 5652 mutex_enter(&instance->ocr_flags_mtx); 5653 instance->adapterresetinprogress = 1; 5654 mutex_exit(&instance->ocr_flags_mtx); 5655 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress " 5656 "flag set, time %llx", gethrtime())); 5657 instance->func_ptr->disable_intr(instance); 5658 retry_reset: 5659 WR_IB_WRITE_SEQ(0, instance); 5660 WR_IB_WRITE_SEQ(4, instance); 5661 WR_IB_WRITE_SEQ(0xb, instance); 5662 WR_IB_WRITE_SEQ(2, instance); 5663 WR_IB_WRITE_SEQ(7, instance); 5664 WR_IB_WRITE_SEQ(0xd, instance); 5665 con_log(CL_ANN1, (CE_NOTE, 
"mrsas_reset_ppc: magic number written " 5666 "to write sequence register\n")); 5667 delay(100 * drv_usectohz(MILLISEC)); 5668 status = RD_OB_DRWE(instance); 5669 5670 while (!(status & DIAG_WRITE_ENABLE)) { 5671 delay(100 * drv_usectohz(MILLISEC)); 5672 status = RD_OB_DRWE(instance); 5673 if (retry++ == 100) { 5674 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: DRWE bit " 5675 "check retry count %d\n", retry)); 5676 return (DDI_FAILURE); 5677 } 5678 } 5679 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance); 5680 delay(100 * drv_usectohz(MILLISEC)); 5681 status = RD_OB_DRWE(instance); 5682 while (status & DIAG_RESET_ADAPTER) { 5683 delay(100 * drv_usectohz(MILLISEC)); 5684 status = RD_OB_DRWE(instance); 5685 if (retry++ == 100) { 5686 (void) mrsas_kill_adapter(instance); 5687 return (DDI_FAILURE); 5688 } 5689 } 5690 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete")); 5691 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5692 "Calling mfi_state_transition_to_ready")); 5693 5694 /* Mark HBA as bad, if FW is fault after 3 continuous resets */ 5695 if (mfi_state_transition_to_ready(instance) || 5696 debug_fw_faults_after_ocr_g == 1) { 5697 cur_abs_reg_val = 5698 instance->func_ptr->read_fw_status_reg(instance); 5699 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 5700 5701 #ifdef OCRDEBUG 5702 con_log(CL_ANN1, (CE_NOTE, 5703 "mrsas_reset_ppc :before fake: FW is not ready " 5704 "FW state = 0x%x", fw_state)); 5705 if (debug_fw_faults_after_ocr_g == 1) 5706 fw_state = MFI_STATE_FAULT; 5707 #endif 5708 5709 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready " 5710 "FW state = 0x%x", fw_state)); 5711 5712 if (fw_state == MFI_STATE_FAULT) { 5713 // increment the count 5714 instance->fw_fault_count_after_ocr++; 5715 if (instance->fw_fault_count_after_ocr 5716 < MAX_FW_RESET_COUNT) { 5717 con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: " 5718 "FW is in fault after OCR count %d ", 5719 instance->fw_fault_count_after_ocr)); 5720 goto retry_reset; 5721 5722 } 
else { 5723 con_log(CL_ANN1, (CE_WARN, "mrsas_reset_ppc: " 5724 "Max Reset Count exceeded " 5725 "Mark HBA as bad")); 5726 (void) mrsas_kill_adapter(instance); 5727 return (DDI_FAILURE); 5728 } 5729 } 5730 } 5731 // reset the counter as FW is up after OCR 5732 instance->fw_fault_count_after_ocr = 0; 5733 5734 5735 ddi_put32(instance->mfi_internal_dma_obj.acc_handle, 5736 instance->producer, 0); 5737 5738 ddi_put32(instance->mfi_internal_dma_obj.acc_handle, 5739 instance->consumer, 0); 5740 5741 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5742 " after resetting produconsumer chck indexs:" 5743 "producer %x consumer %x", *instance->producer, 5744 *instance->consumer)); 5745 5746 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5747 "Calling mrsas_issue_init_mfi")); 5748 (void) mrsas_issue_init_mfi(instance); 5749 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5750 "mrsas_issue_init_mfi Done")); 5751 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5752 "Calling mrsas_print_pending_cmd\n")); 5753 (void) mrsas_print_pending_cmds(instance); 5754 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5755 "mrsas_print_pending_cmd done\n")); 5756 instance->func_ptr->enable_intr(instance); 5757 instance->fw_outstanding = 0; 5758 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5759 "Calling mrsas_issue_pending_cmds")); 5760 (void) mrsas_issue_pending_cmds(instance); 5761 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5762 "Complete")); 5763 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5764 "Calling aen registration")); 5765 instance->func_ptr->issue_cmd(instance->aen_cmd, instance); 5766 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n")); 5767 mutex_enter(&instance->ocr_flags_mtx); 5768 instance->adapterresetinprogress = 0; 5769 mutex_exit(&instance->ocr_flags_mtx); 5770 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 5771 "adpterresetinprogress flag unset")); 5772 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n")); 5773 return (DDI_SUCCESS); 5774 } 5775 static 
int 5776 mrsas_common_check(struct mrsas_instance *instance, 5777 struct mrsas_cmd *cmd) 5778 { 5779 int ret = DDI_SUCCESS; 5780 5781 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 5782 DDI_SUCCESS) { 5783 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 5784 if (cmd->pkt != NULL) { 5785 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 5786 cmd->pkt->pkt_statistics = 0; 5787 } 5788 ret = DDI_FAILURE; 5789 } 5790 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 5791 != DDI_SUCCESS) { 5792 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 5793 if (cmd->pkt != NULL) { 5794 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 5795 cmd->pkt->pkt_statistics = 0; 5796 } 5797 ret = DDI_FAILURE; 5798 } 5799 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 5800 DDI_SUCCESS) { 5801 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 5802 if (cmd->pkt != NULL) { 5803 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 5804 cmd->pkt->pkt_statistics = 0; 5805 } 5806 ret = DDI_FAILURE; 5807 } 5808 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 5809 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 5810 5811 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 5812 5813 if (cmd->pkt != NULL) { 5814 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 5815 cmd->pkt->pkt_statistics = 0; 5816 } 5817 ret = DDI_FAILURE; 5818 } 5819 5820 return (ret); 5821 } 5822 5823 /*ARGSUSED*/ 5824 static int 5825 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5826 { 5827 /* 5828 * as the driver can always deal with an error in any dma or 5829 * access handle, we can just return the fme_status value. 
5830 */ 5831 pci_ereport_post(dip, err, NULL); 5832 return (err->fme_status); 5833 } 5834 5835 static void 5836 mrsas_fm_init(struct mrsas_instance *instance) 5837 { 5838 /* Need to change iblock to priority for new MSI intr */ 5839 ddi_iblock_cookie_t fm_ibc; 5840 5841 /* Only register with IO Fault Services if we have some capability */ 5842 if (instance->fm_capabilities) { 5843 /* Adjust access and dma attributes for FMA */ 5844 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5845 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 5846 5847 /* 5848 * Register capabilities with IO Fault Services. 5849 * fm_capabilities will be updated to indicate 5850 * capabilities actually supported (not requested.) 5851 */ 5852 5853 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc); 5854 5855 /* 5856 * Initialize pci ereport capabilities if ereport 5857 * capable (should always be.) 5858 */ 5859 5860 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 5861 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 5862 pci_ereport_setup(instance->dip); 5863 } 5864 5865 /* 5866 * Register error callback if error callback capable. 5867 */ 5868 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 5869 ddi_fm_handler_register(instance->dip, 5870 mrsas_fm_error_cb, (void*) instance); 5871 } 5872 } else { 5873 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5874 mrsas_generic_dma_attr.dma_attr_flags = 0; 5875 } 5876 } 5877 5878 static void 5879 mrsas_fm_fini(struct mrsas_instance *instance) 5880 { 5881 /* Only unregister FMA capabilities if registered */ 5882 if (instance->fm_capabilities) { 5883 /* 5884 * Un-register error callback if error callback capable. 
5885 */ 5886 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 5887 ddi_fm_handler_unregister(instance->dip); 5888 } 5889 5890 /* 5891 * Release any resources allocated by pci_ereport_setup() 5892 */ 5893 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 5894 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 5895 pci_ereport_teardown(instance->dip); 5896 } 5897 5898 /* Unregister from IO Fault Services */ 5899 ddi_fm_fini(instance->dip); 5900 5901 /* Adjust access and dma attributes for FMA */ 5902 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5903 mrsas_generic_dma_attr.dma_attr_flags = 0; 5904 } 5905 } 5906 5907 int 5908 mrsas_check_acc_handle(ddi_acc_handle_t handle) 5909 { 5910 ddi_fm_error_t de; 5911 5912 if (handle == NULL) { 5913 return (DDI_FAILURE); 5914 } 5915 5916 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5917 5918 return (de.fme_status); 5919 } 5920 5921 int 5922 mrsas_check_dma_handle(ddi_dma_handle_t handle) 5923 { 5924 ddi_fm_error_t de; 5925 5926 if (handle == NULL) { 5927 return (DDI_FAILURE); 5928 } 5929 5930 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5931 5932 return (de.fme_status); 5933 } 5934 5935 void 5936 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 5937 { 5938 uint64_t ena; 5939 char buf[FM_MAX_CLASS]; 5940 5941 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5942 ena = fm_ena_generate(0, FM_ENA_FMT1); 5943 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 5944 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 5945 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 5946 } 5947 } 5948 5949 static int 5950 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 5951 { 5952 5953 dev_info_t *dip = instance->dip; 5954 int avail, actual, count; 5955 int i, flag, ret; 5956 5957 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", 5958 intr_type)); 5959 5960 /* Get number of interrupts */ 5961 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 5962 if 
((ret != DDI_SUCCESS) || (count == 0)) { 5963 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 5964 "ret %d count %d", ret, count)); 5965 5966 return (DDI_FAILURE); 5967 } 5968 5969 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); 5970 5971 /* Get number of available interrupts */ 5972 ret = ddi_intr_get_navail(dip, intr_type, &avail); 5973 if ((ret != DDI_SUCCESS) || (avail == 0)) { 5974 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 5975 "ret %d avail %d", ret, avail)); 5976 5977 return (DDI_FAILURE); 5978 } 5979 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); 5980 5981 /* Only one interrupt routine. So limit the count to 1 */ 5982 if (count > 1) { 5983 count = 1; 5984 } 5985 5986 /* 5987 * Allocate an array of interrupt handlers. Currently we support 5988 * only one interrupt. The framework can be extended later. 5989 */ 5990 instance->intr_size = count * sizeof (ddi_intr_handle_t); 5991 instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); 5992 ASSERT(instance->intr_htable); 5993 5994 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == 5995 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 5996 5997 /* Allocate interrupt */ 5998 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 5999 count, &actual, flag); 6000 6001 if ((ret != DDI_SUCCESS) || (actual == 0)) { 6002 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 6003 "avail = %d", avail)); 6004 kmem_free(instance->intr_htable, instance->intr_size); 6005 return (DDI_FAILURE); 6006 } 6007 if (actual < count) { 6008 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 6009 "Requested = %d Received = %d", count, actual)); 6010 } 6011 instance->intr_cnt = actual; 6012 6013 /* 6014 * Get the priority of the interrupt allocated. 
6015 */ 6016 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 6017 &instance->intr_pri)) != DDI_SUCCESS) { 6018 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 6019 "get priority call failed")); 6020 6021 for (i = 0; i < actual; i++) { 6022 (void) ddi_intr_free(instance->intr_htable[i]); 6023 } 6024 kmem_free(instance->intr_htable, instance->intr_size); 6025 return (DDI_FAILURE); 6026 } 6027 6028 /* 6029 * Test for high level mutex. we don't support them. 6030 */ 6031 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 6032 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 6033 "High level interrupts not supported.")); 6034 6035 for (i = 0; i < actual; i++) { 6036 (void) ddi_intr_free(instance->intr_htable[i]); 6037 } 6038 kmem_free(instance->intr_htable, instance->intr_size); 6039 return (DDI_FAILURE); 6040 } 6041 6042 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 6043 instance->intr_pri)); 6044 6045 /* Call ddi_intr_add_handler() */ 6046 for (i = 0; i < actual; i++) { 6047 ret = ddi_intr_add_handler(instance->intr_htable[i], 6048 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 6049 (caddr_t)(uintptr_t)i); 6050 6051 if (ret != DDI_SUCCESS) { 6052 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 6053 "failed %d", ret)); 6054 6055 for (i = 0; i < actual; i++) { 6056 (void) ddi_intr_free(instance->intr_htable[i]); 6057 } 6058 kmem_free(instance->intr_htable, instance->intr_size); 6059 return (DDI_FAILURE); 6060 } 6061 6062 } 6063 6064 con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); 6065 6066 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 6067 &instance->intr_cap)) != DDI_SUCCESS) { 6068 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 6069 ret)); 6070 6071 /* Free already allocated intr */ 6072 for (i = 0; i < actual; i++) { 6073 (void) ddi_intr_remove_handler( 6074 instance->intr_htable[i]); 6075 (void) ddi_intr_free(instance->intr_htable[i]); 6076 } 6077 kmem_free(instance->intr_htable, instance->intr_size); 
6078 return (DDI_FAILURE); 6079 } 6080 6081 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 6082 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 6083 6084 (void) ddi_intr_block_enable(instance->intr_htable, 6085 instance->intr_cnt); 6086 } else { 6087 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 6088 6089 for (i = 0; i < instance->intr_cnt; i++) { 6090 (void) ddi_intr_enable(instance->intr_htable[i]); 6091 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns " 6092 "%d", i)); 6093 } 6094 } 6095 6096 return (DDI_SUCCESS); 6097 6098 } 6099 6100 6101 static void 6102 mrsas_rem_intrs(struct mrsas_instance *instance) 6103 { 6104 int i; 6105 6106 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 6107 6108 /* Disable all interrupts first */ 6109 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 6110 (void) ddi_intr_block_disable(instance->intr_htable, 6111 instance->intr_cnt); 6112 } else { 6113 for (i = 0; i < instance->intr_cnt; i++) { 6114 (void) ddi_intr_disable(instance->intr_htable[i]); 6115 } 6116 } 6117 6118 /* Remove all the handlers */ 6119 6120 for (i = 0; i < instance->intr_cnt; i++) { 6121 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 6122 (void) ddi_intr_free(instance->intr_htable[i]); 6123 } 6124 6125 kmem_free(instance->intr_htable, instance->intr_size); 6126 } 6127 6128 static int 6129 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 6130 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 6131 { 6132 struct mrsas_instance *instance; 6133 int config; 6134 int rval; 6135 6136 char *ptr = NULL; 6137 int tgt, lun; 6138 6139 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 6140 6141 if ((instance = ddi_get_soft_state(mrsas_state, 6142 ddi_get_instance(parent))) == NULL) { 6143 return (NDI_FAILURE); 6144 } 6145 6146 /* Hold nexus during bus_config */ 6147 ndi_devi_enter(parent, &config); 6148 switch (op) { 6149 case BUS_CONFIG_ONE: { 6150 6151 /* parse wwid/target name out of name given */ 6152 
if ((ptr = strchr((char *)arg, '@')) == NULL) { 6153 rval = NDI_FAILURE; 6154 break; 6155 } 6156 ptr++; 6157 6158 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 6159 rval = NDI_FAILURE; 6160 break; 6161 } 6162 6163 if (lun == 0) { 6164 rval = mrsas_config_ld(instance, tgt, lun, childp); 6165 } else { 6166 rval = NDI_FAILURE; 6167 } 6168 6169 break; 6170 } 6171 case BUS_CONFIG_DRIVER: 6172 case BUS_CONFIG_ALL: { 6173 6174 rval = mrsas_config_all_devices(instance); 6175 6176 rval = NDI_SUCCESS; 6177 break; 6178 } 6179 } 6180 6181 if (rval == NDI_SUCCESS) { 6182 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 6183 6184 } 6185 ndi_devi_exit(parent, config); 6186 6187 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 6188 rval)); 6189 return (rval); 6190 } 6191 6192 static int 6193 mrsas_config_all_devices(struct mrsas_instance *instance) 6194 { 6195 int rval, tgt; 6196 6197 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 6198 (void) mrsas_config_ld(instance, tgt, 0, NULL); 6199 6200 } 6201 6202 rval = NDI_SUCCESS; 6203 return (rval); 6204 } 6205 6206 static int 6207 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 6208 { 6209 char devbuf[SCSI_MAXNAMELEN]; 6210 char *addr; 6211 char *p, *tp, *lp; 6212 long num; 6213 6214 /* Parse dev name and address */ 6215 (void) strcpy(devbuf, devnm); 6216 addr = ""; 6217 for (p = devbuf; *p != '\0'; p++) { 6218 if (*p == '@') { 6219 addr = p + 1; 6220 *p = '\0'; 6221 } else if (*p == ':') { 6222 *p = '\0'; 6223 break; 6224 } 6225 } 6226 6227 /* Parse target and lun */ 6228 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 6229 if (*p == ',') { 6230 lp = p + 1; 6231 *p = '\0'; 6232 break; 6233 } 6234 } 6235 if (tgt && tp) { 6236 if (ddi_strtol(tp, NULL, 0x10, &num)) { 6237 return (DDI_FAILURE); /* Can declare this as constant */ 6238 } 6239 *tgt = (int)num; 6240 } 6241 if (lun && lp) { 6242 if (ddi_strtol(lp, NULL, 0x10, &num)) { 6243 return (DDI_FAILURE); 6244 } 6245 *lun = (int)num; 6246 } 6247 return 
(DDI_SUCCESS); /* Success case */ 6248 } 6249 6250 static int 6251 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 6252 uint8_t lun, dev_info_t **ldip) 6253 { 6254 struct scsi_device *sd; 6255 dev_info_t *child; 6256 int rval; 6257 6258 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 6259 tgt, lun)); 6260 6261 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 6262 if (ldip) { 6263 *ldip = child; 6264 } 6265 con_log(CL_ANN1, (CE_NOTE, 6266 "mrsas_config_ld: Child = %p found t = %d l = %d", 6267 (void *)child, tgt, lun)); 6268 return (NDI_SUCCESS); 6269 } 6270 6271 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 6272 sd->sd_address.a_hba_tran = instance->tran; 6273 sd->sd_address.a_target = (uint16_t)tgt; 6274 sd->sd_address.a_lun = (uint8_t)lun; 6275 6276 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 6277 rval = mrsas_config_scsi_device(instance, sd, ldip); 6278 else 6279 rval = NDI_FAILURE; 6280 6281 /* sd_unprobe is blank now. Free buffer manually */ 6282 if (sd->sd_inq) { 6283 kmem_free(sd->sd_inq, SUN_INQSIZE); 6284 sd->sd_inq = (struct scsi_inquiry *)NULL; 6285 } 6286 6287 kmem_free(sd, sizeof (struct scsi_device)); 6288 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 6289 rval)); 6290 return (rval); 6291 } 6292 6293 static int 6294 mrsas_config_scsi_device(struct mrsas_instance *instance, 6295 struct scsi_device *sd, dev_info_t **dipp) 6296 { 6297 char *nodename = NULL; 6298 char **compatible = NULL; 6299 int ncompatible = 0; 6300 char *childname; 6301 dev_info_t *ldip = NULL; 6302 int tgt = sd->sd_address.a_target; 6303 int lun = sd->sd_address.a_lun; 6304 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 6305 int rval; 6306 6307 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 6308 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 6309 NULL, &nodename, &compatible, &ncompatible); 6310 6311 if (nodename == NULL) { 6312 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no 
compatible driver " 6313 "for t%dL%d", tgt, lun)); 6314 rval = NDI_FAILURE; 6315 goto finish; 6316 } 6317 6318 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; 6319 con_log(CL_ANN1, (CE_WARN, 6320 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 6321 6322 /* Create a dev node */ 6323 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 6324 con_log(CL_ANN1, (CE_WARN, 6325 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 6326 if (rval == NDI_SUCCESS) { 6327 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 6328 DDI_PROP_SUCCESS) { 6329 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6330 "property for t%dl%d target", tgt, lun)); 6331 rval = NDI_FAILURE; 6332 goto finish; 6333 } 6334 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 6335 DDI_PROP_SUCCESS) { 6336 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6337 "property for t%dl%d lun", tgt, lun)); 6338 rval = NDI_FAILURE; 6339 goto finish; 6340 } 6341 6342 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 6343 "compatible", compatible, ncompatible) != 6344 DDI_PROP_SUCCESS) { 6345 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 6346 "property for t%dl%d compatible", tgt, lun)); 6347 rval = NDI_FAILURE; 6348 goto finish; 6349 } 6350 6351 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 6352 if (rval != NDI_SUCCESS) { 6353 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 6354 "t%dl%d", tgt, lun)); 6355 ndi_prop_remove_all(ldip); 6356 (void) ndi_devi_free(ldip); 6357 } else { 6358 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 6359 "0 t%dl%d", tgt, lun)); 6360 } 6361 6362 } 6363 finish: 6364 if (dipp) { 6365 *dipp = ldip; 6366 } 6367 6368 con_log(CL_DLEVEL1, (CE_WARN, 6369 "mr_sas: config_scsi_device rval = %d t%dL%d", 6370 rval, tgt, lun)); 6371 scsi_hba_nodename_compatible_free(nodename, compatible); 6372 return (rval); 6373 } 6374 6375 /*ARGSUSED*/ 6376 static int 6377 mrsas_service_evt(struct 
mrsas_instance *instance, int tgt, int lun, int event, 6378 uint64_t wwn) 6379 { 6380 struct mrsas_eventinfo *mrevt = NULL; 6381 6382 con_log(CL_ANN1, (CE_NOTE, 6383 "mrsas_service_evt called for t%dl%d event = %d", 6384 tgt, lun, event)); 6385 6386 if ((instance->taskq == NULL) || (mrevt = 6387 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 6388 return (ENOMEM); 6389 } 6390 6391 mrevt->instance = instance; 6392 mrevt->tgt = tgt; 6393 mrevt->lun = lun; 6394 mrevt->event = event; 6395 6396 if ((ddi_taskq_dispatch(instance->taskq, 6397 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 6398 DDI_SUCCESS) { 6399 con_log(CL_ANN1, (CE_NOTE, 6400 "mr_sas: Event task failed for t%dl%d event = %d", 6401 tgt, lun, event)); 6402 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 6403 return (DDI_FAILURE); 6404 } 6405 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event); 6406 return (DDI_SUCCESS); 6407 } 6408 6409 static void 6410 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 6411 { 6412 struct mrsas_instance *instance = mrevt->instance; 6413 dev_info_t *dip, *pdip; 6414 int circ1 = 0; 6415 char *devname; 6416 6417 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 6418 " tgt %d lun %d event %d", 6419 mrevt->tgt, mrevt->lun, mrevt->event)); 6420 6421 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 6422 dip = instance->mr_ld_list[mrevt->tgt].dip; 6423 } else { 6424 return; 6425 } 6426 6427 ndi_devi_enter(instance->dip, &circ1); 6428 switch (mrevt->event) { 6429 case MRSAS_EVT_CONFIG_TGT: 6430 if (dip == NULL) { 6431 6432 if (mrevt->lun == 0) { 6433 (void) mrsas_config_ld(instance, mrevt->tgt, 6434 0, NULL); 6435 } 6436 con_log(CL_ANN1, (CE_NOTE, 6437 "mr_sas: EVT_CONFIG_TGT called:" 6438 " for tgt %d lun %d event %d", 6439 mrevt->tgt, mrevt->lun, mrevt->event)); 6440 6441 } else { 6442 con_log(CL_ANN1, (CE_NOTE, 6443 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 6444 " for tgt %d lun %d event %d", 6445 mrevt->tgt, 
mrevt->lun, mrevt->event)); 6446 } 6447 break; 6448 case MRSAS_EVT_UNCONFIG_TGT: 6449 if (dip) { 6450 if (i_ddi_devi_attached(dip)) { 6451 6452 pdip = ddi_get_parent(dip); 6453 6454 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP); 6455 (void) ddi_deviname(dip, devname); 6456 6457 (void) devfs_clean(pdip, devname + 1, 6458 DV_CLEAN_FORCE); 6459 kmem_free(devname, MAXNAMELEN + 1); 6460 } 6461 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 6462 con_log(CL_ANN1, (CE_NOTE, 6463 "mr_sas: EVT_UNCONFIG_TGT called:" 6464 " for tgt %d lun %d event %d", 6465 mrevt->tgt, mrevt->lun, mrevt->event)); 6466 } else { 6467 con_log(CL_ANN1, (CE_NOTE, 6468 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 6469 " for tgt %d lun %d event %d", 6470 mrevt->tgt, mrevt->lun, mrevt->event)); 6471 } 6472 break; 6473 } 6474 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 6475 ndi_devi_exit(instance->dip, circ1); 6476 } 6477 6478 static int 6479 mrsas_mode_sense_build(struct scsi_pkt *pkt) 6480 { 6481 union scsi_cdb *cdbp; 6482 uint16_t page_code; 6483 struct scsa_cmd *acmd; 6484 struct buf *bp; 6485 struct mode_header *modehdrp; 6486 6487 cdbp = (void *)pkt->pkt_cdbp; 6488 page_code = cdbp->cdb_un.sg.scsi[0]; 6489 acmd = PKT2CMD(pkt); 6490 bp = acmd->cmd_buf; 6491 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 6492 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 6493 /* ADD pkt statistics as Command failed. 
*/ 6494 return (NULL); 6495 } 6496 6497 bp_mapin(bp); 6498 bzero(bp->b_un.b_addr, bp->b_bcount); 6499 6500 switch (page_code) { 6501 case 0x3: { 6502 struct mode_format *page3p = NULL; 6503 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 6504 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 6505 6506 page3p = (void *)((caddr_t)modehdrp + 6507 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 6508 page3p->mode_page.code = 0x3; 6509 page3p->mode_page.length = 6510 (uchar_t)(sizeof (struct mode_format)); 6511 page3p->data_bytes_sect = 512; 6512 page3p->sect_track = 63; 6513 break; 6514 } 6515 case 0x4: { 6516 struct mode_geometry *page4p = NULL; 6517 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 6518 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 6519 6520 page4p = (void *)((caddr_t)modehdrp + 6521 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 6522 page4p->mode_page.code = 0x4; 6523 page4p->mode_page.length = 6524 (uchar_t)(sizeof (struct mode_geometry)); 6525 page4p->heads = 255; 6526 page4p->rpm = 10000; 6527 break; 6528 } 6529 default: 6530 break; 6531 } 6532 return (NULL); 6533 } 6534