1 /* 2 * mr_sas.c: source for mr_sas driver 3 * 4 * MegaRAID device driver for SAS2.0 controllers 5 * Copyright (c) 2008-2009, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Arun Chandrashekhar 11 * Manju R 12 * Rajesh Prabhakaran 13 * Seokmann Ju 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions are met: 17 * 18 * 1. Redistributions of source code must retain the above copyright notice, 19 * this list of conditions and the following disclaimer. 20 * 21 * 2. Redistributions in binary form must reproduce the above copyright notice, 22 * this list of conditions and the following disclaimer in the documentation 23 * and/or other materials provided with the distribution. 24 * 25 * 3. Neither the name of the author nor the names of its contributors may be 26 * used to endorse or promote products derived from this software without 27 * specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 33 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 40 * DAMAGE. 41 */ 42 43 /* 44 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 45 * Use is subject to license terms. 
46 */ 47 48 #include <sys/types.h> 49 #include <sys/param.h> 50 #include <sys/file.h> 51 #include <sys/errno.h> 52 #include <sys/open.h> 53 #include <sys/cred.h> 54 #include <sys/modctl.h> 55 #include <sys/conf.h> 56 #include <sys/devops.h> 57 #include <sys/cmn_err.h> 58 #include <sys/kmem.h> 59 #include <sys/stat.h> 60 #include <sys/mkdev.h> 61 #include <sys/pci.h> 62 #include <sys/scsi/scsi.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/atomic.h> 66 #include <sys/signal.h> 67 #include <sys/byteorder.h> 68 #include <sys/sdt.h> 69 #include <sys/fs/dv_node.h> /* devfs_clean */ 70 71 #include "mr_sas.h" 72 73 /* 74 * FMA header files 75 */ 76 #include <sys/ddifm.h> 77 #include <sys/fm/protocol.h> 78 #include <sys/fm/util.h> 79 #include <sys/fm/io/ddi.h> 80 81 /* 82 * Local static data 83 */ 84 static void *mrsas_state = NULL; 85 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE; 86 static volatile int debug_level_g = CL_NONE; 87 static volatile int msi_enable = 1; 88 89 #pragma weak scsi_hba_open 90 #pragma weak scsi_hba_close 91 #pragma weak scsi_hba_ioctl 92 93 static ddi_dma_attr_t mrsas_generic_dma_attr = { 94 DMA_ATTR_V0, /* dma_attr_version */ 95 0, /* low DMA address range */ 96 0xFFFFFFFFU, /* high DMA address range */ 97 0xFFFFFFFFU, /* DMA counter register */ 98 8, /* DMA address alignment */ 99 0x07, /* DMA burstsizes */ 100 1, /* min DMA size */ 101 0xFFFFFFFFU, /* max DMA size */ 102 0xFFFFFFFFU, /* segment boundary */ 103 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 104 512, /* granularity of device */ 105 0 /* bus specific DMA flags */ 106 }; 107 108 int32_t mrsas_max_cap_maxxfer = 0x1000000; 109 110 /* 111 * cb_ops contains base level routines 112 */ 113 static struct cb_ops mrsas_cb_ops = { 114 mrsas_open, /* open */ 115 mrsas_close, /* close */ 116 nodev, /* strategy */ 117 nodev, /* print */ 118 nodev, /* dump */ 119 nodev, /* read */ 120 nodev, /* write */ 121 mrsas_ioctl, /* ioctl */ 122 nodev, /* devmap */ 123 nodev, 
/* mmap */ 124 nodev, /* segmap */ 125 nochpoll, /* poll */ 126 nodev, /* cb_prop_op */ 127 0, /* streamtab */ 128 D_NEW | D_HOTPLUG, /* cb_flag */ 129 CB_REV, /* cb_rev */ 130 nodev, /* cb_aread */ 131 nodev /* cb_awrite */ 132 }; 133 134 /* 135 * dev_ops contains configuration routines 136 */ 137 static struct dev_ops mrsas_ops = { 138 DEVO_REV, /* rev, */ 139 0, /* refcnt */ 140 mrsas_getinfo, /* getinfo */ 141 nulldev, /* identify */ 142 nulldev, /* probe */ 143 mrsas_attach, /* attach */ 144 mrsas_detach, /* detach */ 145 mrsas_reset, /* reset */ 146 &mrsas_cb_ops, /* char/block ops */ 147 NULL, /* bus ops */ 148 NULL, /* power */ 149 ddi_quiesce_not_supported, /* quiesce */ 150 }; 151 152 char _depends_on[] = "misc/scsi"; 153 154 static struct modldrv modldrv = { 155 &mod_driverops, /* module type - driver */ 156 MRSAS_VERSION, 157 &mrsas_ops, /* driver ops */ 158 }; 159 160 static struct modlinkage modlinkage = { 161 MODREV_1, /* ml_rev - must be MODREV_1 */ 162 &modldrv, /* ml_linkage */ 163 NULL /* end of driver linkage */ 164 }; 165 166 static struct ddi_device_acc_attr endian_attr = { 167 DDI_DEVICE_ATTR_V1, 168 DDI_STRUCTURE_LE_ACC, 169 DDI_STRICTORDER_ACC, 170 DDI_DEFAULT_ACC 171 }; 172 173 174 /* 175 * ************************************************************************** * 176 * * 177 * common entry points - for loadable kernel modules * 178 * * 179 * ************************************************************************** * 180 */ 181 182 int 183 _init(void) 184 { 185 int ret; 186 187 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 188 189 ret = ddi_soft_state_init(&mrsas_state, 190 sizeof (struct mrsas_instance), 0); 191 192 if (ret != DDI_SUCCESS) { 193 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state")); 194 return (ret); 195 } 196 197 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 198 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba")); 199 ddi_soft_state_fini(&mrsas_state); 200 return (ret); 201 
} 202 203 ret = mod_install(&modlinkage); 204 205 if (ret != DDI_SUCCESS) { 206 con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed")); 207 scsi_hba_fini(&modlinkage); 208 ddi_soft_state_fini(&mrsas_state); 209 } 210 211 return (ret); 212 } 213 214 int 215 _info(struct modinfo *modinfop) 216 { 217 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 218 219 return (mod_info(&modlinkage, modinfop)); 220 } 221 222 int 223 _fini(void) 224 { 225 int ret; 226 227 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 228 229 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) 230 return (ret); 231 232 scsi_hba_fini(&modlinkage); 233 234 ddi_soft_state_fini(&mrsas_state); 235 236 return (ret); 237 } 238 239 240 /* 241 * ************************************************************************** * 242 * * 243 * common entry points - for autoconfiguration * 244 * * 245 * ************************************************************************** * 246 */ 247 248 static int 249 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 250 { 251 int instance_no; 252 int nregs; 253 uint8_t added_isr_f = 0; 254 uint8_t added_soft_isr_f = 0; 255 uint8_t create_devctl_node_f = 0; 256 uint8_t create_scsi_node_f = 0; 257 uint8_t create_ioc_node_f = 0; 258 uint8_t tran_alloc_f = 0; 259 uint8_t irq; 260 uint16_t vendor_id; 261 uint16_t device_id; 262 uint16_t subsysvid; 263 uint16_t subsysid; 264 uint16_t command; 265 off_t reglength = 0; 266 int intr_types = 0; 267 char *data; 268 269 scsi_hba_tran_t *tran; 270 ddi_dma_attr_t tran_dma_attr; 271 struct mrsas_instance *instance; 272 273 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 274 275 /* CONSTCOND */ 276 ASSERT(NO_COMPETING_THREADS); 277 278 instance_no = ddi_get_instance(dip); 279 280 /* 281 * check to see whether this device is in a DMA-capable slot. 
282 */ 283 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 284 con_log(CL_ANN, (CE_WARN, 285 "mr_sas%d: Device in slave-only slot, unused", 286 instance_no)); 287 return (DDI_FAILURE); 288 } 289 290 switch (cmd) { 291 case DDI_ATTACH: 292 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH")); 293 /* allocate the soft state for the instance */ 294 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 295 != DDI_SUCCESS) { 296 con_log(CL_ANN, (CE_WARN, 297 "mr_sas%d: Failed to allocate soft state", 298 instance_no)); 299 300 return (DDI_FAILURE); 301 } 302 303 instance = (struct mrsas_instance *)ddi_get_soft_state 304 (mrsas_state, instance_no); 305 306 if (instance == NULL) { 307 con_log(CL_ANN, (CE_WARN, 308 "mr_sas%d: Bad soft state", instance_no)); 309 310 ddi_soft_state_free(mrsas_state, instance_no); 311 312 return (DDI_FAILURE); 313 } 314 315 bzero((caddr_t)instance, 316 sizeof (struct mrsas_instance)); 317 318 instance->func_ptr = kmem_zalloc( 319 sizeof (struct mrsas_func_ptr), KM_SLEEP); 320 ASSERT(instance->func_ptr); 321 322 /* Setup the PCI configuration space handles */ 323 if (pci_config_setup(dip, &instance->pci_handle) != 324 DDI_SUCCESS) { 325 con_log(CL_ANN, (CE_WARN, 326 "mr_sas%d: pci config setup failed ", 327 instance_no)); 328 329 kmem_free(instance->func_ptr, 330 sizeof (struct mrsas_func_ptr)); 331 ddi_soft_state_free(mrsas_state, instance_no); 332 333 return (DDI_FAILURE); 334 } 335 336 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 337 con_log(CL_ANN, (CE_WARN, 338 "mr_sas: failed to get registers.")); 339 340 pci_config_teardown(&instance->pci_handle); 341 kmem_free(instance->func_ptr, 342 sizeof (struct mrsas_func_ptr)); 343 ddi_soft_state_free(mrsas_state, instance_no); 344 345 return (DDI_FAILURE); 346 } 347 348 vendor_id = pci_config_get16(instance->pci_handle, 349 PCI_CONF_VENID); 350 device_id = pci_config_get16(instance->pci_handle, 351 PCI_CONF_DEVID); 352 353 subsysvid = pci_config_get16(instance->pci_handle, 354 PCI_CONF_SUBVENID); 355 
subsysid = pci_config_get16(instance->pci_handle, 356 PCI_CONF_SUBSYSID); 357 358 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 359 (pci_config_get16(instance->pci_handle, 360 PCI_CONF_COMM) | PCI_COMM_ME)); 361 irq = pci_config_get8(instance->pci_handle, 362 PCI_CONF_ILINE); 363 364 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 365 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s", 366 instance_no, vendor_id, device_id, subsysvid, 367 subsysid, irq, MRSAS_VERSION)); 368 369 /* enable bus-mastering */ 370 command = pci_config_get16(instance->pci_handle, 371 PCI_CONF_COMM); 372 373 if (!(command & PCI_COMM_ME)) { 374 command |= PCI_COMM_ME; 375 376 pci_config_put16(instance->pci_handle, 377 PCI_CONF_COMM, command); 378 379 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 380 "enable bus-mastering", instance_no)); 381 } else { 382 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 383 "bus-mastering already set", instance_no)); 384 } 385 386 /* initialize function pointers */ 387 if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || 388 (device_id == PCI_DEVICE_ID_LSI_2108V)) { 389 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 390 "2108V/DE detected", instance_no)); 391 instance->func_ptr->read_fw_status_reg = 392 read_fw_status_reg_ppc; 393 instance->func_ptr->issue_cmd = issue_cmd_ppc; 394 instance->func_ptr->issue_cmd_in_sync_mode = 395 issue_cmd_in_sync_mode_ppc; 396 instance->func_ptr->issue_cmd_in_poll_mode = 397 issue_cmd_in_poll_mode_ppc; 398 instance->func_ptr->enable_intr = 399 enable_intr_ppc; 400 instance->func_ptr->disable_intr = 401 disable_intr_ppc; 402 instance->func_ptr->intr_ack = intr_ack_ppc; 403 } else { 404 con_log(CL_ANN, (CE_WARN, 405 "mr_sas: Invalid device detected")); 406 407 pci_config_teardown(&instance->pci_handle); 408 kmem_free(instance->func_ptr, 409 sizeof (struct mrsas_func_ptr)); 410 ddi_soft_state_free(mrsas_state, instance_no); 411 412 return (DDI_FAILURE); 413 } 414 415 instance->baseaddress = pci_config_get32( 416 instance->pci_handle, PCI_CONF_BASE0); 417 
instance->baseaddress &= 0x0fffc; 418 419 instance->dip = dip; 420 instance->vendor_id = vendor_id; 421 instance->device_id = device_id; 422 instance->subsysvid = subsysvid; 423 instance->subsysid = subsysid; 424 instance->instance = instance_no; 425 426 /* Initialize FMA */ 427 instance->fm_capabilities = ddi_prop_get_int( 428 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 429 "fm-capable", DDI_FM_EREPORT_CAPABLE | 430 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 431 | DDI_FM_ERRCB_CAPABLE); 432 433 mrsas_fm_init(instance); 434 435 /* Initialize Interrupts */ 436 if ((ddi_dev_regsize(instance->dip, 437 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 438 reglength < MINIMUM_MFI_MEM_SZ) { 439 return (DDI_FAILURE); 440 } 441 if (reglength > DEFAULT_MFI_MEM_SZ) { 442 reglength = DEFAULT_MFI_MEM_SZ; 443 con_log(CL_DLEVEL1, (CE_NOTE, 444 "mr_sas: register length to map is " 445 "0x%lx bytes", reglength)); 446 } 447 if (ddi_regs_map_setup(instance->dip, 448 REGISTER_SET_IO_2108, &instance->regmap, 0, 449 reglength, &endian_attr, &instance->regmap_handle) 450 != DDI_SUCCESS) { 451 con_log(CL_ANN, (CE_NOTE, 452 "mr_sas: couldn't map control registers")); 453 goto fail_attach; 454 } 455 456 /* 457 * Disable Interrupt Now. 
458 * Setup Software interrupt 459 */ 460 instance->func_ptr->disable_intr(instance); 461 462 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 463 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 464 if (strncmp(data, "no", 3) == 0) { 465 msi_enable = 0; 466 con_log(CL_ANN1, (CE_WARN, 467 "msi_enable = %d disabled", 468 msi_enable)); 469 } 470 ddi_prop_free(data); 471 } 472 473 con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", 474 msi_enable)); 475 476 /* Check for all supported interrupt types */ 477 if (ddi_intr_get_supported_types( 478 dip, &intr_types) != DDI_SUCCESS) { 479 con_log(CL_ANN, (CE_WARN, 480 "ddi_intr_get_supported_types() failed")); 481 goto fail_attach; 482 } 483 484 con_log(CL_DLEVEL1, (CE_NOTE, 485 "ddi_intr_get_supported_types() ret: 0x%x", 486 intr_types)); 487 488 /* Initialize and Setup Interrupt handler */ 489 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 490 if (mrsas_add_intrs(instance, 491 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { 492 con_log(CL_ANN, (CE_WARN, 493 "MSIX interrupt query failed")); 494 goto fail_attach; 495 } 496 instance->intr_type = DDI_INTR_TYPE_MSIX; 497 } else if (msi_enable && (intr_types & 498 DDI_INTR_TYPE_MSI)) { 499 if (mrsas_add_intrs(instance, 500 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 501 con_log(CL_ANN, (CE_WARN, 502 "MSI interrupt query failed")); 503 goto fail_attach; 504 } 505 instance->intr_type = DDI_INTR_TYPE_MSI; 506 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 507 msi_enable = 0; 508 if (mrsas_add_intrs(instance, 509 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 510 con_log(CL_ANN, (CE_WARN, 511 "FIXED interrupt query failed")); 512 goto fail_attach; 513 } 514 instance->intr_type = DDI_INTR_TYPE_FIXED; 515 } else { 516 con_log(CL_ANN, (CE_WARN, "Device cannot " 517 "suppport either FIXED or MSI/X " 518 "interrupts")); 519 goto fail_attach; 520 } 521 522 added_isr_f = 1; 523 524 /* setup the mfi based low level driver */ 525 if (init_mfi(instance) != DDI_SUCCESS) { 526 con_log(CL_ANN, (CE_WARN, "mr_sas: " 
527 "could not initialize the low level driver")); 528 529 goto fail_attach; 530 } 531 532 /* Initialize all Mutex */ 533 INIT_LIST_HEAD(&instance->completed_pool_list); 534 mutex_init(&instance->completed_pool_mtx, 535 "completed_pool_mtx", MUTEX_DRIVER, 536 DDI_INTR_PRI(instance->intr_pri)); 537 538 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 539 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 540 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 541 542 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", 543 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 544 545 /* Register our soft-isr for highlevel interrupts. */ 546 instance->isr_level = instance->intr_pri; 547 if (instance->isr_level == HIGH_LEVEL_INTR) { 548 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 549 &instance->soft_intr_id, NULL, NULL, 550 mrsas_softintr, (caddr_t)instance) != 551 DDI_SUCCESS) { 552 con_log(CL_ANN, (CE_WARN, 553 " Software ISR did not register")); 554 555 goto fail_attach; 556 } 557 558 added_soft_isr_f = 1; 559 } 560 561 /* Allocate a transport structure */ 562 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 563 564 if (tran == NULL) { 565 con_log(CL_ANN, (CE_WARN, 566 "scsi_hba_tran_alloc failed")); 567 goto fail_attach; 568 } 569 570 tran_alloc_f = 1; 571 572 instance->tran = tran; 573 574 tran->tran_hba_private = instance; 575 tran->tran_tgt_init = mrsas_tran_tgt_init; 576 tran->tran_tgt_probe = scsi_hba_probe; 577 tran->tran_tgt_free = mrsas_tran_tgt_free; 578 tran->tran_init_pkt = mrsas_tran_init_pkt; 579 tran->tran_start = mrsas_tran_start; 580 tran->tran_abort = mrsas_tran_abort; 581 tran->tran_reset = mrsas_tran_reset; 582 tran->tran_getcap = mrsas_tran_getcap; 583 tran->tran_setcap = mrsas_tran_setcap; 584 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 585 tran->tran_dmafree = mrsas_tran_dmafree; 586 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 587 tran->tran_bus_config = mrsas_tran_bus_config; 588 589 if (mrsas_relaxed_ordering) 590 
mrsas_generic_dma_attr.dma_attr_flags |= 591 DDI_DMA_RELAXED_ORDERING; 592 593 594 tran_dma_attr = mrsas_generic_dma_attr; 595 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 596 597 /* Attach this instance of the hba */ 598 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 599 != DDI_SUCCESS) { 600 con_log(CL_ANN, (CE_WARN, 601 "scsi_hba_attach failed")); 602 603 goto fail_attach; 604 } 605 606 /* create devctl node for cfgadm command */ 607 if (ddi_create_minor_node(dip, "devctl", 608 S_IFCHR, INST2DEVCTL(instance_no), 609 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 610 con_log(CL_ANN, (CE_WARN, 611 "mr_sas: failed to create devctl node.")); 612 613 goto fail_attach; 614 } 615 616 create_devctl_node_f = 1; 617 618 /* create scsi node for cfgadm command */ 619 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 620 INST2SCSI(instance_no), 621 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 622 DDI_FAILURE) { 623 con_log(CL_ANN, (CE_WARN, 624 "mr_sas: failed to create scsi node.")); 625 626 goto fail_attach; 627 } 628 629 create_scsi_node_f = 1; 630 631 (void) sprintf(instance->iocnode, "%d:lsirdctl", 632 instance_no); 633 634 /* 635 * Create a node for applications 636 * for issuing ioctl to the driver. 
637 */ 638 if (ddi_create_minor_node(dip, instance->iocnode, 639 S_IFCHR, INST2LSIRDCTL(instance_no), 640 DDI_PSEUDO, 0) == DDI_FAILURE) { 641 con_log(CL_ANN, (CE_WARN, 642 "mr_sas: failed to create ioctl node.")); 643 644 goto fail_attach; 645 } 646 647 create_ioc_node_f = 1; 648 649 /* Create a taskq to handle dr events */ 650 if ((instance->taskq = ddi_taskq_create(dip, 651 "mrsas_dr_taskq", 1, 652 TASKQ_DEFAULTPRI, 0)) == NULL) { 653 con_log(CL_ANN, (CE_WARN, 654 "mr_sas: failed to create taskq ")); 655 instance->taskq = NULL; 656 goto fail_attach; 657 } 658 659 /* enable interrupt */ 660 instance->func_ptr->enable_intr(instance); 661 662 /* initiate AEN */ 663 if (start_mfi_aen(instance)) { 664 con_log(CL_ANN, (CE_WARN, 665 "mr_sas: failed to initiate AEN.")); 666 goto fail_initiate_aen; 667 } 668 669 con_log(CL_DLEVEL1, (CE_NOTE, 670 "AEN started for instance %d.", instance_no)); 671 672 /* Finally! We are on the air. */ 673 ddi_report_dev(dip); 674 675 if (mrsas_check_acc_handle(instance->regmap_handle) != 676 DDI_SUCCESS) { 677 goto fail_attach; 678 } 679 if (mrsas_check_acc_handle(instance->pci_handle) != 680 DDI_SUCCESS) { 681 goto fail_attach; 682 } 683 instance->mr_ld_list = 684 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 685 KM_SLEEP); 686 break; 687 case DDI_PM_RESUME: 688 con_log(CL_ANN, (CE_NOTE, 689 "mr_sas: DDI_PM_RESUME")); 690 break; 691 case DDI_RESUME: 692 con_log(CL_ANN, (CE_NOTE, 693 "mr_sas: DDI_RESUME")); 694 break; 695 default: 696 con_log(CL_ANN, (CE_WARN, 697 "mr_sas: invalid attach cmd=%x", cmd)); 698 return (DDI_FAILURE); 699 } 700 701 return (DDI_SUCCESS); 702 703 fail_initiate_aen: 704 fail_attach: 705 if (create_devctl_node_f) { 706 ddi_remove_minor_node(dip, "devctl"); 707 } 708 709 if (create_scsi_node_f) { 710 ddi_remove_minor_node(dip, "scsi"); 711 } 712 713 if (create_ioc_node_f) { 714 ddi_remove_minor_node(dip, instance->iocnode); 715 } 716 717 if (tran_alloc_f) { 718 scsi_hba_tran_free(tran); 719 } 720 721 722 if 
(added_soft_isr_f) { 723 ddi_remove_softintr(instance->soft_intr_id); 724 } 725 726 if (added_isr_f) { 727 mrsas_rem_intrs(instance); 728 } 729 730 if (instance && instance->taskq) { 731 ddi_taskq_destroy(instance->taskq); 732 } 733 734 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 735 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 736 737 mrsas_fm_fini(instance); 738 739 pci_config_teardown(&instance->pci_handle); 740 741 ddi_soft_state_free(mrsas_state, instance_no); 742 743 con_log(CL_ANN, (CE_NOTE, 744 "mr_sas: return failure from mrsas_attach")); 745 746 return (DDI_FAILURE); 747 } 748 749 /*ARGSUSED*/ 750 static int 751 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 752 { 753 int rval; 754 int mrsas_minor = getminor((dev_t)arg); 755 756 struct mrsas_instance *instance; 757 758 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 759 760 switch (cmd) { 761 case DDI_INFO_DEVT2DEVINFO: 762 instance = (struct mrsas_instance *) 763 ddi_get_soft_state(mrsas_state, 764 MINOR2INST(mrsas_minor)); 765 766 if (instance == NULL) { 767 *resultp = NULL; 768 rval = DDI_FAILURE; 769 } else { 770 *resultp = instance->dip; 771 rval = DDI_SUCCESS; 772 } 773 break; 774 case DDI_INFO_DEVT2INSTANCE: 775 *resultp = (void *)(intptr_t) 776 (MINOR2INST(getminor((dev_t)arg))); 777 rval = DDI_SUCCESS; 778 break; 779 default: 780 *resultp = NULL; 781 rval = DDI_FAILURE; 782 } 783 784 return (rval); 785 } 786 787 static int 788 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 789 { 790 int instance_no; 791 792 struct mrsas_instance *instance; 793 794 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 795 796 /* CONSTCOND */ 797 ASSERT(NO_COMPETING_THREADS); 798 799 instance_no = ddi_get_instance(dip); 800 801 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 802 instance_no); 803 804 if (!instance) { 805 con_log(CL_ANN, (CE_WARN, 806 "mr_sas:%d could not get instance in detach", 807 
instance_no)); 808 809 return (DDI_FAILURE); 810 } 811 812 con_log(CL_ANN, (CE_NOTE, 813 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x", 814 instance_no, instance->vendor_id, instance->device_id, 815 instance->subsysvid, instance->subsysid)); 816 817 switch (cmd) { 818 case DDI_DETACH: 819 con_log(CL_ANN, (CE_NOTE, 820 "mrsas_detach: DDI_DETACH")); 821 822 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 823 con_log(CL_ANN, (CE_WARN, 824 "mr_sas:%d failed to detach", 825 instance_no)); 826 827 return (DDI_FAILURE); 828 } 829 830 scsi_hba_tran_free(instance->tran); 831 832 flush_cache(instance); 833 834 if (abort_aen_cmd(instance, instance->aen_cmd)) { 835 con_log(CL_ANN, (CE_WARN, "mrsas_detach: " 836 "failed to abort prevous AEN command")); 837 838 return (DDI_FAILURE); 839 } 840 841 instance->func_ptr->disable_intr(instance); 842 843 if (instance->isr_level == HIGH_LEVEL_INTR) { 844 ddi_remove_softintr(instance->soft_intr_id); 845 } 846 847 mrsas_rem_intrs(instance); 848 849 if (instance->taskq) { 850 ddi_taskq_destroy(instance->taskq); 851 } 852 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 853 * sizeof (struct mrsas_ld)); 854 free_space_for_mfi(instance); 855 856 mrsas_fm_fini(instance); 857 858 pci_config_teardown(&instance->pci_handle); 859 860 kmem_free(instance->func_ptr, 861 sizeof (struct mrsas_func_ptr)); 862 863 ddi_soft_state_free(mrsas_state, instance_no); 864 break; 865 case DDI_PM_SUSPEND: 866 con_log(CL_ANN, (CE_NOTE, 867 "mrsas_detach: DDI_PM_SUSPEND")); 868 869 break; 870 case DDI_SUSPEND: 871 con_log(CL_ANN, (CE_NOTE, 872 "mrsas_detach: DDI_SUSPEND")); 873 874 break; 875 default: 876 con_log(CL_ANN, (CE_WARN, 877 "invalid detach command:0x%x", cmd)); 878 return (DDI_FAILURE); 879 } 880 881 return (DDI_SUCCESS); 882 } 883 884 /* 885 * ************************************************************************** * 886 * * 887 * common entry points - for character driver types * 888 * * 889 * 
************************************************************************** * 890 */ 891 static int 892 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 893 { 894 int rval = 0; 895 896 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 897 898 /* Check root permissions */ 899 if (drv_priv(credp) != 0) { 900 con_log(CL_ANN, (CE_WARN, 901 "mr_sas: Non-root ioctl access denied!")); 902 return (EPERM); 903 } 904 905 /* Verify we are being opened as a character device */ 906 if (otyp != OTYP_CHR) { 907 con_log(CL_ANN, (CE_WARN, 908 "mr_sas: ioctl node must be a char node")); 909 return (EINVAL); 910 } 911 912 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 913 == NULL) { 914 return (ENXIO); 915 } 916 917 if (scsi_hba_open) { 918 rval = scsi_hba_open(dev, openflags, otyp, credp); 919 } 920 921 return (rval); 922 } 923 924 static int 925 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 926 { 927 int rval = 0; 928 929 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 930 931 /* no need for locks! 
*/ 932 933 if (scsi_hba_close) { 934 rval = scsi_hba_close(dev, openflags, otyp, credp); 935 } 936 937 return (rval); 938 } 939 940 static int 941 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 942 int *rvalp) 943 { 944 int rval = 0; 945 946 struct mrsas_instance *instance; 947 struct mrsas_ioctl *ioctl; 948 struct mrsas_aen aen; 949 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 950 951 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 952 953 if (instance == NULL) { 954 /* invalid minor number */ 955 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 956 return (ENXIO); 957 } 958 959 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 960 KM_SLEEP); 961 ASSERT(ioctl); 962 963 switch ((uint_t)cmd) { 964 case MRSAS_IOCTL_FIRMWARE: 965 if (ddi_copyin((void *)arg, ioctl, 966 sizeof (struct mrsas_ioctl), mode)) { 967 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 968 "ERROR IOCTL copyin")); 969 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 970 return (EFAULT); 971 } 972 973 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 974 rval = handle_drv_ioctl(instance, ioctl, mode); 975 } else { 976 rval = handle_mfi_ioctl(instance, ioctl, mode); 977 } 978 979 if (ddi_copyout((void *)ioctl, (void *)arg, 980 (sizeof (struct mrsas_ioctl) - 1), mode)) { 981 con_log(CL_ANN, (CE_WARN, 982 "mrsas_ioctl: copy_to_user failed")); 983 rval = 1; 984 } 985 986 break; 987 case MRSAS_IOCTL_AEN: 988 if (ddi_copyin((void *) arg, &aen, 989 sizeof (struct mrsas_aen), mode)) { 990 con_log(CL_ANN, (CE_WARN, 991 "mrsas_ioctl: ERROR AEN copyin")); 992 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 993 return (EFAULT); 994 } 995 996 rval = handle_mfi_aen(instance, &aen); 997 998 if (ddi_copyout((void *) &aen, (void *)arg, 999 sizeof (struct mrsas_aen), mode)) { 1000 con_log(CL_ANN, (CE_WARN, 1001 "mrsas_ioctl: copy_to_user failed")); 1002 rval = 1; 1003 } 1004 1005 break; 1006 default: 1007 rval = 
scsi_hba_ioctl(dev, cmd, arg, 1008 mode, credp, rvalp); 1009 1010 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: " 1011 "scsi_hba_ioctl called, ret = %x.", rval)); 1012 } 1013 1014 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1015 return (rval); 1016 } 1017 1018 /* 1019 * ************************************************************************** * 1020 * * 1021 * common entry points - for block driver types * 1022 * * 1023 * ************************************************************************** * 1024 */ 1025 /*ARGSUSED*/ 1026 static int 1027 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1028 { 1029 int instance_no; 1030 1031 struct mrsas_instance *instance; 1032 1033 instance_no = ddi_get_instance(dip); 1034 instance = (struct mrsas_instance *)ddi_get_soft_state 1035 (mrsas_state, instance_no); 1036 1037 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1038 1039 if (!instance) { 1040 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter " 1041 "in reset", instance_no)); 1042 return (DDI_FAILURE); 1043 } 1044 1045 instance->func_ptr->disable_intr(instance); 1046 1047 con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", 1048 instance_no)); 1049 1050 flush_cache(instance); 1051 1052 return (DDI_SUCCESS); 1053 } 1054 1055 1056 /* 1057 * ************************************************************************** * 1058 * * 1059 * entry points (SCSI HBA) * 1060 * * 1061 * ************************************************************************** * 1062 */ 1063 /*ARGSUSED*/ 1064 static int 1065 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1066 scsi_hba_tran_t *tran, struct scsi_device *sd) 1067 { 1068 struct mrsas_instance *instance; 1069 uint16_t tgt = sd->sd_address.a_target; 1070 uint8_t lun = sd->sd_address.a_lun; 1071 1072 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d", 1073 tgt, lun)); 1074 1075 instance = ADDR2MR(&sd->sd_address); 1076 1077 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 1078 (void) 
ndi_merge_node(tgt_dip, mrsas_name_node); 1079 ddi_set_name_addr(tgt_dip, NULL); 1080 1081 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in " 1082 "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d", 1083 tgt, lun)); 1084 return (DDI_FAILURE); 1085 } 1086 1087 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", 1088 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); 1089 1090 if (tgt < MRDRV_MAX_LD && lun == 0) { 1091 if (instance->mr_ld_list[tgt].dip == NULL && 1092 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { 1093 instance->mr_ld_list[tgt].dip = tgt_dip; 1094 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; 1095 } 1096 } 1097 return (DDI_SUCCESS); 1098 } 1099 1100 /*ARGSUSED*/ 1101 static void 1102 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1103 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1104 { 1105 struct mrsas_instance *instance; 1106 int tgt = sd->sd_address.a_target; 1107 int lun = sd->sd_address.a_lun; 1108 1109 instance = ADDR2MR(&sd->sd_address); 1110 1111 con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); 1112 1113 if (tgt < MRDRV_MAX_LD && lun == 0) { 1114 if (instance->mr_ld_list[tgt].dip == tgt_dip) { 1115 instance->mr_ld_list[tgt].dip = NULL; 1116 } 1117 } 1118 } 1119 1120 static dev_info_t * 1121 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun) 1122 { 1123 dev_info_t *child = NULL; 1124 char addr[SCSI_MAXNAMELEN]; 1125 char tmp[MAXNAMELEN]; 1126 1127 (void) sprintf(addr, "%x,%x", tgt, lun); 1128 for (child = ddi_get_child(instance->dip); child; 1129 child = ddi_get_next_sibling(child)) { 1130 1131 if (mrsas_name_node(child, tmp, MAXNAMELEN) != 1132 DDI_SUCCESS) { 1133 continue; 1134 } 1135 1136 if (strcmp(addr, tmp) == 0) { 1137 break; 1138 } 1139 } 1140 con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p", 1141 (void *)child)); 1142 return (child); 1143 } 1144 1145 static int 1146 mrsas_name_node(dev_info_t *dip, char *name, int len) 1147 { 
1148 int tgt, lun; 1149 1150 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1151 DDI_PROP_DONTPASS, "target", -1); 1152 con_log(CL_ANN1, (CE_NOTE, 1153 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt)); 1154 if (tgt == -1) { 1155 return (DDI_FAILURE); 1156 } 1157 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1158 "lun", -1); 1159 con_log(CL_ANN1, 1160 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun)); 1161 if (lun == -1) { 1162 return (DDI_FAILURE); 1163 } 1164 (void) snprintf(name, len, "%x,%x", tgt, lun); 1165 return (DDI_SUCCESS); 1166 } 1167 1168 static struct scsi_pkt * 1169 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, 1170 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1171 int flags, int (*callback)(), caddr_t arg) 1172 { 1173 struct scsa_cmd *acmd; 1174 struct mrsas_instance *instance; 1175 struct scsi_pkt *new_pkt; 1176 1177 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1178 1179 instance = ADDR2MR(ap); 1180 1181 /* step #1 : pkt allocation */ 1182 if (pkt == NULL) { 1183 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, 1184 tgtlen, sizeof (struct scsa_cmd), callback, arg); 1185 if (pkt == NULL) { 1186 return (NULL); 1187 } 1188 1189 acmd = PKT2CMD(pkt); 1190 1191 /* 1192 * Initialize the new pkt - we redundantly initialize 1193 * all the fields for illustrative purposes. 
         */
        acmd->cmd_pkt = pkt;
        acmd->cmd_flags = 0;
        acmd->cmd_scblen = statuslen;
        acmd->cmd_cdblen = cmdlen;
        acmd->cmd_dmahandle = NULL;
        acmd->cmd_ncookies = 0;
        acmd->cmd_cookie = 0;
        acmd->cmd_cookiecnt = 0;
        acmd->cmd_nwin = 0;

        pkt->pkt_address = *ap;
        pkt->pkt_comp = (void (*)())NULL;
        pkt->pkt_flags = 0;
        pkt->pkt_time = 0;
        pkt->pkt_resid = 0;
        pkt->pkt_state = 0;
        pkt->pkt_statistics = 0;
        pkt->pkt_reason = 0;
        new_pkt = pkt;
    } else {
        acmd = PKT2CMD(pkt);
        new_pkt = NULL;
    }

    /* step #2 : dma allocation/move */
    if (bp && bp->b_bcount != 0) {
        if (acmd->cmd_dmahandle == NULL) {
            if (mrsas_dma_alloc(instance, pkt, bp, flags,
                callback) == DDI_FAILURE) {
                /* only free a pkt allocated in this call */
                if (new_pkt) {
                    scsi_hba_pkt_free(ap, new_pkt);
                }
                return ((struct scsi_pkt *)NULL);
            }
        } else {
            if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
                return ((struct scsi_pkt *)NULL);
            }
        }
    }

    return (pkt);
}

/*
 * mrsas_tran_start - SCSA tran_start(9E) entry point.
 * Builds an MFI command for the pkt and issues it to the firmware,
 * either asynchronously (normal case) or synchronously in polled mode
 * when FLAG_NOINTR is set.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
    uchar_t cmd_done = 0;

    struct mrsas_instance *instance = ADDR2MR(ap);
    struct mrsas_cmd *cmd;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
        __func__, __LINE__, pkt->pkt_cdbp[0]));

    pkt->pkt_reason = CMD_CMPLT;
    *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

    cmd = build_cmd(instance, ap, pkt, &cmd_done);

    /*
     * Check if the command is already completed by the mrsas_build_cmd()
     * routine. In which case the busy_flag would be clear and scb will be
     * NULL and appropriate reason provided in pkt_reason field
     */
    if (cmd_done) {
        pkt->pkt_reason = CMD_CMPLT;
        pkt->pkt_scbp[0] = STATUS_GOOD;
        pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
            | STATE_SENT_CMD;
        /* complete in-line; no interrupt will ever fire for this pkt */
        if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
            (*pkt->pkt_comp)(pkt);
        }

        return (TRAN_ACCEPT);
    }

    if (cmd == NULL) {
        return (TRAN_BUSY);
    }

    if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
        /* throttle when the firmware queue is saturated */
        if (instance->fw_outstanding > instance->max_fw_cmds) {
            con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
            DTRACE_PROBE2(start_tran_err,
                uint16_t, instance->fw_outstanding,
                uint16_t, instance->max_fw_cmds);
            return_mfi_pkt(instance, cmd);
            return (TRAN_BUSY);
        }

        /* Synchronize the Cmd frame for the controller */
        (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
            DDI_DMA_SYNC_FORDEV);

        instance->func_ptr->issue_cmd(cmd, instance);

    } else {
        /* polled (no-interrupt) submission */
        struct mrsas_header *hdr = &cmd->frame->hdr;

        cmd->sync_cmd = MRSAS_TRUE;

        instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

        pkt->pkt_reason = CMD_CMPLT;
        pkt->pkt_statistics = 0;
        pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

        /* map the MFI completion status into pkt status */
        switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
            &hdr->cmd_status)) {
        case MFI_STAT_OK:
            pkt->pkt_scbp[0] = STATUS_GOOD;
            break;

        case MFI_STAT_SCSI_DONE_WITH_ERROR:

            pkt->pkt_reason = CMD_CMPLT;
            pkt->pkt_statistics = 0;

            ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
            break;

        case MFI_STAT_DEVICE_NOT_FOUND:
            pkt->pkt_reason = CMD_DEV_GONE;
            pkt->pkt_statistics = STAT_DISCON;
            break;

        default:
            ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
        }

        (void) mrsas_common_check(instance, cmd);

        DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
            uint8_t, hdr->cmd_status);
        return_mfi_pkt(instance, cmd);

        if (pkt->pkt_comp) {
            (*pkt->pkt_comp)(pkt);
        }

    }

    return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - tran_abort(9E): per-command abort is not supported
 * by the hardware, so this always fails.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* abort command not supported by H/W */

    return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - tran_reset(9E): target/bus reset is not supported
 * by the hardware, so this always fails.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* reset command not supported by H/W */

    return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - tran_getcap(9E): report the HBA's capabilities.
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
    int rval = 0;

    struct mrsas_instance *instance = ADDR2MR(ap);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* we do allow inquiring about capabilities for other targets */
    if (cap == NULL) {
        return (-1);
    }

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
        /* Limit to 16MB max transfer */
        rval = mrsas_max_cap_maxxfer;
        break;
    case SCSI_CAP_MSG_OUT:
        rval = 1;
        break;
    case SCSI_CAP_DISCONNECT:
        rval = 0;
        break;
    case SCSI_CAP_SYNCHRONOUS:
        rval = 0;
        break;
    case SCSI_CAP_WIDE_XFER:
        rval = 1;
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_UNTAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_PARITY:
        rval = 1;
        break;
    case SCSI_CAP_INITIATOR_ID:
        rval = instance->init_id;
        break;
    case SCSI_CAP_ARQ:
        rval = 1;
        break;
    case SCSI_CAP_LINKED_CMDS:
        rval = 0;
        break;
    case SCSI_CAP_RESET_NOTIFICATION:
        rval = 1;
        break;
    case
SCSI_CAP_GEOMETRY:
        rval = -1;

        break;
    default:
        con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
            scsi_hba_lookup_capstr(cap)));
        rval = -1;
        break;
    }

    return (rval);
}

/*
 * mrsas_tran_setcap - tran_setcap(9E): accept or reject attempts to
 * change a capability.  Most capabilities here are fixed; "accepting"
 * a set (rval 1) does not change device behavior.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
    int rval = 1;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* We don't allow setting capabilities for other targets */
    if (cap == NULL || whom == 0) {
        return (-1);
    }

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
    case SCSI_CAP_MSG_OUT:
    case SCSI_CAP_PARITY:
    case SCSI_CAP_LINKED_CMDS:
    case SCSI_CAP_RESET_NOTIFICATION:
    case SCSI_CAP_DISCONNECT:
    case SCSI_CAP_SYNCHRONOUS:
    case SCSI_CAP_UNTAGGED_QING:
    case SCSI_CAP_WIDE_XFER:
    case SCSI_CAP_INITIATOR_ID:
    case SCSI_CAP_ARQ:
        /*
         * None of these are settable via
         * the capability interface.
         */
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_SECTOR_SIZE:
        rval = 1;
        break;

    case SCSI_CAP_TOTAL_SECTORS:
        rval = 1;
        break;
    default:
        rval = -1;
        break;
    }

    return (rval);
}

/*
 * mrsas_tran_destroy_pkt - tran_destroy_pkt(9E): unbind and free the
 * pkt's DMA resources (if bound) and free the pkt itself.
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct scsa_cmd *acmd = PKT2CMD(pkt);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        acmd->cmd_flags &= ~CFLAG_DMAVALID;

        (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

        ddi_dma_free_handle(&acmd->cmd_dmahandle);

        acmd->cmd_dmahandle = NULL;
    }

    /* free the pkt */
    scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - tran_dmafree(9E): release only the DMA resources
 * bound to the pkt; the pkt itself stays allocated.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    register struct scsa_cmd *acmd = PKT2CMD(pkt);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        acmd->cmd_flags &= ~CFLAG_DMAVALID;

        (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

        ddi_dma_free_handle(&acmd->cmd_dmahandle);

        acmd->cmd_dmahandle = NULL;
    }
}

/*
 * mrsas_tran_sync_pkt - tran_sync_pkt(9E): synchronize the pkt's DMA
 * buffer for the device or the CPU depending on transfer direction.
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    register struct scsa_cmd *acmd = PKT2CMD(pkt);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
            acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
    }
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
    int need_softintr;
    uint32_t producer;
    uint32_t consumer;
    uint32_t context;

    struct mrsas_cmd *cmd;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    ASSERT(instance);
    /* for fixed (shared) interrupts, claim only if our device raised it */
    if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
        !instance->func_ptr->intr_ack(instance)) {
        return (DDI_INTR_UNCLAIMED);
    }

    (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
        0, 0, DDI_DMA_SYNC_FORCPU);

    if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
        != DDI_SUCCESS) {
        mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (DDI_INTR_CLAIMED);
    }

    producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
        instance->producer);
    consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
        instance->consumer);

    con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
        producer, consumer));
    if (producer == consumer) {
        /* nothing completed; still claim the interrupt */
        con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
        DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
            uint32_t, consumer);
        return (DDI_INTR_CLAIMED);
    }
    mutex_enter(&instance->completed_pool_mtx);

    /*
     * Drain the circular reply queue: each entry carries the context
     * (command index) of a completed command; queue them on the
     * completed-pool list for the soft interrupt to process.
     */
    while (consumer != producer) {
        context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
            &instance->reply_queue[consumer]);
        cmd = instance->cmd_list[context];
        mlist_add_tail(&cmd->list, &instance->completed_pool_list);

        consumer++;
        /* wrap: reply queue has max_fw_cmds + 1 entries */
        if (consumer == (instance->max_fw_cmds + 1)) {
            consumer = 0;
        }
    }

    mutex_exit(&instance->completed_pool_mtx);

    /* publish the new consumer index back to the shared area */
    ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
        instance->consumer, consumer);
    (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
        0, 0, DDI_DMA_SYNC_FORDEV);

    if (instance->softint_running) {
        need_softintr = 0;
    } else {
        need_softintr = 1;
    }

    if (instance->isr_level == HIGH_LEVEL_INTR) {
        if (need_softintr) {
            ddi_trigger_softintr(instance->soft_intr_id);
        }
    } else {
        /*
         * Not a high-level interrupt, therefore call the soft level
         * interrupt explicitly
         */
        (void) mrsas_softintr(instance);
    }

    return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                  libraries                                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
1641 */ 1642 static struct mrsas_cmd * 1643 get_mfi_pkt(struct mrsas_instance *instance) 1644 { 1645 mlist_t *head = &instance->cmd_pool_list; 1646 struct mrsas_cmd *cmd = NULL; 1647 1648 mutex_enter(&instance->cmd_pool_mtx); 1649 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1650 1651 if (!mlist_empty(head)) { 1652 cmd = mlist_entry(head->next, struct mrsas_cmd, list); 1653 mlist_del_init(head->next); 1654 } 1655 if (cmd != NULL) 1656 cmd->pkt = NULL; 1657 mutex_exit(&instance->cmd_pool_mtx); 1658 1659 return (cmd); 1660 } 1661 1662 /* 1663 * return_mfi_pkt : Return a cmd to free command pool 1664 */ 1665 static void 1666 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 1667 { 1668 mutex_enter(&instance->cmd_pool_mtx); 1669 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1670 1671 mlist_add(&cmd->list, &instance->cmd_pool_list); 1672 1673 mutex_exit(&instance->cmd_pool_mtx); 1674 } 1675 1676 /* 1677 * destroy_mfi_frame_pool 1678 */ 1679 static void 1680 destroy_mfi_frame_pool(struct mrsas_instance *instance) 1681 { 1682 int i; 1683 uint32_t max_cmd = instance->max_fw_cmds; 1684 1685 struct mrsas_cmd *cmd; 1686 1687 /* return all frames to pool */ 1688 for (i = 0; i < max_cmd+1; i++) { 1689 1690 cmd = instance->cmd_list[i]; 1691 1692 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) 1693 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj); 1694 1695 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 1696 } 1697 1698 } 1699 1700 /* 1701 * create_mfi_frame_pool 1702 */ 1703 static int 1704 create_mfi_frame_pool(struct mrsas_instance *instance) 1705 { 1706 int i = 0; 1707 int cookie_cnt; 1708 uint16_t max_cmd; 1709 uint16_t sge_sz; 1710 uint32_t sgl_sz; 1711 uint32_t tot_frame_size; 1712 struct mrsas_cmd *cmd; 1713 1714 max_cmd = instance->max_fw_cmds; 1715 1716 sge_sz = sizeof (struct mrsas_sge64); 1717 1718 /* calculated the number of 64byte frames required for SGL */ 1719 sgl_sz = sge_sz * instance->max_num_sge; 1720 tot_frame_size = sgl_sz + 
MRMFI_FRAME_SIZE + SENSE_LENGTH; 1721 1722 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 1723 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 1724 1725 while (i < max_cmd+1) { 1726 cmd = instance->cmd_list[i]; 1727 1728 cmd->frame_dma_obj.size = tot_frame_size; 1729 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr; 1730 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 1731 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 1732 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 1733 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 1734 1735 1736 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj, 1737 (uchar_t)DDI_STRUCTURE_LE_ACC); 1738 1739 if (cookie_cnt == -1 || cookie_cnt > 1) { 1740 con_log(CL_ANN, (CE_WARN, 1741 "create_mfi_frame_pool: could not alloc.")); 1742 return (DDI_FAILURE); 1743 } 1744 1745 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 1746 1747 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 1748 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer; 1749 cmd->frame_phys_addr = 1750 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 1751 1752 cmd->sense = (uint8_t *)(((unsigned long) 1753 cmd->frame_dma_obj.buffer) + 1754 tot_frame_size - SENSE_LENGTH); 1755 cmd->sense_phys_addr = 1756 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 1757 tot_frame_size - SENSE_LENGTH; 1758 1759 if (!cmd->frame || !cmd->sense) { 1760 con_log(CL_ANN, (CE_NOTE, 1761 "mr_sas: pci_pool_alloc failed")); 1762 1763 return (ENOMEM); 1764 } 1765 1766 ddi_put32(cmd->frame_dma_obj.acc_handle, 1767 &cmd->frame->io.context, cmd->index); 1768 i++; 1769 1770 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 1771 cmd->index, cmd->frame_phys_addr)); 1772 } 1773 1774 return (DDI_SUCCESS); 1775 } 1776 1777 /* 1778 * free_additional_dma_buffer 1779 */ 1780 static void 1781 free_additional_dma_buffer(struct mrsas_instance *instance) 1782 { 1783 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 1784 (void) 
mrsas_free_dma_obj(instance,
            instance->mfi_internal_dma_obj);
        instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
    }

    if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
        (void) mrsas_free_dma_obj(instance,
            instance->mfi_evt_detail_obj);
        instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
    }
}

/*
 * alloc_additional_dma_buffer : allocate the shared DMA area holding
 * the producer index, consumer index, reply queue and an internal
 * scratch buffer, plus a separate buffer for AEN event details.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
    uint32_t reply_q_sz;
    uint32_t internal_buf_size = PAGESIZE*2;

    /* max cmds plus 1 + producer & consumer */
    reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

    instance->mfi_internal_dma_obj.size = internal_buf_size;
    instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
    instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
    instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
        0xFFFFFFFFU;
    instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

    if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
        (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
        con_log(CL_ANN, (CE_WARN,
            "mr_sas: could not alloc reply queue"));
        return (DDI_FAILURE);
    }

    bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

    instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

    /*
     * Carve up the buffer: producer index (4 bytes), consumer index
     * (4 bytes), the reply queue, then the remaining scratch space.
     */
    instance->producer = (uint32_t *)((unsigned long)
        instance->mfi_internal_dma_obj.buffer);
    instance->consumer = (uint32_t *)((unsigned long)
        instance->mfi_internal_dma_obj.buffer + 4);
    instance->reply_queue = (uint32_t *)((unsigned long)
        instance->mfi_internal_dma_obj.buffer + 8);
    instance->internal_buf = (caddr_t)(((unsigned long)
        instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
    instance->internal_buf_dmac_add =
        instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
        (reply_q_sz + 8);
    instance->internal_buf_size = internal_buf_size -
        (reply_q_sz + 8);

    /* allocate evt_detail */
    instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
    instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
    instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
    instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
    instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
    instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

    if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
        (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
        con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
            "could not allocate data transfer buffer."));
        return (DDI_FAILURE);
    }

    bzero(instance->mfi_evt_detail_obj.buffer,
        sizeof (struct mrsas_evt_detail));

    instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

    return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi : tear down everything alloc_space_for_mfi built:
 * the additional DMA buffers, the frame pool, every command structure
 * and finally the cmd_list array itself.  Safe to call twice.
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
    int i;
    uint32_t max_cmd = instance->max_fw_cmds;

    /* already freed */
    if (instance->cmd_list == NULL) {
        return;
    }

    free_additional_dma_buffer(instance);

    /* first free the MFI frame pool */
    destroy_mfi_frame_pool(instance);

    /* free all the commands in the cmd_list */
    for (i = 0; i < instance->max_fw_cmds+1; i++) {
        kmem_free(instance->cmd_list[i],
            sizeof (struct mrsas_cmd));

        instance->cmd_list[i] = NULL;
    }

    /* free the cmd_list buffer itself */
    kmem_free(instance->cmd_list,
        sizeof (struct mrsas_cmd *) * (max_cmd+1));

    instance->cmd_list = NULL;

    INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi : allocate the command array, the commands
 * themselves (plus one reserved flush_cache slot), the frame pool and
 * the additional DMA buffers.
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
int i;
    uint32_t max_cmd;
    size_t sz;

    struct mrsas_cmd *cmd;

    max_cmd = instance->max_fw_cmds;

    /* reserve 1 more slot for flush_cache */
    sz = sizeof (struct mrsas_cmd *) * (max_cmd+1);

    /*
     * instance->cmd_list is an array of struct mrsas_cmd pointers.
     * Allocate the dynamic array first and then allocate individual
     * commands.
     */
    instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
    ASSERT(instance->cmd_list);

    for (i = 0; i < max_cmd+1; i++) {
        instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd),
            KM_SLEEP);
        ASSERT(instance->cmd_list[i]);
    }

    INIT_LIST_HEAD(&instance->cmd_pool_list);

    /* add all the commands to command pool (instance->cmd_pool) */
    for (i = 0; i < max_cmd; i++) {
        cmd = instance->cmd_list[i];
        cmd->index = i;

        mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
    }

    /* single slot for flush_cache won't be added in command pool */
    cmd = instance->cmd_list[max_cmd];
    cmd->index = i;    /* i == max_cmd after the loop above */

    /* create a frame pool and assign one frame to each cmd */
    if (create_mfi_frame_pool(instance)) {
        con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
        return (DDI_FAILURE);
    }

    /* allocate the reply queue and other internal DMA buffers */
    if (alloc_additional_dma_buffer(instance)) {
        con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * get_ctrl_info : issue MR_DCMD_CTRL_GET_INFO in polled mode and copy
 * the fields of interest (max_request_size, ld_present_count,
 * product_name) out of the DMA scratch buffer into *ctrl_info.
 * Returns 0 on success, -1 (or DDI_FAILURE) on error.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
    int ret = 0;

    struct mrsas_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;
    struct mrsas_ctrl_info *ci;

    cmd = get_mfi_pkt(instance);

    if (!cmd) {
        con_log(CL_ANN, (CE_WARN,
            "Failed to get a cmd for ctrl info"));
        DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
            uint16_t, instance->max_fw_cmds);
        return (DDI_FAILURE);
    }
    /* Clear the frame buffer and assign back the context id */
    (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
        cmd->index);

    dcmd = &cmd->frame->dcmd;

    /* the DCMD reads the reply into the shared internal scratch buffer */
    ci = (struct mrsas_ctrl_info *)instance->internal_buf;

    if (!ci) {
        con_log(CL_ANN, (CE_WARN,
            "Failed to alloc mem for ctrl info"));
        return_mfi_pkt(instance, cmd);
        return (DDI_FAILURE);
    }

    (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

    /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
    (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
        MFI_CMD_STATUS_POLL_MODE);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
        MFI_FRAME_DIR_READ);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
        sizeof (struct mrsas_ctrl_info));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
        MR_DCMD_CTRL_GET_INFO);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
        instance->internal_buf_dmac_add);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
        sizeof (struct mrsas_ctrl_info));

    cmd->frame_count = 1;

    if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
        ret = 0;
        ctrl_info->max_request_size = ddi_get32(
            cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
        ctrl_info->ld_present_count = ddi_get16(
            cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
        ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
            (uint8_t *)(ctrl_info->product_name),
            (uint8_t *)(ci->product_name), 80 * sizeof (char),
            DDI_DEV_AUTOINCR);
        /* should get more members of ci with ddi_get when needed */
    } else {
        con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
        ret = -1;
    }

    if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
        ret = -1;
    }
    return_mfi_pkt(instance, cmd);

    return (ret);
}

/*
 * abort_aen_cmd : issue an MFI_CMD_OP_ABORT frame, in sync mode, for
 * the outstanding AEN command and clear instance->aen_cmd.
 * Returns 0 on success, -1 or DDI_FAILURE on error.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
    int ret = 0;

    struct mrsas_cmd *cmd;
    struct mrsas_abort_frame *abort_fr;

    cmd = get_mfi_pkt(instance);

    if (!cmd) {
        con_log(CL_ANN, (CE_WARN,
            "Failed to get a cmd for ctrl info"));
        DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
            uint16_t, instance->max_fw_cmds);
        return (DDI_FAILURE);
    }
    /* Clear the frame buffer and assign back the context id */
    (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
        cmd->index);

    abort_fr = &cmd->frame->abort;

    /* prepare and issue the abort frame */
    ddi_put8(cmd->frame_dma_obj.acc_handle,
        &abort_fr->cmd, MFI_CMD_OP_ABORT);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
        MFI_CMD_STATUS_SYNC_MODE);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
        cmd_to_abort->index);
    ddi_put32(cmd->frame_dma_obj.acc_handle,
        &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
    ddi_put32(cmd->frame_dma_obj.acc_handle,
        &abort_fr->abort_mfi_phys_addr_hi, 0);

    /* tell the AEN completion path that this abort is deliberate */
    instance->aen_cmd->abort_aen = 1;

    cmd->sync_cmd = MRSAS_TRUE;
cmd->frame_count = 1; 2089 2090 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2091 con_log(CL_ANN, (CE_WARN, 2092 "abort_aen_cmd: issue_cmd_in_sync_mode failed")); 2093 ret = -1; 2094 } else { 2095 ret = 0; 2096 } 2097 2098 instance->aen_cmd->abort_aen = 1; 2099 instance->aen_cmd = 0; 2100 2101 (void) mrsas_common_check(instance, cmd); 2102 2103 return_mfi_pkt(instance, cmd); 2104 2105 return (ret); 2106 } 2107 2108 /* 2109 * init_mfi 2110 */ 2111 static int 2112 init_mfi(struct mrsas_instance *instance) 2113 { 2114 struct mrsas_cmd *cmd; 2115 struct mrsas_ctrl_info ctrl_info; 2116 struct mrsas_init_frame *init_frame; 2117 struct mrsas_init_queue_info *initq_info; 2118 2119 /* we expect the FW state to be READY */ 2120 if (mfi_state_transition_to_ready(instance)) { 2121 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); 2122 goto fail_ready_state; 2123 } 2124 2125 /* get various operational parameters from status register */ 2126 instance->max_num_sge = 2127 (instance->func_ptr->read_fw_status_reg(instance) & 2128 0xFF0000) >> 0x10; 2129 /* 2130 * Reduce the max supported cmds by 1. This is to ensure that the 2131 * reply_q_sz (1 more than the max cmd that driver may send) 2132 * does not exceed max cmds that the FW can support 2133 */ 2134 instance->max_fw_cmds = 2135 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 2136 instance->max_fw_cmds = instance->max_fw_cmds - 1; 2137 2138 instance->max_num_sge = 2139 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? 2140 MRSAS_MAX_SGE_CNT : instance->max_num_sge; 2141 2142 /* create a pool of commands */ 2143 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) 2144 goto fail_alloc_fw_space; 2145 2146 /* 2147 * Prepare a init frame. Note the init frame points to queue info 2148 * structure. Each frame has SGL allocated after first 64 bytes. 
For 2149 * this frame - since we don't need any SGL - we use SGL's space as 2150 * queue info structure 2151 */ 2152 cmd = get_mfi_pkt(instance); 2153 /* Clear the frame buffer and assign back the context id */ 2154 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2155 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2156 cmd->index); 2157 2158 init_frame = (struct mrsas_init_frame *)cmd->frame; 2159 initq_info = (struct mrsas_init_queue_info *) 2160 ((unsigned long)init_frame + 64); 2161 2162 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 2163 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 2164 2165 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 2166 2167 ddi_put32(cmd->frame_dma_obj.acc_handle, 2168 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 2169 2170 ddi_put32(cmd->frame_dma_obj.acc_handle, 2171 &initq_info->producer_index_phys_addr_hi, 0); 2172 ddi_put32(cmd->frame_dma_obj.acc_handle, 2173 &initq_info->producer_index_phys_addr_lo, 2174 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 2175 2176 ddi_put32(cmd->frame_dma_obj.acc_handle, 2177 &initq_info->consumer_index_phys_addr_hi, 0); 2178 ddi_put32(cmd->frame_dma_obj.acc_handle, 2179 &initq_info->consumer_index_phys_addr_lo, 2180 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 2181 2182 ddi_put32(cmd->frame_dma_obj.acc_handle, 2183 &initq_info->reply_queue_start_phys_addr_hi, 0); 2184 ddi_put32(cmd->frame_dma_obj.acc_handle, 2185 &initq_info->reply_queue_start_phys_addr_lo, 2186 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 2187 2188 ddi_put8(cmd->frame_dma_obj.acc_handle, 2189 &init_frame->cmd, MFI_CMD_OP_INIT); 2190 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 2191 MFI_CMD_STATUS_POLL_MODE); 2192 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 2193 ddi_put32(cmd->frame_dma_obj.acc_handle, 2194 
&init_frame->queue_info_new_phys_addr_lo, 2195 cmd->frame_phys_addr + 64); 2196 ddi_put32(cmd->frame_dma_obj.acc_handle, 2197 &init_frame->queue_info_new_phys_addr_hi, 0); 2198 2199 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 2200 sizeof (struct mrsas_init_queue_info)); 2201 2202 cmd->frame_count = 1; 2203 2204 /* issue the init frame in polled mode */ 2205 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2206 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 2207 return_mfi_pkt(instance, cmd); 2208 goto fail_fw_init; 2209 } 2210 2211 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2212 return_mfi_pkt(instance, cmd); 2213 goto fail_fw_init; 2214 } 2215 2216 return_mfi_pkt(instance, cmd); 2217 2218 /* gather misc FW related information */ 2219 if (!get_ctrl_info(instance, &ctrl_info)) { 2220 instance->max_sectors_per_req = ctrl_info.max_request_size; 2221 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d", 2222 ctrl_info.product_name, ctrl_info.ld_present_count)); 2223 } else { 2224 instance->max_sectors_per_req = instance->max_num_sge * 2225 PAGESIZE / 512; 2226 } 2227 2228 return (DDI_SUCCESS); 2229 2230 fail_fw_init: 2231 fail_alloc_fw_space: 2232 2233 free_space_for_mfi(instance); 2234 2235 fail_ready_state: 2236 ddi_regs_map_free(&instance->regmap_handle); 2237 2238 fail_mfi_reg_setup: 2239 return (DDI_FAILURE); 2240 } 2241 2242 /* 2243 * mfi_state_transition_to_ready : Move the FW to READY state 2244 * 2245 * @reg_set : MFI register set 2246 */ 2247 static int 2248 mfi_state_transition_to_ready(struct mrsas_instance *instance) 2249 { 2250 int i; 2251 uint8_t max_wait; 2252 uint32_t fw_ctrl; 2253 uint32_t fw_state; 2254 uint32_t cur_state; 2255 2256 fw_state = 2257 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK; 2258 con_log(CL_ANN1, (CE_NOTE, 2259 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2260 2261 while (fw_state != MFI_STATE_READY) { 2262 con_log(CL_ANN, 
(CE_NOTE,
            "mfi_state_transition_to_ready:FW state%x", fw_state));

        /*
         * For each intermediate state, perform the required doorbell
         * poke (if any) and record how long that state may last.
         */
        switch (fw_state) {
        case MFI_STATE_FAULT:
            con_log(CL_ANN, (CE_NOTE,
                "mr_sas: FW in FAULT state!!"));

            return (ENODEV);
        case MFI_STATE_WAIT_HANDSHAKE:
            /* set the CLR bit in IMR0 */
            con_log(CL_ANN, (CE_NOTE,
                "mr_sas: FW waiting for HANDSHAKE"));
            /*
             * PCI_Hot Plug: MFI F/W requires
             * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
             * to be set
             */
            /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
            WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
                MFI_INIT_HOTPLUG, instance);

            max_wait = 2;
            cur_state = MFI_STATE_WAIT_HANDSHAKE;
            break;
        case MFI_STATE_BOOT_MESSAGE_PENDING:
            /* set the CLR bit in IMR0 */
            con_log(CL_ANN, (CE_NOTE,
                "mr_sas: FW state boot message pending"));
            /*
             * PCI_Hot Plug: MFI F/W requires
             * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
             * to be set
             */
            WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

            max_wait = 10;
            cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
            break;
        case MFI_STATE_OPERATIONAL:
            /* bring it to READY state; assuming max wait 2 secs */
            instance->func_ptr->disable_intr(instance);
            con_log(CL_ANN1, (CE_NOTE,
                "mr_sas: FW in OPERATIONAL state"));
            /*
             * PCI_Hot Plug: MFI F/W requires
             * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
             * to be set
             */
            /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
            WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

            max_wait = 10;
            cur_state = MFI_STATE_OPERATIONAL;
            break;
        case MFI_STATE_UNDEFINED:
            /* this state should not last for more than 2 seconds */
            con_log(CL_ANN, (CE_NOTE, "FW state undefined"));

            max_wait = 2;
            cur_state = MFI_STATE_UNDEFINED;
            break;
        case MFI_STATE_BB_INIT:
            max_wait = 2;
            cur_state = MFI_STATE_BB_INIT;
            break;
        case MFI_STATE_FW_INIT:
            max_wait = 2;
            cur_state = MFI_STATE_FW_INIT;
            break;
        case MFI_STATE_DEVICE_SCAN:
            max_wait = 10;
            cur_state = MFI_STATE_DEVICE_SCAN;
            break;
        default:
            con_log(CL_ANN, (CE_NOTE,
                "mr_sas: Unknown state 0x%x", fw_state));
            return (ENODEV);
        }

        /* the cur_state should not last for more than max_wait secs */
        for (i = 0; i < (max_wait * MILLISEC); i++) {
            /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
            fw_state =
                instance->func_ptr->read_fw_status_reg(instance) &
                MFI_STATE_MASK;

            if (fw_state == cur_state) {
                /* poll roughly once per millisecond */
                delay(1 * drv_usectohz(MILLISEC));
            } else {
                break;
            }
        }

        /* return error if fw_state hasn't changed after max_wait */
        if (fw_state == cur_state) {
            con_log(CL_ANN, (CE_NOTE,
                "FW state hasn't changed in %d secs", max_wait));
            return (ENODEV);
        }
    };

    fw_ctrl = RD_IB_DOORBELL(instance);

    con_log(CL_ANN1, (CE_NOTE,
        "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));

    /*
     * Write 0xF to the doorbell register to do the following.
     * - Abort all outstanding commands (bit 0).
     * - Transition from OPERATIONAL to READY state (bit 1).
     * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
     * - Set to release FW to continue running (i.e. BIOS handshake
     * (bit 3).
     */
    WR_IB_DOORBELL(0xF, instance);

    if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
        return (ENODEV);
    }
    return (DDI_SUCCESS);
}

/*
 * get_seq_num : fetch the current AEN event-log sequence numbers via
 * MR_DCMD_CTRL_EVENT_GET_INFO (continues past this view).
 */
static int
get_seq_num(struct mrsas_instance *instance,
    struct mrsas_evt_log_info *eli)
{
    int ret = DDI_SUCCESS;

    dma_obj_t dcmd_dma_obj;
    struct mrsas_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;
    struct mrsas_evt_log_info *eli_tmp;
    cmd = get_mfi_pkt(instance);

    if (!cmd) {
        cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
        DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
            instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
        return (ENOMEM);
    }
    /* Clear the frame buffer and assign back the context id */
    (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
        cmd->index);

    dcmd = &cmd->frame->dcmd;

    /* allocate the data transfer buffer */
    dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
    dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
    dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
    dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
    dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
    dcmd_dma_obj.dma_attr.dma_attr_align = 1;

    if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
        (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
        con_log(CL_ANN, (CE_WARN,
            "get_seq_num: could not allocate data transfer buffer."));
        /*
         * NOTE(review): cmd is not returned to the free pool on this
         * failure path - looks like a command leak; confirm against
         * the remainder of this function.
         */
        return (DDI_FAILURE);
    }

    (void) memset(dcmd_dma_obj.buffer, 0,
        sizeof (struct mrsas_evt_log_info));

    (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 2437 MFI_FRAME_DIR_READ); 2438 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 2439 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 2440 sizeof (struct mrsas_evt_log_info)); 2441 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 2442 MR_DCMD_CTRL_EVENT_GET_INFO); 2443 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 2444 sizeof (struct mrsas_evt_log_info)); 2445 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 2446 dcmd_dma_obj.dma_cookie[0].dmac_address); 2447 2448 cmd->sync_cmd = MRSAS_TRUE; 2449 cmd->frame_count = 1; 2450 2451 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2452 cmn_err(CE_WARN, "get_seq_num: " 2453 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); 2454 ret = DDI_FAILURE; 2455 } else { 2456 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer; 2457 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle, 2458 &eli_tmp->newest_seq_num); 2459 ret = DDI_SUCCESS; 2460 } 2461 2462 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 2463 ret = DDI_FAILURE; 2464 2465 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2466 ret = DDI_FAILURE; 2467 } 2468 2469 return_mfi_pkt(instance, cmd); 2470 2471 return (ret); 2472 } 2473 2474 /* 2475 * start_mfi_aen 2476 */ 2477 static int 2478 start_mfi_aen(struct mrsas_instance *instance) 2479 { 2480 int ret = 0; 2481 2482 struct mrsas_evt_log_info eli; 2483 union mrsas_evt_class_locale class_locale; 2484 2485 /* get the latest sequence number from FW */ 2486 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info)); 2487 2488 if (get_seq_num(instance, &eli)) { 2489 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num"); 2490 return (-1); 2491 } 2492 2493 /* register AEN with FW for latest sequence number plus 1 */ 2494 class_locale.members.reserved = 0; 2495 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL); 2496 
class_locale.members.class = MR_EVT_CLASS_INFO; 2497 class_locale.word = LE_32(class_locale.word); 2498 ret = register_mfi_aen(instance, eli.newest_seq_num + 1, 2499 class_locale.word); 2500 2501 if (ret) { 2502 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed"); 2503 return (-1); 2504 } 2505 2506 return (ret); 2507 } 2508 2509 /* 2510 * flush_cache 2511 */ 2512 static void 2513 flush_cache(struct mrsas_instance *instance) 2514 { 2515 struct mrsas_cmd *cmd = NULL; 2516 struct mrsas_dcmd_frame *dcmd; 2517 uint32_t max_cmd = instance->max_fw_cmds; 2518 2519 cmd = instance->cmd_list[max_cmd]; 2520 2521 if (cmd == NULL) 2522 return; 2523 2524 dcmd = &cmd->frame->dcmd; 2525 2526 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2527 2528 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 2529 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 2530 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0); 2531 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 2532 MFI_FRAME_DIR_NONE); 2533 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 2534 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0); 2535 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 2536 MR_DCMD_CTRL_CACHE_FLUSH); 2537 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0], 2538 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE); 2539 2540 cmd->frame_count = 1; 2541 2542 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2543 con_log(CL_ANN1, (CE_WARN, 2544 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH")); 2545 } 2546 con_log(CL_DLEVEL1, (CE_NOTE, "done")); 2547 } 2548 2549 /* 2550 * service_mfi_aen- Completes an AEN command 2551 * @instance: Adapter soft state 2552 * @cmd: Command to be completed 2553 * 2554 */ 2555 static void 2556 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 2557 { 2558 uint32_t seq_num; 2559 struct mrsas_evt_detail *evt_detail = 2560 (struct mrsas_evt_detail 
*)instance->mfi_evt_detail_obj.buffer; 2561 int rval = 0; 2562 int tgt = 0; 2563 ddi_acc_handle_t acc_handle; 2564 2565 acc_handle = cmd->frame_dma_obj.acc_handle; 2566 2567 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status); 2568 2569 if (cmd->cmd_status == ENODATA) { 2570 cmd->cmd_status = 0; 2571 } 2572 2573 /* 2574 * log the MFI AEN event to the sysevent queue so that 2575 * application will get noticed 2576 */ 2577 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS", 2578 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) { 2579 int instance_no = ddi_get_instance(instance->dip); 2580 con_log(CL_ANN, (CE_WARN, 2581 "mr_sas%d: Failed to log AEN event", instance_no)); 2582 } 2583 /* 2584 * Check for any ld devices that has changed state. i.e. online 2585 * or offline. 2586 */ 2587 con_log(CL_ANN1, (CE_NOTE, 2588 "AEN: code = %x class = %x locale = %x args = %x", 2589 ddi_get32(acc_handle, &evt_detail->code), 2590 evt_detail->cl.members.class, 2591 ddi_get16(acc_handle, &evt_detail->cl.members.locale), 2592 ddi_get8(acc_handle, &evt_detail->arg_type))); 2593 2594 switch (ddi_get32(acc_handle, &evt_detail->code)) { 2595 case MR_EVT_CFG_CLEARED: { 2596 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 2597 if (instance->mr_ld_list[tgt].dip != NULL) { 2598 rval = mrsas_service_evt(instance, tgt, 0, 2599 MRSAS_EVT_UNCONFIG_TGT, NULL); 2600 con_log(CL_ANN1, (CE_WARN, 2601 "mr_sas: CFG CLEARED AEN rval = %d " 2602 "tgt id = %d", rval, tgt)); 2603 } 2604 } 2605 break; 2606 } 2607 2608 case MR_EVT_LD_DELETED: { 2609 rval = mrsas_service_evt(instance, 2610 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 2611 MRSAS_EVT_UNCONFIG_TGT, NULL); 2612 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d " 2613 "tgt id = %d index = %d", rval, 2614 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 2615 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 2616 break; 2617 } /* End of MR_EVT_LD_DELETED */ 2618 2619 case MR_EVT_LD_CREATED: { 
2620 rval = mrsas_service_evt(instance, 2621 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 2622 MRSAS_EVT_CONFIG_TGT, NULL); 2623 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d " 2624 "tgt id = %d index = %d", rval, 2625 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 2626 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 2627 break; 2628 } /* End of MR_EVT_LD_CREATED */ 2629 } /* End of Main Switch */ 2630 2631 /* get copy of seq_num and class/locale for re-registration */ 2632 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num); 2633 seq_num++; 2634 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 2635 sizeof (struct mrsas_evt_detail)); 2636 2637 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0); 2638 ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num); 2639 2640 instance->aen_seq_num = seq_num; 2641 2642 cmd->frame_count = 1; 2643 2644 /* Issue the aen registration frame */ 2645 instance->func_ptr->issue_cmd(cmd, instance); 2646 } 2647 2648 /* 2649 * complete_cmd_in_sync_mode - Completes an internal command 2650 * @instance: Adapter soft state 2651 * @cmd: Command to be completed 2652 * 2653 * The issue_cmd_in_sync_mode() function waits for a command to complete 2654 * after it issues a command. This function wakes up that waiting routine by 2655 * calling wake_up() on the wait queue. 
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	/* latch the FW completion status out of the DMA'd frame */
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	/*
	 * NOTE(review): ENODATA looks like the pre-issue sentinel written
	 * by issue_cmd_in_sync_mode(); if it is still present the command
	 * is treated as successful — confirm against the issuing routine.
	 */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/* wake every waiter blocked on the internal-command CV */
	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * mrsas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Drains the completed-command pool under completed_pool_mtx onto a
 * private list, then (lock dropped) completes each command according to
 * its MFI opcode: regular SCSI/LD I/O gets scsi_pkt status translation
 * and its pkt_comp callback; sync (IOCTL-issued) commands, SMP/STP,
 * DCMD and ABORT are completed through complete_cmd_in_sync_mode() or,
 * for event-wait DCMDs, service_mfi_aen().
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt		*pkt;
	struct scsa_cmd		*acmd;
	struct mrsas_cmd	*cmd;
	struct mlist_head	*pos, *next;
	mlist_t			process_list;
	struct mrsas_header	*hdr;
	struct scsi_arq_status	*arqstat;

	con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called"));

	ASSERT(instance);
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/* splice the whole completed pool onto a private list */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		/*
		 * NOTE(review): this early return leaves softint_running
		 * set and abandons the rest of process_list — verify this
		 * is the intended FMA lost-service behavior.
		 */
		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* flush consistent DMA data back to the CPU view */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));
			DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
			    uint_t, acmd->cmd_cdblen, ulong_t,
			    acmd->cmd_dmacount);

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry	*inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
			    uint8_t, hdr->cmd_status);

			/* translate MFI status into scsi_pkt status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* fill auto-request-sense data */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    acmd->cmd_scblen -
					    offsetof(struct scsi_arq_status,
					    sts_sensedata), DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));

			(void) mrsas_common_check(instance, cmd);

			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			return_mfi_pkt(instance, cmd);

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {
					(*pkt->pkt_comp)(pkt);
				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mrsas_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 */
static int
mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
    uchar_t endian_flags)
{
	int	i;
	size_t	alen = 0;
	uint_t	cookie_cnt;
	struct ddi_device_acc_attr tmp_endian_attr;

	/* caller supplies only the endianness; base attrs come from the
	 * driver-wide endian_attr template */
	tmp_endian_attr = endian_attr;
	tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
	tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;

	/* 1) DMA handle, 2) memory, 3) bind — unwound in reverse on error */
	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
	if (i != DDI_SUCCESS) {

		switch (i) {
		case DDI_DMA_BADATTR :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- Bad attribute"));
			break;
		case DDI_DMA_NORESOURCES :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- No Resources"));
			break;
		default :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle: "
			    "unknown status %d", i));
			break;
		}

		return (-1);
	}

	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
	    alen < obj->size) {

		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

		return (-1);
	}

	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

		ddi_dma_mem_free(&obj->acc_handle);
		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

		return (-1);
	}

	/*
	 * NOTE(review): on these two FMA failure paths the bind/memory/
	 * handle are NOT released before returning -1 — verify callers
	 * treat the object as leaked-on-purpose here.
	 */
	if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	/* success: cookie count (callers typically expect exactly 1) */
	return (cookie_cnt);
}

/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mrsas_alloc_dma_obj()
 */
static int
mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
{

	/* FMA-check both handles before tearing them down */
	if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	/* reverse order of mrsas_alloc_dma_obj(): unbind, free mem, free handle */
	(void) ddi_dma_unbind_handle(obj.dma_handle);
	ddi_dma_mem_free(&obj.acc_handle);
	ddi_dma_free_handle(&obj.dma_handle);

	return (DDI_SUCCESS);
}

/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 */
static int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* direction follows the buf: B_READ means device-to-memory */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* honor the target driver's sleep/no-sleep callback convention */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* partial mapping: position on the first DMA window */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

	get_dma_cookies:
		/* harvest up to max_num_sge cookies for the SGL */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* residual: bytes of the buf not covered by this mapping */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}

/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-consumed window: everything already moved */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/* cmd_dmacount keeps accumulating across windows */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * build_cmd
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd		*cmd;
	struct mrsas_sge64		*mfi_sgl;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame	*pthru;
	struct mrsas_io_frame		*ldio;

	/* find out if this is logical or physical drive command.
*/ 3307 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3308 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3309 *cmd_done = 0; 3310 3311 /* get the command packet */ 3312 if (!(cmd = get_mfi_pkt(instance))) { 3313 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t, 3314 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3315 return (NULL); 3316 } 3317 3318 acc_handle = cmd->frame_dma_obj.acc_handle; 3319 3320 /* Clear the frame buffer and assign back the context id */ 3321 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3322 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3323 3324 cmd->pkt = pkt; 3325 cmd->cmd = acmd; 3326 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0], 3327 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len); 3328 3329 /* lets get the command directions */ 3330 if (acmd->cmd_flags & CFLAG_DMASEND) { 3331 flags = MFI_FRAME_DIR_WRITE; 3332 3333 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3334 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3335 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3336 DDI_DMA_SYNC_FORDEV); 3337 } 3338 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3339 flags = MFI_FRAME_DIR_READ; 3340 3341 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3342 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3343 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3344 DDI_DMA_SYNC_FORCPU); 3345 } 3346 } else { 3347 flags = MFI_FRAME_DIR_NONE; 3348 } 3349 3350 flags |= MFI_FRAME_SGL64; 3351 3352 switch (pkt->pkt_cdbp[0]) { 3353 3354 /* 3355 * case SCMD_SYNCHRONIZE_CACHE: 3356 * flush_cache(instance); 3357 * return_mfi_pkt(instance, cmd); 3358 * *cmd_done = 1; 3359 * 3360 * return (NULL); 3361 */ 3362 3363 case SCMD_READ: 3364 case SCMD_WRITE: 3365 case SCMD_READ_G1: 3366 case SCMD_WRITE_G1: 3367 if (acmd->islogical) { 3368 ldio = (struct mrsas_io_frame *)cmd->frame; 3369 3370 /* 3371 * preare the Logical IO frame: 3372 * 2nd bit is zero for all read cmds 3373 */ 3374 ddi_put8(acc_handle, &ldio->cmd, 3375 (pkt->pkt_cdbp[0] & 0x02) ? 
MFI_CMD_OP_LD_WRITE 3376 : MFI_CMD_OP_LD_READ); 3377 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3378 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3379 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3380 ddi_put16(acc_handle, &ldio->timeout, 0); 3381 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3382 ddi_put16(acc_handle, &ldio->pad_0, 0); 3383 ddi_put16(acc_handle, &ldio->flags, flags); 3384 3385 /* Initialize sense Information */ 3386 bzero(cmd->sense, SENSE_LENGTH); 3387 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3388 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3389 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3390 cmd->sense_phys_addr); 3391 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3392 ddi_put8(acc_handle, &ldio->access_byte, 3393 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 3394 ddi_put8(acc_handle, &ldio->sge_count, 3395 acmd->cmd_cookiecnt); 3396 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 3397 3398 context = ddi_get32(acc_handle, &ldio->context); 3399 3400 if (acmd->cmd_cdblen == CDB_GROUP0) { 3401 ddi_put32(acc_handle, &ldio->lba_count, ( 3402 (uint16_t)(pkt->pkt_cdbp[4]))); 3403 3404 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3405 ((uint32_t)(pkt->pkt_cdbp[3])) | 3406 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3407 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3408 << 16))); 3409 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3410 ddi_put32(acc_handle, &ldio->lba_count, ( 3411 ((uint16_t)(pkt->pkt_cdbp[8])) | 3412 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 3413 3414 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3415 ((uint32_t)(pkt->pkt_cdbp[5])) | 3416 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3417 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3418 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3419 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3420 ddi_put32(acc_handle, &ldio->lba_count, ( 3421 ((uint16_t)(pkt->pkt_cdbp[9])) | 3422 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3423 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3424 
((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 3425 3426 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3427 ((uint32_t)(pkt->pkt_cdbp[5])) | 3428 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3429 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3430 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3431 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3432 ddi_put32(acc_handle, &ldio->lba_count, ( 3433 ((uint16_t)(pkt->pkt_cdbp[13])) | 3434 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3435 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3436 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 3437 3438 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3439 ((uint32_t)(pkt->pkt_cdbp[9])) | 3440 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3441 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3442 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 3443 3444 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3445 ((uint32_t)(pkt->pkt_cdbp[5])) | 3446 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3447 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3448 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3449 } 3450 3451 break; 3452 } 3453 /* fall through For all non-rd/wr cmds */ 3454 default: 3455 3456 switch (pkt->pkt_cdbp[0]) { 3457 case SCMD_MODE_SENSE: 3458 case SCMD_MODE_SENSE_G1: { 3459 union scsi_cdb *cdbp; 3460 uint16_t page_code; 3461 3462 cdbp = (void *)pkt->pkt_cdbp; 3463 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 3464 switch (page_code) { 3465 case 0x3: 3466 case 0x4: 3467 (void) mrsas_mode_sense_build(pkt); 3468 return_mfi_pkt(instance, cmd); 3469 *cmd_done = 1; 3470 return (NULL); 3471 } 3472 break; 3473 } 3474 default: 3475 break; 3476 } 3477 3478 pthru = (struct mrsas_pthru_frame *)cmd->frame; 3479 3480 /* prepare the DCDB frame */ 3481 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
3482 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 3483 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 3484 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 3485 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 3486 ddi_put8(acc_handle, &pthru->lun, 0); 3487 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 3488 ddi_put16(acc_handle, &pthru->timeout, 0); 3489 ddi_put16(acc_handle, &pthru->flags, flags); 3490 ddi_put32(acc_handle, &pthru->data_xfer_len, 3491 acmd->cmd_dmacount); 3492 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 3493 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 3494 3495 bzero(cmd->sense, SENSE_LENGTH); 3496 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 3497 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3498 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 3499 cmd->sense_phys_addr); 3500 3501 context = ddi_get32(acc_handle, &pthru->context); 3502 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 3503 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 3504 3505 break; 3506 } 3507 #ifdef lint 3508 context = context; 3509 #endif 3510 /* prepare the scatter-gather list for the firmware */ 3511 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3512 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 3513 acmd->cmd_dmacookies[i].dmac_laddress); 3514 ddi_put32(acc_handle, &mfi_sgl->length, 3515 acmd->cmd_dmacookies[i].dmac_size); 3516 } 3517 3518 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 3519 3520 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 3521 ((sge_bytes % MRMFI_FRAME_SIZE) ? 
1 : 0) + 1; 3522 3523 if (cmd->frame_count >= 8) { 3524 cmd->frame_count = 8; 3525 } 3526 3527 return (cmd); 3528 } 3529 3530 /* 3531 * issue_mfi_pthru 3532 */ 3533 static int 3534 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3535 struct mrsas_cmd *cmd, int mode) 3536 { 3537 void *ubuf; 3538 uint32_t kphys_addr = 0; 3539 uint32_t xferlen = 0; 3540 uint_t model; 3541 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3542 dma_obj_t pthru_dma_obj; 3543 struct mrsas_pthru_frame *kpthru; 3544 struct mrsas_pthru_frame *pthru; 3545 int i; 3546 pthru = &cmd->frame->pthru; 3547 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 3548 3549 model = ddi_model_convert_from(mode & FMODELS); 3550 if (model == DDI_MODEL_ILP32) { 3551 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3552 3553 xferlen = kpthru->sgl.sge32[0].length; 3554 3555 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3556 } else { 3557 #ifdef _ILP32 3558 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3559 xferlen = kpthru->sgl.sge32[0].length; 3560 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3561 #else 3562 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3563 xferlen = kpthru->sgl.sge64[0].length; 3564 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3565 #endif 3566 } 3567 3568 if (xferlen) { 3569 /* means IOCTL requires DMA */ 3570 /* allocate the data transfer buffer */ 3571 pthru_dma_obj.size = xferlen; 3572 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 3573 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3574 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3575 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3576 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3577 3578 /* allocate kernel buffer for DMA */ 3579 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 3580 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3581 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3582 "could not allocate 
data transfer buffer.")); 3583 return (DDI_FAILURE); 3584 } 3585 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 3586 3587 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3588 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3589 for (i = 0; i < xferlen; i++) { 3590 if (ddi_copyin((uint8_t *)ubuf+i, 3591 (uint8_t *)pthru_dma_obj.buffer+i, 3592 1, mode)) { 3593 con_log(CL_ANN, (CE_WARN, 3594 "issue_mfi_pthru : " 3595 "copy from user space failed")); 3596 return (DDI_FAILURE); 3597 } 3598 } 3599 } 3600 3601 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3602 } 3603 3604 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 3605 ddi_put8(acc_handle, &pthru->sense_len, 0); 3606 ddi_put8(acc_handle, &pthru->cmd_status, 0); 3607 ddi_put8(acc_handle, &pthru->scsi_status, 0); 3608 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 3609 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 3610 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 3611 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 3612 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 3613 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 3614 3615 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3616 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3617 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 3618 3619 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 3620 pthru->cdb_len, DDI_DEV_AUTOINCR); 3621 3622 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 3623 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 3624 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 3625 3626 cmd->sync_cmd = MRSAS_TRUE; 3627 cmd->frame_count = 1; 3628 3629 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3630 con_log(CL_ANN, (CE_WARN, 3631 "issue_mfi_pthru: fw_ioctl failed")); 3632 } else { 3633 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 
3634 for (i = 0; i < xferlen; i++) { 3635 if (ddi_copyout( 3636 (uint8_t *)pthru_dma_obj.buffer+i, 3637 (uint8_t *)ubuf+i, 1, mode)) { 3638 con_log(CL_ANN, (CE_WARN, 3639 "issue_mfi_pthru : " 3640 "copy to user space failed")); 3641 return (DDI_FAILURE); 3642 } 3643 } 3644 } 3645 } 3646 3647 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 3648 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 3649 3650 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3651 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 3652 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, 3653 kpthru->cmd_status, uint8_t, kpthru->scsi_status); 3654 3655 if (xferlen) { 3656 /* free kernel buffer */ 3657 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3658 return (DDI_FAILURE); 3659 } 3660 3661 return (DDI_SUCCESS); 3662 } 3663 3664 /* 3665 * issue_mfi_dcmd 3666 */ 3667 static int 3668 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3669 struct mrsas_cmd *cmd, int mode) 3670 { 3671 void *ubuf; 3672 uint32_t kphys_addr = 0; 3673 uint32_t xferlen = 0; 3674 uint32_t model; 3675 dma_obj_t dcmd_dma_obj; 3676 struct mrsas_dcmd_frame *kdcmd; 3677 struct mrsas_dcmd_frame *dcmd; 3678 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3679 int i; 3680 dcmd = &cmd->frame->dcmd; 3681 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 3682 3683 model = ddi_model_convert_from(mode & FMODELS); 3684 if (model == DDI_MODEL_ILP32) { 3685 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3686 3687 xferlen = kdcmd->sgl.sge32[0].length; 3688 3689 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3690 } else { 3691 #ifdef _ILP32 3692 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3693 xferlen = kdcmd->sgl.sge32[0].length; 3694 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3695 #else 3696 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3697 
xferlen = kdcmd->sgl.sge64[0].length; 3698 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 3699 #endif 3700 } 3701 if (xferlen) { 3702 /* means IOCTL requires DMA */ 3703 /* allocate the data transfer buffer */ 3704 dcmd_dma_obj.size = xferlen; 3705 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 3706 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3707 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3708 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3709 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3710 3711 /* allocate kernel buffer for DMA */ 3712 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 3713 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3714 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3715 "could not allocate data transfer buffer.")); 3716 return (DDI_FAILURE); 3717 } 3718 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 3719 3720 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3721 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3722 for (i = 0; i < xferlen; i++) { 3723 if (ddi_copyin((uint8_t *)ubuf + i, 3724 (uint8_t *)dcmd_dma_obj.buffer + i, 3725 1, mode)) { 3726 con_log(CL_ANN, (CE_WARN, 3727 "issue_mfi_dcmd : " 3728 "copy from user space failed")); 3729 return (DDI_FAILURE); 3730 } 3731 } 3732 } 3733 3734 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3735 } 3736 3737 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 3738 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 3739 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 3740 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 3741 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 3742 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 3743 3744 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 3745 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 3746 3747 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 3748 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 3749 ddi_put32(acc_handle, 
&dcmd->sgl.sge32[0].phys_addr, kphys_addr); 3750 3751 cmd->sync_cmd = MRSAS_TRUE; 3752 cmd->frame_count = 1; 3753 3754 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3755 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); 3756 } else { 3757 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3758 for (i = 0; i < xferlen; i++) { 3759 if (ddi_copyout( 3760 (uint8_t *)dcmd_dma_obj.buffer + i, 3761 (uint8_t *)ubuf + i, 3762 1, mode)) { 3763 con_log(CL_ANN, (CE_WARN, 3764 "issue_mfi_dcmd : " 3765 "copy to user space failed")); 3766 return (DDI_FAILURE); 3767 } 3768 } 3769 } 3770 } 3771 3772 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 3773 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t, 3774 kdcmd->cmd, uint8_t, kdcmd->cmd_status); 3775 3776 if (xferlen) { 3777 /* free kernel buffer */ 3778 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3779 return (DDI_FAILURE); 3780 } 3781 3782 return (DDI_SUCCESS); 3783 } 3784 3785 /* 3786 * issue_mfi_smp 3787 */ 3788 static int 3789 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3790 struct mrsas_cmd *cmd, int mode) 3791 { 3792 void *request_ubuf; 3793 void *response_ubuf; 3794 uint32_t request_xferlen = 0; 3795 uint32_t response_xferlen = 0; 3796 uint_t model; 3797 dma_obj_t request_dma_obj; 3798 dma_obj_t response_dma_obj; 3799 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3800 struct mrsas_smp_frame *ksmp; 3801 struct mrsas_smp_frame *smp; 3802 struct mrsas_sge32 *sge32; 3803 #ifndef _ILP32 3804 struct mrsas_sge64 *sge64; 3805 #endif 3806 int i; 3807 uint64_t tmp_sas_addr; 3808 3809 smp = &cmd->frame->smp; 3810 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; 3811 3812 model = ddi_model_convert_from(mode & FMODELS); 3813 if (model == DDI_MODEL_ILP32) { 3814 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3815 3816 sge32 = &ksmp->sgl[0].sge32[0]; 3817 response_xferlen = sge32[0].length; 3818 
request_xferlen = sge32[1].length; 3819 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3820 "response_xferlen = %x, request_xferlen = %x", 3821 response_xferlen, request_xferlen)); 3822 3823 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3824 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3825 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3826 "response_ubuf = %p, request_ubuf = %p", 3827 response_ubuf, request_ubuf)); 3828 } else { 3829 #ifdef _ILP32 3830 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3831 3832 sge32 = &ksmp->sgl[0].sge32[0]; 3833 response_xferlen = sge32[0].length; 3834 request_xferlen = sge32[1].length; 3835 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3836 "response_xferlen = %x, request_xferlen = %x", 3837 response_xferlen, request_xferlen)); 3838 3839 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3840 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3841 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3842 "response_ubuf = %p, request_ubuf = %p", 3843 response_ubuf, request_ubuf)); 3844 #else 3845 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 3846 3847 sge64 = &ksmp->sgl[0].sge64[0]; 3848 response_xferlen = sge64[0].length; 3849 request_xferlen = sge64[1].length; 3850 3851 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 3852 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 3853 #endif 3854 } 3855 if (request_xferlen) { 3856 /* means IOCTL requires DMA */ 3857 /* allocate the data transfer buffer */ 3858 request_dma_obj.size = request_xferlen; 3859 request_dma_obj.dma_attr = mrsas_generic_dma_attr; 3860 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3861 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3862 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 3863 request_dma_obj.dma_attr.dma_attr_align = 1; 3864 3865 /* allocate kernel buffer for DMA */ 3866 if (mrsas_alloc_dma_obj(instance, &request_dma_obj, 3867 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3868 con_log(CL_ANN, 
(CE_WARN, "issue_mfi_smp: " 3869 "could not allocate data transfer buffer.")); 3870 return (DDI_FAILURE); 3871 } 3872 (void) memset(request_dma_obj.buffer, 0, request_xferlen); 3873 3874 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3875 for (i = 0; i < request_xferlen; i++) { 3876 if (ddi_copyin((uint8_t *)request_ubuf + i, 3877 (uint8_t *)request_dma_obj.buffer + i, 3878 1, mode)) { 3879 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3880 "copy from user space failed")); 3881 return (DDI_FAILURE); 3882 } 3883 } 3884 } 3885 3886 if (response_xferlen) { 3887 /* means IOCTL requires DMA */ 3888 /* allocate the data transfer buffer */ 3889 response_dma_obj.size = response_xferlen; 3890 response_dma_obj.dma_attr = mrsas_generic_dma_attr; 3891 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3892 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3893 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 3894 response_dma_obj.dma_attr.dma_attr_align = 1; 3895 3896 /* allocate kernel buffer for DMA */ 3897 if (mrsas_alloc_dma_obj(instance, &response_dma_obj, 3898 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3899 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3900 "could not allocate data transfer buffer.")); 3901 return (DDI_FAILURE); 3902 } 3903 (void) memset(response_dma_obj.buffer, 0, response_xferlen); 3904 3905 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3906 for (i = 0; i < response_xferlen; i++) { 3907 if (ddi_copyin((uint8_t *)response_ubuf + i, 3908 (uint8_t *)response_dma_obj.buffer + i, 3909 1, mode)) { 3910 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3911 "copy from user space failed")); 3912 return (DDI_FAILURE); 3913 } 3914 } 3915 } 3916 3917 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd); 3918 ddi_put8(acc_handle, &smp->cmd_status, 0); 3919 ddi_put8(acc_handle, &smp->connection_status, 0); 3920 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count); 3921 /* smp->context = ksmp->context; */ 3922 ddi_put16(acc_handle, 
&smp->timeout, ksmp->timeout); 3923 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len); 3924 3925 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr, 3926 sizeof (uint64_t)); 3927 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr); 3928 3929 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64); 3930 3931 model = ddi_model_convert_from(mode & FMODELS); 3932 if (model == DDI_MODEL_ILP32) { 3933 con_log(CL_ANN1, (CE_NOTE, 3934 "issue_mfi_smp: DDI_MODEL_ILP32")); 3935 3936 sge32 = &smp->sgl[0].sge32[0]; 3937 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3938 ddi_put32(acc_handle, &sge32[0].phys_addr, 3939 response_dma_obj.dma_cookie[0].dmac_address); 3940 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3941 ddi_put32(acc_handle, &sge32[1].phys_addr, 3942 request_dma_obj.dma_cookie[0].dmac_address); 3943 } else { 3944 #ifdef _ILP32 3945 con_log(CL_ANN1, (CE_NOTE, 3946 "issue_mfi_smp: DDI_MODEL_ILP32")); 3947 sge32 = &smp->sgl[0].sge32[0]; 3948 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3949 ddi_put32(acc_handle, &sge32[0].phys_addr, 3950 response_dma_obj.dma_cookie[0].dmac_address); 3951 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3952 ddi_put32(acc_handle, &sge32[1].phys_addr, 3953 request_dma_obj.dma_cookie[0].dmac_address); 3954 #else 3955 con_log(CL_ANN1, (CE_NOTE, 3956 "issue_mfi_smp: DDI_MODEL_LP64")); 3957 sge64 = &smp->sgl[0].sge64[0]; 3958 ddi_put32(acc_handle, &sge64[0].length, response_xferlen); 3959 ddi_put64(acc_handle, &sge64[0].phys_addr, 3960 response_dma_obj.dma_cookie[0].dmac_address); 3961 ddi_put32(acc_handle, &sge64[1].length, request_xferlen); 3962 ddi_put64(acc_handle, &sge64[1].phys_addr, 3963 request_dma_obj.dma_cookie[0].dmac_address); 3964 #endif 3965 } 3966 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : " 3967 "smp->response_xferlen = %d, smp->request_xferlen = %d " 3968 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), 3969 ddi_get32(acc_handle, 
&sge32[1].length), 3970 ddi_get32(acc_handle, &smp->data_xfer_len))); 3971 3972 cmd->sync_cmd = MRSAS_TRUE; 3973 cmd->frame_count = 1; 3974 3975 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3976 con_log(CL_ANN, (CE_WARN, 3977 "issue_mfi_smp: fw_ioctl failed")); 3978 } else { 3979 con_log(CL_ANN1, (CE_NOTE, 3980 "issue_mfi_smp: copy to user space")); 3981 3982 if (request_xferlen) { 3983 for (i = 0; i < request_xferlen; i++) { 3984 if (ddi_copyout( 3985 (uint8_t *)request_dma_obj.buffer + 3986 i, (uint8_t *)request_ubuf + i, 3987 1, mode)) { 3988 con_log(CL_ANN, (CE_WARN, 3989 "issue_mfi_smp : copy to user space" 3990 " failed")); 3991 return (DDI_FAILURE); 3992 } 3993 } 3994 } 3995 3996 if (response_xferlen) { 3997 for (i = 0; i < response_xferlen; i++) { 3998 if (ddi_copyout( 3999 (uint8_t *)response_dma_obj.buffer 4000 + i, (uint8_t *)response_ubuf 4001 + i, 1, mode)) { 4002 con_log(CL_ANN, (CE_WARN, 4003 "issue_mfi_smp : copy to " 4004 "user space failed")); 4005 return (DDI_FAILURE); 4006 } 4007 } 4008 } 4009 } 4010 4011 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); 4012 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 4013 ddi_get8(acc_handle, &smp->cmd_status))); 4014 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status); 4015 4016 if (request_xferlen) { 4017 /* free kernel buffer */ 4018 if (mrsas_free_dma_obj(instance, request_dma_obj) != 4019 DDI_SUCCESS) 4020 return (DDI_FAILURE); 4021 } 4022 4023 if (response_xferlen) { 4024 /* free kernel buffer */ 4025 if (mrsas_free_dma_obj(instance, response_dma_obj) != 4026 DDI_SUCCESS) 4027 return (DDI_FAILURE); 4028 } 4029 4030 return (DDI_SUCCESS); 4031 } 4032 4033 /* 4034 * issue_mfi_stp 4035 */ 4036 static int 4037 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4038 struct mrsas_cmd *cmd, int mode) 4039 { 4040 void *fis_ubuf; 4041 void *data_ubuf; 4042 uint32_t fis_xferlen = 0; 4043 uint32_t data_xferlen = 0; 4044 
uint_t model; 4045 dma_obj_t fis_dma_obj; 4046 dma_obj_t data_dma_obj; 4047 struct mrsas_stp_frame *kstp; 4048 struct mrsas_stp_frame *stp; 4049 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4050 int i; 4051 4052 stp = &cmd->frame->stp; 4053 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0]; 4054 4055 model = ddi_model_convert_from(mode & FMODELS); 4056 if (model == DDI_MODEL_ILP32) { 4057 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4058 4059 fis_xferlen = kstp->sgl.sge32[0].length; 4060 data_xferlen = kstp->sgl.sge32[1].length; 4061 4062 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4063 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4064 } 4065 else 4066 { 4067 #ifdef _ILP32 4068 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4069 4070 fis_xferlen = kstp->sgl.sge32[0].length; 4071 data_xferlen = kstp->sgl.sge32[1].length; 4072 4073 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4074 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4075 #else 4076 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); 4077 4078 fis_xferlen = kstp->sgl.sge64[0].length; 4079 data_xferlen = kstp->sgl.sge64[1].length; 4080 4081 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 4082 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 4083 #endif 4084 } 4085 4086 4087 if (fis_xferlen) { 4088 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " 4089 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 4090 4091 /* means IOCTL requires DMA */ 4092 /* allocate the data transfer buffer */ 4093 fis_dma_obj.size = fis_xferlen; 4094 fis_dma_obj.dma_attr = mrsas_generic_dma_attr; 4095 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4096 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4097 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 4098 fis_dma_obj.dma_attr.dma_attr_align = 1; 4099 4100 /* allocate kernel buffer for DMA */ 4101 if (mrsas_alloc_dma_obj(instance, 
&fis_dma_obj, 4102 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4103 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : " 4104 "could not allocate data transfer buffer.")); 4105 return (DDI_FAILURE); 4106 } 4107 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen); 4108 4109 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4110 for (i = 0; i < fis_xferlen; i++) { 4111 if (ddi_copyin((uint8_t *)fis_ubuf + i, 4112 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) { 4113 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4114 "copy from user space failed")); 4115 return (DDI_FAILURE); 4116 } 4117 } 4118 } 4119 4120 if (data_xferlen) { 4121 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " 4122 "data_xferlen = %x", data_ubuf, data_xferlen)); 4123 4124 /* means IOCTL requires DMA */ 4125 /* allocate the data transfer buffer */ 4126 data_dma_obj.size = data_xferlen; 4127 data_dma_obj.dma_attr = mrsas_generic_dma_attr; 4128 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4129 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4130 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 4131 data_dma_obj.dma_attr.dma_attr_align = 1; 4132 4133 /* allocate kernel buffer for DMA */ 4134 if (mrsas_alloc_dma_obj(instance, &data_dma_obj, 4135 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4136 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4137 "could not allocate data transfer buffer.")); 4138 return (DDI_FAILURE); 4139 } 4140 (void) memset(data_dma_obj.buffer, 0, data_xferlen); 4141 4142 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4143 for (i = 0; i < data_xferlen; i++) { 4144 if (ddi_copyin((uint8_t *)data_ubuf + i, 4145 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) { 4146 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4147 "copy from user space failed")); 4148 return (DDI_FAILURE); 4149 } 4150 } 4151 } 4152 4153 ddi_put8(acc_handle, &stp->cmd, kstp->cmd); 4154 ddi_put8(acc_handle, &stp->cmd_status, 0); 4155 ddi_put8(acc_handle, &stp->connection_status, 0); 4156 
ddi_put8(acc_handle, &stp->target_id, kstp->target_id); 4157 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count); 4158 4159 ddi_put16(acc_handle, &stp->timeout, kstp->timeout); 4160 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len); 4161 4162 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10, 4163 DDI_DEV_AUTOINCR); 4164 4165 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64); 4166 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags); 4167 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen); 4168 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr, 4169 fis_dma_obj.dma_cookie[0].dmac_address); 4170 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen); 4171 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr, 4172 data_dma_obj.dma_cookie[0].dmac_address); 4173 4174 cmd->sync_cmd = MRSAS_TRUE; 4175 cmd->frame_count = 1; 4176 4177 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4178 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); 4179 } else { 4180 4181 if (fis_xferlen) { 4182 for (i = 0; i < fis_xferlen; i++) { 4183 if (ddi_copyout( 4184 (uint8_t *)fis_dma_obj.buffer + i, 4185 (uint8_t *)fis_ubuf + i, 1, mode)) { 4186 con_log(CL_ANN, (CE_WARN, 4187 "issue_mfi_stp : copy to " 4188 "user space failed")); 4189 return (DDI_FAILURE); 4190 } 4191 } 4192 } 4193 } 4194 if (data_xferlen) { 4195 for (i = 0; i < data_xferlen; i++) { 4196 if (ddi_copyout( 4197 (uint8_t *)data_dma_obj.buffer + i, 4198 (uint8_t *)data_ubuf + i, 1, mode)) { 4199 con_log(CL_ANN, (CE_WARN, 4200 "issue_mfi_stp : copy to" 4201 " user space failed")); 4202 return (DDI_FAILURE); 4203 } 4204 } 4205 } 4206 4207 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); 4208 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status); 4209 4210 if (fis_xferlen) { 4211 /* free kernel buffer */ 4212 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 4213 return (DDI_FAILURE); 4214 } 
4215 4216 if (data_xferlen) { 4217 /* free kernel buffer */ 4218 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 4219 return (DDI_FAILURE); 4220 } 4221 4222 return (DDI_SUCCESS); 4223 } 4224 4225 /* 4226 * fill_up_drv_ver 4227 */ 4228 static void 4229 fill_up_drv_ver(struct mrsas_drv_ver *dv) 4230 { 4231 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); 4232 4233 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4234 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4235 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas")); 4236 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); 4237 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, 4238 strlen(MRSAS_RELDATE)); 4239 } 4240 4241 /* 4242 * handle_drv_ioctl 4243 */ 4244 static int 4245 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4246 int mode) 4247 { 4248 int i; 4249 int rval = DDI_SUCCESS; 4250 int *props = NULL; 4251 void *ubuf; 4252 4253 uint8_t *pci_conf_buf; 4254 uint32_t xferlen; 4255 uint32_t num_props; 4256 uint_t model; 4257 struct mrsas_dcmd_frame *kdcmd; 4258 struct mrsas_drv_ver dv; 4259 struct mrsas_pci_information pi; 4260 4261 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4262 4263 model = ddi_model_convert_from(mode & FMODELS); 4264 if (model == DDI_MODEL_ILP32) { 4265 con_log(CL_ANN1, (CE_NOTE, 4266 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4267 4268 xferlen = kdcmd->sgl.sge32[0].length; 4269 4270 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4271 } else { 4272 #ifdef _ILP32 4273 con_log(CL_ANN1, (CE_NOTE, 4274 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4275 xferlen = kdcmd->sgl.sge32[0].length; 4276 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4277 #else 4278 con_log(CL_ANN1, (CE_NOTE, 4279 "handle_drv_ioctl: DDI_MODEL_LP64")); 4280 xferlen = kdcmd->sgl.sge64[0].length; 4281 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4282 #endif 4283 } 4284 con_log(CL_ANN1, (CE_NOTE, 
"handle_drv_ioctl: " 4285 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4286 4287 switch (kdcmd->opcode) { 4288 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: 4289 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4290 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); 4291 4292 fill_up_drv_ver(&dv); 4293 4294 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4295 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4296 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : " 4297 "copy to user space failed")); 4298 kdcmd->cmd_status = 1; 4299 rval = 1; 4300 } else { 4301 kdcmd->cmd_status = 0; 4302 } 4303 break; 4304 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION: 4305 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4306 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON")); 4307 4308 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 4309 0, "reg", &props, &num_props)) { 4310 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4311 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4312 "ddi_prop_look_int_array failed")); 4313 rval = DDI_FAILURE; 4314 } else { 4315 4316 pi.busNumber = (props[0] >> 16) & 0xFF; 4317 pi.deviceNumber = (props[0] >> 11) & 0x1f; 4318 pi.functionNumber = (props[0] >> 8) & 0x7; 4319 ddi_prop_free((void *)props); 4320 } 4321 4322 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 4323 4324 for (i = 0; i < (sizeof (struct mrsas_pci_information) - 4325 offsetof(struct mrsas_pci_information, pciHeaderInfo)); 4326 i++) { 4327 pci_conf_buf[i] = 4328 pci_config_get8(instance->pci_handle, i); 4329 } 4330 4331 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 4332 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4333 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4334 "copy to user space failed")); 4335 kdcmd->cmd_status = 1; 4336 rval = 1; 4337 } else { 4338 kdcmd->cmd_status = 0; 4339 } 4340 break; 4341 default: 4342 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4343 "invalid driver specific IOCTL opcode = 0x%x", 4344 kdcmd->opcode)); 4345 kdcmd->cmd_status = 1; 4346 rval = DDI_FAILURE; 4347 break; 4348 } 4349 4350 return (rval); 4351 } 
4352 4353 /* 4354 * handle_mfi_ioctl 4355 */ 4356 static int 4357 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4358 int mode) 4359 { 4360 int rval = DDI_SUCCESS; 4361 4362 struct mrsas_header *hdr; 4363 struct mrsas_cmd *cmd; 4364 4365 cmd = get_mfi_pkt(instance); 4366 4367 if (!cmd) { 4368 con_log(CL_ANN, (CE_WARN, "mr_sas: " 4369 "failed to get a cmd packet")); 4370 DTRACE_PROBE2(mfi_ioctl_err, uint16_t, 4371 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 4372 return (DDI_FAILURE); 4373 } 4374 4375 /* Clear the frame buffer and assign back the context id */ 4376 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4377 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4378 cmd->index); 4379 4380 hdr = (struct mrsas_header *)&ioctl->frame[0]; 4381 4382 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 4383 case MFI_CMD_OP_DCMD: 4384 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode); 4385 break; 4386 case MFI_CMD_OP_SMP: 4387 rval = issue_mfi_smp(instance, ioctl, cmd, mode); 4388 break; 4389 case MFI_CMD_OP_STP: 4390 rval = issue_mfi_stp(instance, ioctl, cmd, mode); 4391 break; 4392 case MFI_CMD_OP_LD_SCSI: 4393 case MFI_CMD_OP_PD_SCSI: 4394 rval = issue_mfi_pthru(instance, ioctl, cmd, mode); 4395 break; 4396 default: 4397 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: " 4398 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd)); 4399 rval = DDI_FAILURE; 4400 break; 4401 } 4402 4403 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) 4404 rval = DDI_FAILURE; 4405 4406 return_mfi_pkt(instance, cmd); 4407 4408 return (rval); 4409 } 4410 4411 /* 4412 * AEN 4413 */ 4414 static int 4415 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen) 4416 { 4417 int rval = 0; 4418 4419 rval = register_mfi_aen(instance, instance->aen_seq_num, 4420 aen->class_locale_word); 4421 4422 aen->cmd_status = (uint8_t)rval; 4423 4424 return (rval); 4425 } 4426 4427 static int 4428 
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num, 4429 uint32_t class_locale_word) 4430 { 4431 int ret_val; 4432 4433 struct mrsas_cmd *cmd, *aen_cmd; 4434 struct mrsas_dcmd_frame *dcmd; 4435 union mrsas_evt_class_locale curr_aen; 4436 union mrsas_evt_class_locale prev_aen; 4437 4438 /* 4439 * If there an AEN pending already (aen_cmd), check if the 4440 * class_locale of that pending AEN is inclusive of the new 4441 * AEN request we currently have. If it is, then we don't have 4442 * to do anything. In other words, whichever events the current 4443 * AEN request is subscribing to, have already been subscribed 4444 * to. 4445 * 4446 * If the old_cmd is _not_ inclusive, then we have to abort 4447 * that command, form a class_locale that is superset of both 4448 * old and current and re-issue to the FW 4449 */ 4450 4451 curr_aen.word = LE_32(class_locale_word); 4452 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4453 aen_cmd = instance->aen_cmd; 4454 if (aen_cmd) { 4455 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle, 4456 &aen_cmd->frame->dcmd.mbox.w[1]); 4457 prev_aen.word = LE_32(prev_aen.word); 4458 prev_aen.members.locale = LE_16(prev_aen.members.locale); 4459 /* 4460 * A class whose enum value is smaller is inclusive of all 4461 * higher values. If a PROGRESS (= -1) was previously 4462 * registered, then a new registration requests for higher 4463 * classes need not be sent to FW. They are automatically 4464 * included. 4465 * 4466 * Locale numbers don't have such hierarchy. They are bitmap 4467 * values 4468 */ 4469 if ((prev_aen.members.class <= curr_aen.members.class) && 4470 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4471 curr_aen.members.locale)) { 4472 /* 4473 * Previously issued event registration includes 4474 * current request. Nothing to do. 
4475 */ 4476 4477 return (0); 4478 } else { 4479 curr_aen.members.locale |= prev_aen.members.locale; 4480 4481 if (prev_aen.members.class < curr_aen.members.class) 4482 curr_aen.members.class = prev_aen.members.class; 4483 4484 ret_val = abort_aen_cmd(instance, aen_cmd); 4485 4486 if (ret_val) { 4487 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 4488 "failed to abort prevous AEN command")); 4489 4490 return (ret_val); 4491 } 4492 } 4493 } else { 4494 curr_aen.word = LE_32(class_locale_word); 4495 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4496 } 4497 4498 cmd = get_mfi_pkt(instance); 4499 4500 if (!cmd) { 4501 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding, 4502 uint16_t, instance->max_fw_cmds); 4503 return (ENOMEM); 4504 } 4505 /* Clear the frame buffer and assign back the context id */ 4506 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4507 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4508 cmd->index); 4509 4510 dcmd = &cmd->frame->dcmd; 4511 4512 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */ 4513 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 4514 4515 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4516 sizeof (struct mrsas_evt_detail)); 4517 4518 /* Prepare DCMD for aen registration */ 4519 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 4520 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 4521 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 4522 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 4523 MFI_FRAME_DIR_READ); 4524 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 4525 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 4526 sizeof (struct mrsas_evt_detail)); 4527 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 4528 MR_DCMD_CTRL_EVENT_WAIT); 4529 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num); 4530 curr_aen.members.locale = 
LE_16(curr_aen.members.locale); 4531 curr_aen.word = LE_32(curr_aen.word); 4532 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1], 4533 curr_aen.word); 4534 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 4535 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 4536 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 4537 sizeof (struct mrsas_evt_detail)); 4538 4539 instance->aen_seq_num = seq_num; 4540 4541 4542 /* 4543 * Store reference to the cmd used to register for AEN. When an 4544 * application wants us to register for AEN, we have to abort this 4545 * cmd and re-register with a new EVENT LOCALE supplied by that app 4546 */ 4547 instance->aen_cmd = cmd; 4548 4549 cmd->frame_count = 1; 4550 4551 /* Issue the aen registration frame */ 4552 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4553 instance->func_ptr->issue_cmd(cmd, instance); 4554 4555 return (0); 4556 } 4557 4558 static void 4559 display_scsi_inquiry(caddr_t scsi_inq) 4560 { 4561 #define MAX_SCSI_DEVICE_CODE 14 4562 int i; 4563 char inquiry_buf[256] = {0}; 4564 int len; 4565 const char *const scsi_device_types[] = { 4566 "Direct-Access ", 4567 "Sequential-Access", 4568 "Printer ", 4569 "Processor ", 4570 "WORM ", 4571 "CD-ROM ", 4572 "Scanner ", 4573 "Optical Device ", 4574 "Medium Changer ", 4575 "Communications ", 4576 "Unknown ", 4577 "Unknown ", 4578 "Unknown ", 4579 "Enclosure ", 4580 }; 4581 4582 len = 0; 4583 4584 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4585 for (i = 8; i < 16; i++) { 4586 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4587 scsi_inq[i]); 4588 } 4589 4590 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4591 4592 for (i = 16; i < 32; i++) { 4593 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4594 scsi_inq[i]); 4595 } 4596 4597 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4598 4599 for (i = 32; i < 36; i++) { 4600 len += snprintf(inquiry_buf + len, 265 - len, "%c", 
4601 scsi_inq[i]); 4602 } 4603 4604 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4605 4606 4607 i = scsi_inq[0] & 0x1f; 4608 4609 4610 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4611 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : 4612 "Unknown "); 4613 4614 4615 len += snprintf(inquiry_buf + len, 265 - len, 4616 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4617 4618 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4619 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4620 } else { 4621 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4622 } 4623 4624 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4625 } 4626 4627 static int 4628 read_fw_status_reg_ppc(struct mrsas_instance *instance) 4629 { 4630 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4631 } 4632 4633 static void 4634 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance) 4635 { 4636 atomic_add_16(&instance->fw_outstanding, 1); 4637 4638 /* Issue the command to the FW */ 4639 WR_IB_QPORT((cmd->frame_phys_addr) | 4640 (((cmd->frame_count - 1) << 1) | 1), instance); 4641 } 4642 4643 /* 4644 * issue_cmd_in_sync_mode 4645 */ 4646 static int 4647 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance, 4648 struct mrsas_cmd *cmd) 4649 { 4650 int i; 4651 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC); 4652 4653 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called")); 4654 4655 cmd->cmd_status = ENODATA; 4656 4657 WR_IB_QPORT((cmd->frame_phys_addr) | 4658 (((cmd->frame_count - 1) << 1) | 1), instance); 4659 4660 mutex_enter(&instance->int_cmd_mtx); 4661 4662 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 4663 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 4664 } 4665 4666 mutex_exit(&instance->int_cmd_mtx); 4667 4668 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done")); 4669 4670 if (i < (msecs -1)) { 4671 return (DDI_SUCCESS); 4672 } else { 4673 return (DDI_FAILURE); 4674 } 4675 } 4676 4677 
/* 4678 * issue_cmd_in_poll_mode 4679 */ 4680 static int 4681 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance, 4682 struct mrsas_cmd *cmd) 4683 { 4684 int i; 4685 uint16_t flags; 4686 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; 4687 struct mrsas_header *frame_hdr; 4688 4689 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called")); 4690 4691 frame_hdr = (struct mrsas_header *)cmd->frame; 4692 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, 4693 MFI_CMD_STATUS_POLL_MODE); 4694 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); 4695 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 4696 4697 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); 4698 4699 /* issue the frame using inbound queue port */ 4700 WR_IB_QPORT((cmd->frame_phys_addr) | 4701 (((cmd->frame_count - 1) << 1) | 1), instance); 4702 4703 /* wait for cmd_status to change from 0xFF */ 4704 for (i = 0; i < msecs && ( 4705 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4706 == MFI_CMD_STATUS_POLL_MODE); i++) { 4707 drv_usecwait(MILLISEC); /* wait for 1000 usecs */ 4708 } 4709 4710 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4711 == MFI_CMD_STATUS_POLL_MODE) { 4712 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 4713 "cmd polling timed out")); 4714 return (DDI_FAILURE); 4715 } 4716 4717 return (DDI_SUCCESS); 4718 } 4719 4720 static void 4721 enable_intr_ppc(struct mrsas_instance *instance) 4722 { 4723 uint32_t mask; 4724 4725 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called")); 4726 4727 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */ 4728 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance); 4729 4730 /* WR_OB_INTR_MASK(~0x80000000, instance); */ 4731 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance); 4732 4733 /* dummy read to force PCI flush */ 4734 mask = RD_OB_INTR_MASK(instance); 4735 4736 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: " 4737 "outbound_intr_mask = 
0x%x", mask)); 4738 } 4739 4740 static void 4741 disable_intr_ppc(struct mrsas_instance *instance) 4742 { 4743 uint32_t mask; 4744 4745 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called")); 4746 4747 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : " 4748 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4749 4750 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */ 4751 WR_OB_INTR_MASK(OB_INTR_MASK, instance); 4752 4753 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : " 4754 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4755 4756 /* dummy read to force PCI flush */ 4757 mask = RD_OB_INTR_MASK(instance); 4758 #ifdef lint 4759 mask = mask; 4760 #endif 4761 } 4762 4763 static int 4764 intr_ack_ppc(struct mrsas_instance *instance) 4765 { 4766 uint32_t status; 4767 int ret = DDI_INTR_CLAIMED; 4768 4769 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called")); 4770 4771 /* check if it is our interrupt */ 4772 status = RD_OB_INTR_STATUS(instance); 4773 4774 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status)); 4775 4776 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) { 4777 ret = DDI_INTR_UNCLAIMED; 4778 } 4779 4780 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4781 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4782 ret = DDI_INTR_UNCLAIMED; 4783 } 4784 4785 if (ret == DDI_INTR_UNCLAIMED) { 4786 return (ret); 4787 } 4788 /* clear the interrupt by writing back the same value */ 4789 WR_OB_DOORBELL_CLEAR(status, instance); 4790 4791 /* dummy READ */ 4792 status = RD_OB_INTR_STATUS(instance); 4793 4794 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared")); 4795 4796 return (ret); 4797 } 4798 4799 static int 4800 mrsas_common_check(struct mrsas_instance *instance, 4801 struct mrsas_cmd *cmd) 4802 { 4803 int ret = DDI_SUCCESS; 4804 4805 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 4806 DDI_SUCCESS) { 4807 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4808 if (cmd->pkt != 
NULL) { 4809 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4810 cmd->pkt->pkt_statistics = 0; 4811 } 4812 ret = DDI_FAILURE; 4813 } 4814 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 4815 != DDI_SUCCESS) { 4816 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4817 if (cmd->pkt != NULL) { 4818 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4819 cmd->pkt->pkt_statistics = 0; 4820 } 4821 ret = DDI_FAILURE; 4822 } 4823 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 4824 DDI_SUCCESS) { 4825 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4826 if (cmd->pkt != NULL) { 4827 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4828 cmd->pkt->pkt_statistics = 0; 4829 } 4830 ret = DDI_FAILURE; 4831 } 4832 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4833 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4834 4835 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 4836 4837 if (cmd->pkt != NULL) { 4838 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4839 cmd->pkt->pkt_statistics = 0; 4840 } 4841 ret = DDI_FAILURE; 4842 } 4843 4844 return (ret); 4845 } 4846 4847 /*ARGSUSED*/ 4848 static int 4849 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4850 { 4851 /* 4852 * as the driver can always deal with an error in any dma or 4853 * access handle, we can just return the fme_status value. 
4854 */ 4855 pci_ereport_post(dip, err, NULL); 4856 return (err->fme_status); 4857 } 4858 4859 static void 4860 mrsas_fm_init(struct mrsas_instance *instance) 4861 { 4862 /* Need to change iblock to priority for new MSI intr */ 4863 ddi_iblock_cookie_t fm_ibc; 4864 4865 /* Only register with IO Fault Services if we have some capability */ 4866 if (instance->fm_capabilities) { 4867 /* Adjust access and dma attributes for FMA */ 4868 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 4869 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 4870 4871 /* 4872 * Register capabilities with IO Fault Services. 4873 * fm_capabilities will be updated to indicate 4874 * capabilities actually supported (not requested.) 4875 */ 4876 4877 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc); 4878 4879 /* 4880 * Initialize pci ereport capabilities if ereport 4881 * capable (should always be.) 4882 */ 4883 4884 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4885 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4886 pci_ereport_setup(instance->dip); 4887 } 4888 4889 /* 4890 * Register error callback if error callback capable. 4891 */ 4892 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4893 ddi_fm_handler_register(instance->dip, 4894 mrsas_fm_error_cb, (void*) instance); 4895 } 4896 } else { 4897 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4898 mrsas_generic_dma_attr.dma_attr_flags = 0; 4899 } 4900 } 4901 4902 static void 4903 mrsas_fm_fini(struct mrsas_instance *instance) 4904 { 4905 /* Only unregister FMA capabilities if registered */ 4906 if (instance->fm_capabilities) { 4907 /* 4908 * Un-register error callback if error callback capable. 
4909 */ 4910 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4911 ddi_fm_handler_unregister(instance->dip); 4912 } 4913 4914 /* 4915 * Release any resources allocated by pci_ereport_setup() 4916 */ 4917 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4918 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4919 pci_ereport_teardown(instance->dip); 4920 } 4921 4922 /* Unregister from IO Fault Services */ 4923 ddi_fm_fini(instance->dip); 4924 4925 /* Adjust access and dma attributes for FMA */ 4926 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4927 mrsas_generic_dma_attr.dma_attr_flags = 0; 4928 } 4929 } 4930 4931 int 4932 mrsas_check_acc_handle(ddi_acc_handle_t handle) 4933 { 4934 ddi_fm_error_t de; 4935 4936 if (handle == NULL) { 4937 return (DDI_FAILURE); 4938 } 4939 4940 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 4941 4942 return (de.fme_status); 4943 } 4944 4945 int 4946 mrsas_check_dma_handle(ddi_dma_handle_t handle) 4947 { 4948 ddi_fm_error_t de; 4949 4950 if (handle == NULL) { 4951 return (DDI_FAILURE); 4952 } 4953 4954 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 4955 4956 return (de.fme_status); 4957 } 4958 4959 void 4960 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 4961 { 4962 uint64_t ena; 4963 char buf[FM_MAX_CLASS]; 4964 4965 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 4966 ena = fm_ena_generate(0, FM_ENA_FMT1); 4967 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 4968 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 4969 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 4970 } 4971 } 4972 4973 static int 4974 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 4975 { 4976 4977 dev_info_t *dip = instance->dip; 4978 int avail, actual, count; 4979 int i, flag, ret; 4980 4981 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", 4982 intr_type)); 4983 4984 /* Get number of interrupts */ 4985 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 4986 if 
((ret != DDI_SUCCESS) || (count == 0)) { 4987 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 4988 "ret %d count %d", ret, count)); 4989 4990 return (DDI_FAILURE); 4991 } 4992 4993 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); 4994 4995 /* Get number of available interrupts */ 4996 ret = ddi_intr_get_navail(dip, intr_type, &avail); 4997 if ((ret != DDI_SUCCESS) || (avail == 0)) { 4998 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 4999 "ret %d avail %d", ret, avail)); 5000 5001 return (DDI_FAILURE); 5002 } 5003 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); 5004 5005 /* Only one interrupt routine. So limit the count to 1 */ 5006 if (count > 1) { 5007 count = 1; 5008 } 5009 5010 /* 5011 * Allocate an array of interrupt handlers. Currently we support 5012 * only one interrupt. The framework can be extended later. 5013 */ 5014 instance->intr_size = count * sizeof (ddi_intr_handle_t); 5015 instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); 5016 ASSERT(instance->intr_htable); 5017 5018 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == 5019 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 5020 5021 /* Allocate interrupt */ 5022 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 5023 count, &actual, flag); 5024 5025 if ((ret != DDI_SUCCESS) || (actual == 0)) { 5026 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5027 "avail = %d", avail)); 5028 kmem_free(instance->intr_htable, instance->intr_size); 5029 return (DDI_FAILURE); 5030 } 5031 if (actual < count) { 5032 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5033 "Requested = %d Received = %d", count, actual)); 5034 } 5035 instance->intr_cnt = actual; 5036 5037 /* 5038 * Get the priority of the interrupt allocated. 
5039 */ 5040 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 5041 &instance->intr_pri)) != DDI_SUCCESS) { 5042 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5043 "get priority call failed")); 5044 5045 for (i = 0; i < actual; i++) { 5046 (void) ddi_intr_free(instance->intr_htable[i]); 5047 } 5048 kmem_free(instance->intr_htable, instance->intr_size); 5049 return (DDI_FAILURE); 5050 } 5051 5052 /* 5053 * Test for high level mutex. we don't support them. 5054 */ 5055 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 5056 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5057 "High level interrupts not supported.")); 5058 5059 for (i = 0; i < actual; i++) { 5060 (void) ddi_intr_free(instance->intr_htable[i]); 5061 } 5062 kmem_free(instance->intr_htable, instance->intr_size); 5063 return (DDI_FAILURE); 5064 } 5065 5066 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 5067 instance->intr_pri)); 5068 5069 /* Call ddi_intr_add_handler() */ 5070 for (i = 0; i < actual; i++) { 5071 ret = ddi_intr_add_handler(instance->intr_htable[i], 5072 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 5073 (caddr_t)(uintptr_t)i); 5074 5075 if (ret != DDI_SUCCESS) { 5076 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 5077 "failed %d", ret)); 5078 5079 for (i = 0; i < actual; i++) { 5080 (void) ddi_intr_free(instance->intr_htable[i]); 5081 } 5082 kmem_free(instance->intr_htable, instance->intr_size); 5083 return (DDI_FAILURE); 5084 } 5085 5086 } 5087 5088 con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); 5089 5090 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 5091 &instance->intr_cap)) != DDI_SUCCESS) { 5092 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 5093 ret)); 5094 5095 /* Free already allocated intr */ 5096 for (i = 0; i < actual; i++) { 5097 (void) ddi_intr_remove_handler( 5098 instance->intr_htable[i]); 5099 (void) ddi_intr_free(instance->intr_htable[i]); 5100 } 5101 kmem_free(instance->intr_htable, instance->intr_size); 
5102 return (DDI_FAILURE); 5103 } 5104 5105 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5106 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 5107 5108 (void) ddi_intr_block_enable(instance->intr_htable, 5109 instance->intr_cnt); 5110 } else { 5111 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 5112 5113 for (i = 0; i < instance->intr_cnt; i++) { 5114 (void) ddi_intr_enable(instance->intr_htable[i]); 5115 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns " 5116 "%d", i)); 5117 } 5118 } 5119 5120 return (DDI_SUCCESS); 5121 5122 } 5123 5124 5125 static void 5126 mrsas_rem_intrs(struct mrsas_instance *instance) 5127 { 5128 int i; 5129 5130 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 5131 5132 /* Disable all interrupts first */ 5133 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5134 (void) ddi_intr_block_disable(instance->intr_htable, 5135 instance->intr_cnt); 5136 } else { 5137 for (i = 0; i < instance->intr_cnt; i++) { 5138 (void) ddi_intr_disable(instance->intr_htable[i]); 5139 } 5140 } 5141 5142 /* Remove all the handlers */ 5143 5144 for (i = 0; i < instance->intr_cnt; i++) { 5145 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 5146 (void) ddi_intr_free(instance->intr_htable[i]); 5147 } 5148 5149 kmem_free(instance->intr_htable, instance->intr_size); 5150 } 5151 5152 static int 5153 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 5154 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 5155 { 5156 struct mrsas_instance *instance; 5157 int config; 5158 int rval; 5159 5160 char *ptr = NULL; 5161 int tgt, lun; 5162 5163 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 5164 5165 if ((instance = ddi_get_soft_state(mrsas_state, 5166 ddi_get_instance(parent))) == NULL) { 5167 return (NDI_FAILURE); 5168 } 5169 5170 /* Hold nexus during bus_config */ 5171 ndi_devi_enter(parent, &config); 5172 switch (op) { 5173 case BUS_CONFIG_ONE: { 5174 5175 /* parse wwid/target name out of name given */ 5176 
if ((ptr = strchr((char *)arg, '@')) == NULL) { 5177 rval = NDI_FAILURE; 5178 break; 5179 } 5180 ptr++; 5181 5182 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 5183 rval = NDI_FAILURE; 5184 break; 5185 } 5186 5187 if (lun == 0) { 5188 rval = mrsas_config_ld(instance, tgt, lun, childp); 5189 } else { 5190 rval = NDI_FAILURE; 5191 } 5192 5193 break; 5194 } 5195 case BUS_CONFIG_DRIVER: 5196 case BUS_CONFIG_ALL: { 5197 5198 rval = mrsas_config_all_devices(instance); 5199 5200 rval = NDI_SUCCESS; 5201 break; 5202 } 5203 } 5204 5205 if (rval == NDI_SUCCESS) { 5206 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 5207 5208 } 5209 ndi_devi_exit(parent, config); 5210 5211 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 5212 rval)); 5213 return (rval); 5214 } 5215 5216 static int 5217 mrsas_config_all_devices(struct mrsas_instance *instance) 5218 { 5219 int rval, tgt; 5220 5221 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 5222 (void) mrsas_config_ld(instance, tgt, 0, NULL); 5223 5224 } 5225 5226 rval = NDI_SUCCESS; 5227 return (rval); 5228 } 5229 5230 static int 5231 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 5232 { 5233 char devbuf[SCSI_MAXNAMELEN]; 5234 char *addr; 5235 char *p, *tp, *lp; 5236 long num; 5237 5238 /* Parse dev name and address */ 5239 (void) strcpy(devbuf, devnm); 5240 addr = ""; 5241 for (p = devbuf; *p != '\0'; p++) { 5242 if (*p == '@') { 5243 addr = p + 1; 5244 *p = '\0'; 5245 } else if (*p == ':') { 5246 *p = '\0'; 5247 break; 5248 } 5249 } 5250 5251 /* Parse target and lun */ 5252 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 5253 if (*p == ',') { 5254 lp = p + 1; 5255 *p = '\0'; 5256 break; 5257 } 5258 } 5259 if (tgt && tp) { 5260 if (ddi_strtol(tp, NULL, 0x10, &num)) { 5261 return (DDI_FAILURE); /* Can declare this as constant */ 5262 } 5263 *tgt = (int)num; 5264 } 5265 if (lun && lp) { 5266 if (ddi_strtol(lp, NULL, 0x10, &num)) { 5267 return (DDI_FAILURE); 5268 } 5269 *lun = (int)num; 5270 } 5271 return 
(DDI_SUCCESS); /* Success case */ 5272 } 5273 5274 static int 5275 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 5276 uint8_t lun, dev_info_t **ldip) 5277 { 5278 struct scsi_device *sd; 5279 dev_info_t *child; 5280 int rval; 5281 5282 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 5283 tgt, lun)); 5284 5285 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 5286 if (ldip) { 5287 *ldip = child; 5288 } 5289 con_log(CL_ANN1, (CE_NOTE, 5290 "mrsas_config_ld: Child = %p found t = %d l = %d", 5291 (void *)child, tgt, lun)); 5292 return (NDI_SUCCESS); 5293 } 5294 5295 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 5296 sd->sd_address.a_hba_tran = instance->tran; 5297 sd->sd_address.a_target = (uint16_t)tgt; 5298 sd->sd_address.a_lun = (uint8_t)lun; 5299 5300 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 5301 rval = mrsas_config_scsi_device(instance, sd, ldip); 5302 else 5303 rval = NDI_FAILURE; 5304 5305 /* sd_unprobe is blank now. Free buffer manually */ 5306 if (sd->sd_inq) { 5307 kmem_free(sd->sd_inq, SUN_INQSIZE); 5308 sd->sd_inq = (struct scsi_inquiry *)NULL; 5309 } 5310 5311 kmem_free(sd, sizeof (struct scsi_device)); 5312 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 5313 rval)); 5314 return (rval); 5315 } 5316 5317 static int 5318 mrsas_config_scsi_device(struct mrsas_instance *instance, 5319 struct scsi_device *sd, dev_info_t **dipp) 5320 { 5321 char *nodename = NULL; 5322 char **compatible = NULL; 5323 int ncompatible = 0; 5324 char *childname; 5325 dev_info_t *ldip = NULL; 5326 int tgt = sd->sd_address.a_target; 5327 int lun = sd->sd_address.a_lun; 5328 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 5329 int rval; 5330 5331 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 5332 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 5333 NULL, &nodename, &compatible, &ncompatible); 5334 5335 if (nodename == NULL) { 5336 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no 
compatible driver " 5337 "for t%dL%d", tgt, lun)); 5338 rval = NDI_FAILURE; 5339 goto finish; 5340 } 5341 5342 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; 5343 con_log(CL_ANN1, (CE_WARN, 5344 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 5345 5346 /* Create a dev node */ 5347 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 5348 con_log(CL_ANN1, (CE_WARN, 5349 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 5350 if (rval == NDI_SUCCESS) { 5351 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 5352 DDI_PROP_SUCCESS) { 5353 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5354 "property for t%dl%d target", tgt, lun)); 5355 rval = NDI_FAILURE; 5356 goto finish; 5357 } 5358 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 5359 DDI_PROP_SUCCESS) { 5360 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5361 "property for t%dl%d lun", tgt, lun)); 5362 rval = NDI_FAILURE; 5363 goto finish; 5364 } 5365 5366 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 5367 "compatible", compatible, ncompatible) != 5368 DDI_PROP_SUCCESS) { 5369 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5370 "property for t%dl%d compatible", tgt, lun)); 5371 rval = NDI_FAILURE; 5372 goto finish; 5373 } 5374 5375 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 5376 if (rval != NDI_SUCCESS) { 5377 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 5378 "t%dl%d", tgt, lun)); 5379 ndi_prop_remove_all(ldip); 5380 (void) ndi_devi_free(ldip); 5381 } else { 5382 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 5383 "0 t%dl%d", tgt, lun)); 5384 } 5385 5386 } 5387 finish: 5388 if (dipp) { 5389 *dipp = ldip; 5390 } 5391 5392 con_log(CL_DLEVEL1, (CE_WARN, 5393 "mr_sas: config_scsi_device rval = %d t%dL%d", 5394 rval, tgt, lun)); 5395 scsi_hba_nodename_compatible_free(nodename, compatible); 5396 return (rval); 5397 } 5398 5399 /*ARGSUSED*/ 5400 static int 5401 mrsas_service_evt(struct 
mrsas_instance *instance, int tgt, int lun, int event, 5402 uint64_t wwn) 5403 { 5404 struct mrsas_eventinfo *mrevt = NULL; 5405 5406 con_log(CL_ANN1, (CE_NOTE, 5407 "mrsas_service_evt called for t%dl%d event = %d", 5408 tgt, lun, event)); 5409 5410 if ((instance->taskq == NULL) || (mrevt = 5411 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 5412 return (ENOMEM); 5413 } 5414 5415 mrevt->instance = instance; 5416 mrevt->tgt = tgt; 5417 mrevt->lun = lun; 5418 mrevt->event = event; 5419 5420 if ((ddi_taskq_dispatch(instance->taskq, 5421 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 5422 DDI_SUCCESS) { 5423 con_log(CL_ANN1, (CE_NOTE, 5424 "mr_sas: Event task failed for t%dl%d event = %d", 5425 tgt, lun, event)); 5426 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5427 return (DDI_FAILURE); 5428 } 5429 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event); 5430 return (DDI_SUCCESS); 5431 } 5432 5433 static void 5434 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 5435 { 5436 struct mrsas_instance *instance = mrevt->instance; 5437 dev_info_t *dip, *pdip; 5438 int circ1 = 0; 5439 char *devname; 5440 5441 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 5442 " tgt %d lun %d event %d", 5443 mrevt->tgt, mrevt->lun, mrevt->event)); 5444 5445 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 5446 dip = instance->mr_ld_list[mrevt->tgt].dip; 5447 } else { 5448 return; 5449 } 5450 5451 ndi_devi_enter(instance->dip, &circ1); 5452 switch (mrevt->event) { 5453 case MRSAS_EVT_CONFIG_TGT: 5454 if (dip == NULL) { 5455 5456 if (mrevt->lun == 0) { 5457 (void) mrsas_config_ld(instance, mrevt->tgt, 5458 0, NULL); 5459 } 5460 con_log(CL_ANN1, (CE_NOTE, 5461 "mr_sas: EVT_CONFIG_TGT called:" 5462 " for tgt %d lun %d event %d", 5463 mrevt->tgt, mrevt->lun, mrevt->event)); 5464 5465 } else { 5466 con_log(CL_ANN1, (CE_NOTE, 5467 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 5468 " for tgt %d lun %d event %d", 5469 mrevt->tgt, 
mrevt->lun, mrevt->event)); 5470 } 5471 break; 5472 case MRSAS_EVT_UNCONFIG_TGT: 5473 if (dip) { 5474 if (i_ddi_devi_attached(dip)) { 5475 5476 pdip = ddi_get_parent(dip); 5477 5478 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP); 5479 (void) ddi_deviname(dip, devname); 5480 5481 (void) devfs_clean(pdip, devname + 1, 5482 DV_CLEAN_FORCE); 5483 kmem_free(devname, MAXNAMELEN + 1); 5484 } 5485 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 5486 con_log(CL_ANN1, (CE_NOTE, 5487 "mr_sas: EVT_UNCONFIG_TGT called:" 5488 " for tgt %d lun %d event %d", 5489 mrevt->tgt, mrevt->lun, mrevt->event)); 5490 } else { 5491 con_log(CL_ANN1, (CE_NOTE, 5492 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 5493 " for tgt %d lun %d event %d", 5494 mrevt->tgt, mrevt->lun, mrevt->event)); 5495 } 5496 break; 5497 } 5498 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5499 ndi_devi_exit(instance->dip, circ1); 5500 } 5501 5502 static int 5503 mrsas_mode_sense_build(struct scsi_pkt *pkt) 5504 { 5505 union scsi_cdb *cdbp; 5506 uint16_t page_code; 5507 struct scsa_cmd *acmd; 5508 struct buf *bp; 5509 struct mode_header *modehdrp; 5510 5511 cdbp = (void *)pkt->pkt_cdbp; 5512 page_code = cdbp->cdb_un.sg.scsi[0]; 5513 acmd = PKT2CMD(pkt); 5514 bp = acmd->cmd_buf; 5515 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 5516 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 5517 /* ADD pkt statistics as Command failed. 
*/ 5518 return (NULL); 5519 } 5520 5521 bp_mapin(bp); 5522 bzero(bp->b_un.b_addr, bp->b_bcount); 5523 5524 switch (page_code) { 5525 case 0x3: { 5526 struct mode_format *page3p = NULL; 5527 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5528 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5529 5530 page3p = (void *)((caddr_t)modehdrp + 5531 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5532 page3p->mode_page.code = 0x3; 5533 page3p->mode_page.length = 5534 (uchar_t)(sizeof (struct mode_format)); 5535 page3p->data_bytes_sect = 512; 5536 page3p->sect_track = 63; 5537 break; 5538 } 5539 case 0x4: { 5540 struct mode_geometry *page4p = NULL; 5541 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5542 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5543 5544 page4p = (void *)((caddr_t)modehdrp + 5545 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5546 page4p->mode_page.code = 0x4; 5547 page4p->mode_page.length = 5548 (uchar_t)(sizeof (struct mode_geometry)); 5549 page4p->heads = 255; 5550 page4p->rpm = 10000; 5551 break; 5552 } 5553 default: 5554 break; 5555 } 5556 return (NULL); 5557 } 5558