1 /* 2 * mr_sas.c: source for mr_sas driver 3 * 4 * MegaRAID device driver for SAS2.0 controllers 5 * Copyright (c) 2008-2009, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Arun Chandrashekhar 11 * Manju R 12 * Rajesh Prabhakaran 13 * Seokmann Ju 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions are met: 17 * 18 * 1. Redistributions of source code must retain the above copyright notice, 19 * this list of conditions and the following disclaimer. 20 * 21 * 2. Redistributions in binary form must reproduce the above copyright notice, 22 * this list of conditions and the following disclaimer in the documentation 23 * and/or other materials provided with the distribution. 24 * 25 * 3. Neither the name of the author nor the names of its contributors may be 26 * used to endorse or promote products derived from this software without 27 * specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 33 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 40 * DAMAGE. 41 */ 42 43 /* 44 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 45 * Use is subject to license terms. 
46 */ 47 48 #include <sys/types.h> 49 #include <sys/param.h> 50 #include <sys/file.h> 51 #include <sys/errno.h> 52 #include <sys/open.h> 53 #include <sys/cred.h> 54 #include <sys/modctl.h> 55 #include <sys/conf.h> 56 #include <sys/devops.h> 57 #include <sys/cmn_err.h> 58 #include <sys/kmem.h> 59 #include <sys/stat.h> 60 #include <sys/mkdev.h> 61 #include <sys/pci.h> 62 #include <sys/scsi/scsi.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/atomic.h> 66 #include <sys/signal.h> 67 #include <sys/byteorder.h> 68 #include <sys/sdt.h> 69 #include <sys/fs/dv_node.h> /* devfs_clean */ 70 71 #include "mr_sas.h" 72 73 /* 74 * FMA header files 75 */ 76 #include <sys/ddifm.h> 77 #include <sys/fm/protocol.h> 78 #include <sys/fm/util.h> 79 #include <sys/fm/io/ddi.h> 80 81 /* 82 * Local static data 83 */ 84 static void *mrsas_state = NULL; 85 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE; 86 static volatile int debug_level_g = CL_NONE; 87 static volatile int msi_enable = 1; 88 89 #pragma weak scsi_hba_open 90 #pragma weak scsi_hba_close 91 #pragma weak scsi_hba_ioctl 92 93 static ddi_dma_attr_t mrsas_generic_dma_attr = { 94 DMA_ATTR_V0, /* dma_attr_version */ 95 0, /* low DMA address range */ 96 0xFFFFFFFFU, /* high DMA address range */ 97 0xFFFFFFFFU, /* DMA counter register */ 98 8, /* DMA address alignment */ 99 0x07, /* DMA burstsizes */ 100 1, /* min DMA size */ 101 0xFFFFFFFFU, /* max DMA size */ 102 0xFFFFFFFFU, /* segment boundary */ 103 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 104 512, /* granularity of device */ 105 0 /* bus specific DMA flags */ 106 }; 107 108 int32_t mrsas_max_cap_maxxfer = 0x1000000; 109 110 /* 111 * cb_ops contains base level routines 112 */ 113 static struct cb_ops mrsas_cb_ops = { 114 mrsas_open, /* open */ 115 mrsas_close, /* close */ 116 nodev, /* strategy */ 117 nodev, /* print */ 118 nodev, /* dump */ 119 nodev, /* read */ 120 nodev, /* write */ 121 mrsas_ioctl, /* ioctl */ 122 nodev, /* devmap */ 123 nodev, 
/* mmap */ 124 nodev, /* segmap */ 125 nochpoll, /* poll */ 126 nodev, /* cb_prop_op */ 127 0, /* streamtab */ 128 D_NEW | D_HOTPLUG, /* cb_flag */ 129 CB_REV, /* cb_rev */ 130 nodev, /* cb_aread */ 131 nodev /* cb_awrite */ 132 }; 133 134 /* 135 * dev_ops contains configuration routines 136 */ 137 static struct dev_ops mrsas_ops = { 138 DEVO_REV, /* rev, */ 139 0, /* refcnt */ 140 mrsas_getinfo, /* getinfo */ 141 nulldev, /* identify */ 142 nulldev, /* probe */ 143 mrsas_attach, /* attach */ 144 mrsas_detach, /* detach */ 145 mrsas_reset, /* reset */ 146 &mrsas_cb_ops, /* char/block ops */ 147 NULL, /* bus ops */ 148 NULL, /* power */ 149 ddi_quiesce_not_supported, /* quiesce */ 150 }; 151 152 char _depends_on[] = "misc/scsi"; 153 154 static struct modldrv modldrv = { 155 &mod_driverops, /* module type - driver */ 156 MRSAS_VERSION, 157 &mrsas_ops, /* driver ops */ 158 }; 159 160 static struct modlinkage modlinkage = { 161 MODREV_1, /* ml_rev - must be MODREV_1 */ 162 &modldrv, /* ml_linkage */ 163 NULL /* end of driver linkage */ 164 }; 165 166 static struct ddi_device_acc_attr endian_attr = { 167 DDI_DEVICE_ATTR_V0, 168 DDI_STRUCTURE_LE_ACC, 169 DDI_STRICTORDER_ACC 170 }; 171 172 173 /* 174 * ************************************************************************** * 175 * * 176 * common entry points - for loadable kernel modules * 177 * * 178 * ************************************************************************** * 179 */ 180 181 int 182 _init(void) 183 { 184 int ret; 185 186 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 187 188 ret = ddi_soft_state_init(&mrsas_state, 189 sizeof (struct mrsas_instance), 0); 190 191 if (ret != DDI_SUCCESS) { 192 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state")); 193 return (ret); 194 } 195 196 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 197 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba")); 198 ddi_soft_state_fini(&mrsas_state); 199 return (ret); 200 } 201 202 ret = 
mod_install(&modlinkage); 203 204 if (ret != DDI_SUCCESS) { 205 con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed")); 206 scsi_hba_fini(&modlinkage); 207 ddi_soft_state_fini(&mrsas_state); 208 } 209 210 return (ret); 211 } 212 213 int 214 _info(struct modinfo *modinfop) 215 { 216 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 217 218 return (mod_info(&modlinkage, modinfop)); 219 } 220 221 int 222 _fini(void) 223 { 224 int ret; 225 226 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 227 228 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) 229 return (ret); 230 231 scsi_hba_fini(&modlinkage); 232 233 ddi_soft_state_fini(&mrsas_state); 234 235 return (ret); 236 } 237 238 239 /* 240 * ************************************************************************** * 241 * * 242 * common entry points - for autoconfiguration * 243 * * 244 * ************************************************************************** * 245 */ 246 247 static int 248 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 249 { 250 int instance_no; 251 int nregs; 252 uint8_t added_isr_f = 0; 253 uint8_t added_soft_isr_f = 0; 254 uint8_t create_devctl_node_f = 0; 255 uint8_t create_scsi_node_f = 0; 256 uint8_t create_ioc_node_f = 0; 257 uint8_t tran_alloc_f = 0; 258 uint8_t irq; 259 uint16_t vendor_id; 260 uint16_t device_id; 261 uint16_t subsysvid; 262 uint16_t subsysid; 263 uint16_t command; 264 off_t reglength = 0; 265 int intr_types = 0; 266 char *data; 267 268 scsi_hba_tran_t *tran; 269 ddi_dma_attr_t tran_dma_attr; 270 struct mrsas_instance *instance; 271 272 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 273 274 /* CONSTCOND */ 275 ASSERT(NO_COMPETING_THREADS); 276 277 instance_no = ddi_get_instance(dip); 278 279 /* 280 * check to see whether this device is in a DMA-capable slot. 
281 */ 282 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 283 con_log(CL_ANN, (CE_WARN, 284 "mr_sas%d: Device in slave-only slot, unused", 285 instance_no)); 286 return (DDI_FAILURE); 287 } 288 289 switch (cmd) { 290 case DDI_ATTACH: 291 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH")); 292 /* allocate the soft state for the instance */ 293 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 294 != DDI_SUCCESS) { 295 con_log(CL_ANN, (CE_WARN, 296 "mr_sas%d: Failed to allocate soft state", 297 instance_no)); 298 299 return (DDI_FAILURE); 300 } 301 302 instance = (struct mrsas_instance *)ddi_get_soft_state 303 (mrsas_state, instance_no); 304 305 if (instance == NULL) { 306 con_log(CL_ANN, (CE_WARN, 307 "mr_sas%d: Bad soft state", instance_no)); 308 309 ddi_soft_state_free(mrsas_state, instance_no); 310 311 return (DDI_FAILURE); 312 } 313 314 bzero((caddr_t)instance, 315 sizeof (struct mrsas_instance)); 316 317 instance->func_ptr = kmem_zalloc( 318 sizeof (struct mrsas_func_ptr), KM_SLEEP); 319 ASSERT(instance->func_ptr); 320 321 /* Setup the PCI configuration space handles */ 322 if (pci_config_setup(dip, &instance->pci_handle) != 323 DDI_SUCCESS) { 324 con_log(CL_ANN, (CE_WARN, 325 "mr_sas%d: pci config setup failed ", 326 instance_no)); 327 328 kmem_free(instance->func_ptr, 329 sizeof (struct mrsas_func_ptr)); 330 ddi_soft_state_free(mrsas_state, instance_no); 331 332 return (DDI_FAILURE); 333 } 334 335 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 336 con_log(CL_ANN, (CE_WARN, 337 "mr_sas: failed to get registers.")); 338 339 pci_config_teardown(&instance->pci_handle); 340 kmem_free(instance->func_ptr, 341 sizeof (struct mrsas_func_ptr)); 342 ddi_soft_state_free(mrsas_state, instance_no); 343 344 return (DDI_FAILURE); 345 } 346 347 vendor_id = pci_config_get16(instance->pci_handle, 348 PCI_CONF_VENID); 349 device_id = pci_config_get16(instance->pci_handle, 350 PCI_CONF_DEVID); 351 352 subsysvid = pci_config_get16(instance->pci_handle, 353 PCI_CONF_SUBVENID); 354 
subsysid = pci_config_get16(instance->pci_handle, 355 PCI_CONF_SUBSYSID); 356 357 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 358 (pci_config_get16(instance->pci_handle, 359 PCI_CONF_COMM) | PCI_COMM_ME)); 360 irq = pci_config_get8(instance->pci_handle, 361 PCI_CONF_ILINE); 362 363 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 364 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s", 365 instance_no, vendor_id, device_id, subsysvid, 366 subsysid, irq, MRSAS_VERSION)); 367 368 /* enable bus-mastering */ 369 command = pci_config_get16(instance->pci_handle, 370 PCI_CONF_COMM); 371 372 if (!(command & PCI_COMM_ME)) { 373 command |= PCI_COMM_ME; 374 375 pci_config_put16(instance->pci_handle, 376 PCI_CONF_COMM, command); 377 378 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 379 "enable bus-mastering", instance_no)); 380 } else { 381 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 382 "bus-mastering already set", instance_no)); 383 } 384 385 /* initialize function pointers */ 386 if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || 387 (device_id == PCI_DEVICE_ID_LSI_2108V)) { 388 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 389 "2108V/DE detected", instance_no)); 390 instance->func_ptr->read_fw_status_reg = 391 read_fw_status_reg_ppc; 392 instance->func_ptr->issue_cmd = issue_cmd_ppc; 393 instance->func_ptr->issue_cmd_in_sync_mode = 394 issue_cmd_in_sync_mode_ppc; 395 instance->func_ptr->issue_cmd_in_poll_mode = 396 issue_cmd_in_poll_mode_ppc; 397 instance->func_ptr->enable_intr = 398 enable_intr_ppc; 399 instance->func_ptr->disable_intr = 400 disable_intr_ppc; 401 instance->func_ptr->intr_ack = intr_ack_ppc; 402 } else { 403 con_log(CL_ANN, (CE_WARN, 404 "mr_sas: Invalid device detected")); 405 406 pci_config_teardown(&instance->pci_handle); 407 kmem_free(instance->func_ptr, 408 sizeof (struct mrsas_func_ptr)); 409 ddi_soft_state_free(mrsas_state, instance_no); 410 411 return (DDI_FAILURE); 412 } 413 414 instance->baseaddress = pci_config_get32( 415 instance->pci_handle, PCI_CONF_BASE0); 416 
instance->baseaddress &= 0x0fffc; 417 418 instance->dip = dip; 419 instance->vendor_id = vendor_id; 420 instance->device_id = device_id; 421 instance->subsysvid = subsysvid; 422 instance->subsysid = subsysid; 423 instance->instance = instance_no; 424 425 /* Initialize FMA */ 426 instance->fm_capabilities = ddi_prop_get_int( 427 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 428 "fm-capable", DDI_FM_EREPORT_CAPABLE | 429 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 430 | DDI_FM_ERRCB_CAPABLE); 431 432 mrsas_fm_init(instance); 433 434 /* Initialize Interrupts */ 435 if ((ddi_dev_regsize(instance->dip, 436 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 437 reglength < MINIMUM_MFI_MEM_SZ) { 438 return (DDI_FAILURE); 439 } 440 if (reglength > DEFAULT_MFI_MEM_SZ) { 441 reglength = DEFAULT_MFI_MEM_SZ; 442 con_log(CL_DLEVEL1, (CE_NOTE, 443 "mr_sas: register length to map is " 444 "0x%lx bytes", reglength)); 445 } 446 if (ddi_regs_map_setup(instance->dip, 447 REGISTER_SET_IO_2108, &instance->regmap, 0, 448 reglength, &endian_attr, &instance->regmap_handle) 449 != DDI_SUCCESS) { 450 con_log(CL_ANN, (CE_NOTE, 451 "mr_sas: couldn't map control registers")); 452 goto fail_attach; 453 } 454 455 /* 456 * Disable Interrupt Now. 
457 * Setup Software interrupt 458 */ 459 instance->func_ptr->disable_intr(instance); 460 461 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 462 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 463 if (strncmp(data, "no", 3) == 0) { 464 msi_enable = 0; 465 con_log(CL_ANN1, (CE_WARN, 466 "msi_enable = %d disabled", 467 msi_enable)); 468 } 469 ddi_prop_free(data); 470 } 471 472 con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", 473 msi_enable)); 474 475 /* Check for all supported interrupt types */ 476 if (ddi_intr_get_supported_types( 477 dip, &intr_types) != DDI_SUCCESS) { 478 con_log(CL_ANN, (CE_WARN, 479 "ddi_intr_get_supported_types() failed")); 480 goto fail_attach; 481 } 482 483 con_log(CL_DLEVEL1, (CE_NOTE, 484 "ddi_intr_get_supported_types() ret: 0x%x", 485 intr_types)); 486 487 /* Initialize and Setup Interrupt handler */ 488 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 489 if (mrsas_add_intrs(instance, 490 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { 491 con_log(CL_ANN, (CE_WARN, 492 "MSIX interrupt query failed")); 493 goto fail_attach; 494 } 495 instance->intr_type = DDI_INTR_TYPE_MSIX; 496 } else if (msi_enable && (intr_types & 497 DDI_INTR_TYPE_MSI)) { 498 if (mrsas_add_intrs(instance, 499 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 500 con_log(CL_ANN, (CE_WARN, 501 "MSI interrupt query failed")); 502 goto fail_attach; 503 } 504 instance->intr_type = DDI_INTR_TYPE_MSI; 505 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 506 msi_enable = 0; 507 if (mrsas_add_intrs(instance, 508 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 509 con_log(CL_ANN, (CE_WARN, 510 "FIXED interrupt query failed")); 511 goto fail_attach; 512 } 513 instance->intr_type = DDI_INTR_TYPE_FIXED; 514 } else { 515 con_log(CL_ANN, (CE_WARN, "Device cannot " 516 "suppport either FIXED or MSI/X " 517 "interrupts")); 518 goto fail_attach; 519 } 520 521 added_isr_f = 1; 522 523 /* setup the mfi based low level driver */ 524 if (init_mfi(instance) != DDI_SUCCESS) { 525 con_log(CL_ANN, (CE_WARN, "mr_sas: " 
526 "could not initialize the low level driver")); 527 528 goto fail_attach; 529 } 530 531 /* Initialize all Mutex */ 532 INIT_LIST_HEAD(&instance->completed_pool_list); 533 mutex_init(&instance->completed_pool_mtx, 534 "completed_pool_mtx", MUTEX_DRIVER, 535 DDI_INTR_PRI(instance->intr_pri)); 536 537 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 538 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 539 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 540 541 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", 542 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 543 544 /* Register our soft-isr for highlevel interrupts. */ 545 instance->isr_level = instance->intr_pri; 546 if (instance->isr_level == HIGH_LEVEL_INTR) { 547 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 548 &instance->soft_intr_id, NULL, NULL, 549 mrsas_softintr, (caddr_t)instance) != 550 DDI_SUCCESS) { 551 con_log(CL_ANN, (CE_WARN, 552 " Software ISR did not register")); 553 554 goto fail_attach; 555 } 556 557 added_soft_isr_f = 1; 558 } 559 560 /* Allocate a transport structure */ 561 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 562 563 if (tran == NULL) { 564 con_log(CL_ANN, (CE_WARN, 565 "scsi_hba_tran_alloc failed")); 566 goto fail_attach; 567 } 568 569 tran_alloc_f = 1; 570 571 instance->tran = tran; 572 573 tran->tran_hba_private = instance; 574 tran->tran_tgt_init = mrsas_tran_tgt_init; 575 tran->tran_tgt_probe = scsi_hba_probe; 576 tran->tran_tgt_free = mrsas_tran_tgt_free; 577 tran->tran_init_pkt = mrsas_tran_init_pkt; 578 tran->tran_start = mrsas_tran_start; 579 tran->tran_abort = mrsas_tran_abort; 580 tran->tran_reset = mrsas_tran_reset; 581 tran->tran_getcap = mrsas_tran_getcap; 582 tran->tran_setcap = mrsas_tran_setcap; 583 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 584 tran->tran_dmafree = mrsas_tran_dmafree; 585 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 586 tran->tran_bus_config = mrsas_tran_bus_config; 587 588 if (mrsas_relaxed_ordering) 589 
mrsas_generic_dma_attr.dma_attr_flags |= 590 DDI_DMA_RELAXED_ORDERING; 591 592 593 tran_dma_attr = mrsas_generic_dma_attr; 594 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 595 596 /* Attach this instance of the hba */ 597 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 598 != DDI_SUCCESS) { 599 con_log(CL_ANN, (CE_WARN, 600 "scsi_hba_attach failed")); 601 602 goto fail_attach; 603 } 604 605 /* create devctl node for cfgadm command */ 606 if (ddi_create_minor_node(dip, "devctl", 607 S_IFCHR, INST2DEVCTL(instance_no), 608 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 609 con_log(CL_ANN, (CE_WARN, 610 "mr_sas: failed to create devctl node.")); 611 612 goto fail_attach; 613 } 614 615 create_devctl_node_f = 1; 616 617 /* create scsi node for cfgadm command */ 618 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 619 INST2SCSI(instance_no), 620 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 621 DDI_FAILURE) { 622 con_log(CL_ANN, (CE_WARN, 623 "mr_sas: failed to create scsi node.")); 624 625 goto fail_attach; 626 } 627 628 create_scsi_node_f = 1; 629 630 (void) sprintf(instance->iocnode, "%d:lsirdctl", 631 instance_no); 632 633 /* 634 * Create a node for applications 635 * for issuing ioctl to the driver. 
636 */ 637 if (ddi_create_minor_node(dip, instance->iocnode, 638 S_IFCHR, INST2LSIRDCTL(instance_no), 639 DDI_PSEUDO, 0) == DDI_FAILURE) { 640 con_log(CL_ANN, (CE_WARN, 641 "mr_sas: failed to create ioctl node.")); 642 643 goto fail_attach; 644 } 645 646 create_ioc_node_f = 1; 647 648 /* Create a taskq to handle dr events */ 649 if ((instance->taskq = ddi_taskq_create(dip, 650 "mrsas_dr_taskq", 1, 651 TASKQ_DEFAULTPRI, 0)) == NULL) { 652 con_log(CL_ANN, (CE_WARN, 653 "mr_sas: failed to create taskq ")); 654 instance->taskq = NULL; 655 goto fail_attach; 656 } 657 658 /* enable interrupt */ 659 instance->func_ptr->enable_intr(instance); 660 661 /* initiate AEN */ 662 if (start_mfi_aen(instance)) { 663 con_log(CL_ANN, (CE_WARN, 664 "mr_sas: failed to initiate AEN.")); 665 goto fail_initiate_aen; 666 } 667 668 con_log(CL_DLEVEL1, (CE_NOTE, 669 "AEN started for instance %d.", instance_no)); 670 671 /* Finally! We are on the air. */ 672 ddi_report_dev(dip); 673 674 if (mrsas_check_acc_handle(instance->regmap_handle) != 675 DDI_SUCCESS) { 676 goto fail_attach; 677 } 678 if (mrsas_check_acc_handle(instance->pci_handle) != 679 DDI_SUCCESS) { 680 goto fail_attach; 681 } 682 instance->mr_ld_list = 683 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 684 KM_SLEEP); 685 break; 686 case DDI_PM_RESUME: 687 con_log(CL_ANN, (CE_NOTE, 688 "mr_sas: DDI_PM_RESUME")); 689 break; 690 case DDI_RESUME: 691 con_log(CL_ANN, (CE_NOTE, 692 "mr_sas: DDI_RESUME")); 693 break; 694 default: 695 con_log(CL_ANN, (CE_WARN, 696 "mr_sas: invalid attach cmd=%x", cmd)); 697 return (DDI_FAILURE); 698 } 699 700 return (DDI_SUCCESS); 701 702 fail_initiate_aen: 703 fail_attach: 704 if (create_devctl_node_f) { 705 ddi_remove_minor_node(dip, "devctl"); 706 } 707 708 if (create_scsi_node_f) { 709 ddi_remove_minor_node(dip, "scsi"); 710 } 711 712 if (create_ioc_node_f) { 713 ddi_remove_minor_node(dip, instance->iocnode); 714 } 715 716 if (tran_alloc_f) { 717 scsi_hba_tran_free(tran); 718 } 719 720 721 if 
(added_soft_isr_f) { 722 ddi_remove_softintr(instance->soft_intr_id); 723 } 724 725 if (added_isr_f) { 726 mrsas_rem_intrs(instance); 727 } 728 729 if (instance && instance->taskq) { 730 ddi_taskq_destroy(instance->taskq); 731 } 732 733 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 734 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 735 736 mrsas_fm_fini(instance); 737 738 pci_config_teardown(&instance->pci_handle); 739 740 ddi_soft_state_free(mrsas_state, instance_no); 741 742 con_log(CL_ANN, (CE_NOTE, 743 "mr_sas: return failure from mrsas_attach")); 744 745 return (DDI_FAILURE); 746 } 747 748 /*ARGSUSED*/ 749 static int 750 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 751 { 752 int rval; 753 int mrsas_minor = getminor((dev_t)arg); 754 755 struct mrsas_instance *instance; 756 757 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 758 759 switch (cmd) { 760 case DDI_INFO_DEVT2DEVINFO: 761 instance = (struct mrsas_instance *) 762 ddi_get_soft_state(mrsas_state, 763 MINOR2INST(mrsas_minor)); 764 765 if (instance == NULL) { 766 *resultp = NULL; 767 rval = DDI_FAILURE; 768 } else { 769 *resultp = instance->dip; 770 rval = DDI_SUCCESS; 771 } 772 break; 773 case DDI_INFO_DEVT2INSTANCE: 774 *resultp = (void *)(intptr_t) 775 (MINOR2INST(getminor((dev_t)arg))); 776 rval = DDI_SUCCESS; 777 break; 778 default: 779 *resultp = NULL; 780 rval = DDI_FAILURE; 781 } 782 783 return (rval); 784 } 785 786 static int 787 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 788 { 789 int instance_no; 790 791 struct mrsas_instance *instance; 792 793 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 794 795 /* CONSTCOND */ 796 ASSERT(NO_COMPETING_THREADS); 797 798 instance_no = ddi_get_instance(dip); 799 800 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 801 instance_no); 802 803 if (!instance) { 804 con_log(CL_ANN, (CE_WARN, 805 "mr_sas:%d could not get instance in detach", 806 
instance_no)); 807 808 return (DDI_FAILURE); 809 } 810 811 con_log(CL_ANN, (CE_NOTE, 812 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x", 813 instance_no, instance->vendor_id, instance->device_id, 814 instance->subsysvid, instance->subsysid)); 815 816 switch (cmd) { 817 case DDI_DETACH: 818 con_log(CL_ANN, (CE_NOTE, 819 "mrsas_detach: DDI_DETACH")); 820 821 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 822 con_log(CL_ANN, (CE_WARN, 823 "mr_sas:%d failed to detach", 824 instance_no)); 825 826 return (DDI_FAILURE); 827 } 828 829 scsi_hba_tran_free(instance->tran); 830 831 flush_cache(instance); 832 833 if (abort_aen_cmd(instance, instance->aen_cmd)) { 834 con_log(CL_ANN, (CE_WARN, "mrsas_detach: " 835 "failed to abort prevous AEN command")); 836 837 return (DDI_FAILURE); 838 } 839 840 instance->func_ptr->disable_intr(instance); 841 842 if (instance->isr_level == HIGH_LEVEL_INTR) { 843 ddi_remove_softintr(instance->soft_intr_id); 844 } 845 846 mrsas_rem_intrs(instance); 847 848 if (instance->taskq) { 849 ddi_taskq_destroy(instance->taskq); 850 } 851 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 852 * sizeof (struct mrsas_ld)); 853 free_space_for_mfi(instance); 854 855 mrsas_fm_fini(instance); 856 857 pci_config_teardown(&instance->pci_handle); 858 859 kmem_free(instance->func_ptr, 860 sizeof (struct mrsas_func_ptr)); 861 862 ddi_soft_state_free(mrsas_state, instance_no); 863 break; 864 case DDI_PM_SUSPEND: 865 con_log(CL_ANN, (CE_NOTE, 866 "mrsas_detach: DDI_PM_SUSPEND")); 867 868 break; 869 case DDI_SUSPEND: 870 con_log(CL_ANN, (CE_NOTE, 871 "mrsas_detach: DDI_SUSPEND")); 872 873 break; 874 default: 875 con_log(CL_ANN, (CE_WARN, 876 "invalid detach command:0x%x", cmd)); 877 return (DDI_FAILURE); 878 } 879 880 return (DDI_SUCCESS); 881 } 882 883 /* 884 * ************************************************************************** * 885 * * 886 * common entry points - for character driver types * 887 * * 888 * 
************************************************************************** * 889 */ 890 static int 891 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 892 { 893 int rval = 0; 894 895 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 896 897 /* Check root permissions */ 898 if (drv_priv(credp) != 0) { 899 con_log(CL_ANN, (CE_WARN, 900 "mr_sas: Non-root ioctl access denied!")); 901 return (EPERM); 902 } 903 904 /* Verify we are being opened as a character device */ 905 if (otyp != OTYP_CHR) { 906 con_log(CL_ANN, (CE_WARN, 907 "mr_sas: ioctl node must be a char node")); 908 return (EINVAL); 909 } 910 911 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 912 == NULL) { 913 return (ENXIO); 914 } 915 916 if (scsi_hba_open) { 917 rval = scsi_hba_open(dev, openflags, otyp, credp); 918 } 919 920 return (rval); 921 } 922 923 static int 924 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 925 { 926 int rval = 0; 927 928 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 929 930 /* no need for locks! 
*/ 931 932 if (scsi_hba_close) { 933 rval = scsi_hba_close(dev, openflags, otyp, credp); 934 } 935 936 return (rval); 937 } 938 939 static int 940 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 941 int *rvalp) 942 { 943 int rval = 0; 944 945 struct mrsas_instance *instance; 946 struct mrsas_ioctl *ioctl; 947 struct mrsas_aen aen; 948 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 949 950 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 951 952 if (instance == NULL) { 953 /* invalid minor number */ 954 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 955 return (ENXIO); 956 } 957 958 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 959 KM_SLEEP); 960 ASSERT(ioctl); 961 962 switch ((uint_t)cmd) { 963 case MRSAS_IOCTL_FIRMWARE: 964 if (ddi_copyin((void *)arg, ioctl, 965 sizeof (struct mrsas_ioctl), mode)) { 966 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 967 "ERROR IOCTL copyin")); 968 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 969 return (EFAULT); 970 } 971 972 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 973 rval = handle_drv_ioctl(instance, ioctl, mode); 974 } else { 975 rval = handle_mfi_ioctl(instance, ioctl, mode); 976 } 977 978 if (ddi_copyout((void *)ioctl, (void *)arg, 979 (sizeof (struct mrsas_ioctl) - 1), mode)) { 980 con_log(CL_ANN, (CE_WARN, 981 "mrsas_ioctl: copy_to_user failed")); 982 rval = 1; 983 } 984 985 break; 986 case MRSAS_IOCTL_AEN: 987 if (ddi_copyin((void *) arg, &aen, 988 sizeof (struct mrsas_aen), mode)) { 989 con_log(CL_ANN, (CE_WARN, 990 "mrsas_ioctl: ERROR AEN copyin")); 991 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 992 return (EFAULT); 993 } 994 995 rval = handle_mfi_aen(instance, &aen); 996 997 if (ddi_copyout((void *) &aen, (void *)arg, 998 sizeof (struct mrsas_aen), mode)) { 999 con_log(CL_ANN, (CE_WARN, 1000 "mrsas_ioctl: copy_to_user failed")); 1001 rval = 1; 1002 } 1003 1004 break; 1005 default: 1006 rval = 
scsi_hba_ioctl(dev, cmd, arg, 1007 mode, credp, rvalp); 1008 1009 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: " 1010 "scsi_hba_ioctl called, ret = %x.", rval)); 1011 } 1012 1013 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1014 return (rval); 1015 } 1016 1017 /* 1018 * ************************************************************************** * 1019 * * 1020 * common entry points - for block driver types * 1021 * * 1022 * ************************************************************************** * 1023 */ 1024 /*ARGSUSED*/ 1025 static int 1026 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1027 { 1028 int instance_no; 1029 1030 struct mrsas_instance *instance; 1031 1032 instance_no = ddi_get_instance(dip); 1033 instance = (struct mrsas_instance *)ddi_get_soft_state 1034 (mrsas_state, instance_no); 1035 1036 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1037 1038 if (!instance) { 1039 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter " 1040 "in reset", instance_no)); 1041 return (DDI_FAILURE); 1042 } 1043 1044 instance->func_ptr->disable_intr(instance); 1045 1046 con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", 1047 instance_no)); 1048 1049 flush_cache(instance); 1050 1051 return (DDI_SUCCESS); 1052 } 1053 1054 1055 /* 1056 * ************************************************************************** * 1057 * * 1058 * entry points (SCSI HBA) * 1059 * * 1060 * ************************************************************************** * 1061 */ 1062 /*ARGSUSED*/ 1063 static int 1064 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1065 scsi_hba_tran_t *tran, struct scsi_device *sd) 1066 { 1067 struct mrsas_instance *instance; 1068 uint16_t tgt = sd->sd_address.a_target; 1069 uint8_t lun = sd->sd_address.a_lun; 1070 1071 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d", 1072 tgt, lun)); 1073 1074 instance = ADDR2MR(&sd->sd_address); 1075 1076 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 1077 (void) 
ndi_merge_node(tgt_dip, mrsas_name_node); 1078 ddi_set_name_addr(tgt_dip, NULL); 1079 1080 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in " 1081 "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d", 1082 tgt, lun)); 1083 return (DDI_FAILURE); 1084 } 1085 1086 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", 1087 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); 1088 1089 if (tgt < MRDRV_MAX_LD && lun == 0) { 1090 if (instance->mr_ld_list[tgt].dip == NULL && 1091 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { 1092 instance->mr_ld_list[tgt].dip = tgt_dip; 1093 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; 1094 } 1095 } 1096 return (DDI_SUCCESS); 1097 } 1098 1099 /*ARGSUSED*/ 1100 static void 1101 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1102 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1103 { 1104 struct mrsas_instance *instance; 1105 int tgt = sd->sd_address.a_target; 1106 int lun = sd->sd_address.a_lun; 1107 1108 instance = ADDR2MR(&sd->sd_address); 1109 1110 con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); 1111 1112 if (tgt < MRDRV_MAX_LD && lun == 0) { 1113 if (instance->mr_ld_list[tgt].dip == tgt_dip) { 1114 instance->mr_ld_list[tgt].dip = NULL; 1115 } 1116 } 1117 } 1118 1119 static dev_info_t * 1120 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun) 1121 { 1122 dev_info_t *child = NULL; 1123 char addr[SCSI_MAXNAMELEN]; 1124 char tmp[MAXNAMELEN]; 1125 1126 (void) sprintf(addr, "%x,%x", tgt, lun); 1127 for (child = ddi_get_child(instance->dip); child; 1128 child = ddi_get_next_sibling(child)) { 1129 1130 if (mrsas_name_node(child, tmp, MAXNAMELEN) != 1131 DDI_SUCCESS) { 1132 continue; 1133 } 1134 1135 if (strcmp(addr, tmp) == 0) { 1136 break; 1137 } 1138 } 1139 con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p", 1140 (void *)child)); 1141 return (child); 1142 } 1143 1144 static int 1145 mrsas_name_node(dev_info_t *dip, char *name, int len) 1146 { 
	int tgt, lun;

	/* Both properties must exist for the node to be nameable. */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "target", -1);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
	if (tgt == -1) {
		return (DDI_FAILURE);
	}
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "lun", -1);
	con_log(CL_ANN1,
	    (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
	if (lun == -1) {
		return (DDI_FAILURE);
	}
	(void) snprintf(name, len, "%x,%x", tgt, lun);
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_init_pkt - tran_init_pkt(9E) entry point.
 *
 * Allocates (or re-uses) a scsi_pkt and sets up DMA resources for it.
 * When pkt is NULL a fresh packet (with an embedded scsa_cmd) is
 * allocated; otherwise the existing packet is reused and only the DMA
 * step runs.  On DMA failure a freshly allocated packet is freed again
 * before returning NULL.
 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		/* remember we own this pkt so it can be freed on error */
		new_pkt = pkt;
	} else {
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/* only free a pkt we allocated ourselves */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* handle exists: move to the next DMA window */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * mrsas_tran_start - tran_start(9E) entry point.
 *
 * Builds an MFI command for the packet and either queues it to the
 * firmware (interrupt mode) or issues it in polled mode when the target
 * set FLAG_NOINTR.  Returns TRAN_ACCEPT or TRAN_BUSY.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command was already completed inside build_cmd()
	 * itself.  In that case cmd_done is set, no firmware command is
	 * outstanding, and the appropriate reason is already in pkt_reason.
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* no free command slot available right now */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* polled (no-interrupt) path */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MRSAS_TRUE;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* map firmware completion status to SCSI pkt status */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);

		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - tran_abort(9E) entry point.
 * Always fails: command abort is not supported by this hardware.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - tran_reset(9E) entry point.
 * Always fails: target/bus reset is not supported by this hardware.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* reset command not supported by H/W */

	return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - tran_getcap(9E) entry point.
 * Reports the fixed capability set of the controller; returns -1 for
 * unknown or unsupported capability strings.
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int rval = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = mrsas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_setcap - tran_setcap(9E) entry point.
 * Most capabilities are fixed and cannot be changed; returns 1 for the
 * few accepted (but ignored) settable caps, -1 for unknown ones.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_destroy_pkt - tran_destroy_pkt(9E) entry point.
 * Releases any DMA resources bound to the packet, then frees the packet.
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - tran_dmafree(9E) entry point.
 * Releases DMA resources only; the packet itself stays allocated.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * mrsas_tran_sync_pkt - tran_sync_pkt(9E) entry point.
 * Syncs the packet's DMA segment for device or CPU view depending on the
 * transfer direction recorded in cmd_flags.
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;

	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ASSERT(instance);
	/* for fixed interrupts, claim only if the HW acknowledges ours */
	if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
	    !instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* pull the reply queue DMA buffer into the CPU's view */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_CLAIMED);
	}

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* nothing new in the reply queue */
		con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
		DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
		    uint32_t, consumer);
		return (DDI_INTR_CLAIMED);
	}
	mutex_enter(&instance->completed_pool_mtx);

	/* move every completed cmd from the reply ring to the pool list */
	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* ring holds max_fw_cmds + 1 entries; wrap around */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	/* publish the new consumer index back to the firmware */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                  libraries                                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 */
static struct mrsas_cmd *
get_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	/* returns NULL when the free pool is exhausted */
	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->cmd_pool_mtx);

	return (cmd);
}

/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}

/*
 * destroy_mfi_frame_pool
 *
 * Frees the per-command frame DMA objects allocated by
 * create_mfi_frame_pool; safe to call on a partially built pool since
 * each object's status flag is checked first.
 */
static void
destroy_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	struct mrsas_cmd *cmd;

	/* return all frames to pool */
	for (i = 0; i < max_cmd+1; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);

		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool
 *
 * Allocates, for every command slot (max_fw_cmds + 1), a single DMA
 * object holding the MFI frame, its SGL area and a trailing sense
 * buffer, and stores the frame/sense virtual and physical addresses in
 * the mrsas_cmd.  Returns DDI_SUCCESS, DDI_FAILURE, or ENOMEM.
 */
static int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz = sizeof (struct mrsas_sge64);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd+1) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		/* frame must be one contiguous 64-byte aligned segment */
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer occupies the tail of the same allocation */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: pci_pool_alloc failed"));

			return (ENOMEM);
		}

		/* restore the command's context id into the fresh frame */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}

/*
 * free_additional_dma_buffer
 *
 * Releases the internal (reply queue + scratch) buffer and the event
 * detail buffer allocated by alloc_additional_dma_buffer.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer
 *
 * Allocates one two-page DMA buffer laid out as:
 *   [0]   producer index (4 bytes)
 *   [4]   consumer index (4 bytes)
 *   [8]   reply queue (max_fw_cmds + 1 entries of 4 bytes)
 *   [...] internal scratch buffer (remainder of the two pages)
 * plus a separate buffer for firmware event details.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: could not alloc reply queue"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the single allocation into its fixed-offset regions */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi
 *
 * Tears down everything alloc_space_for_mfi built: the additional DMA
 * buffers, the frame pool, every mrsas_cmd and the cmd_list array
 * itself.  Idempotent: returns immediately if already freed.
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds+1; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct mrsas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct mrsas_cmd *) * (max_cmd+1));

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi
 *
 * Allocates the command list (max_fw_cmds + 1 slots, the extra slot is
 * reserved for flush_cache), populates the free command pool, and
 * builds the frame pool and additional DMA buffers.
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
1904 int i; 1905 uint32_t max_cmd; 1906 size_t sz; 1907 1908 struct mrsas_cmd *cmd; 1909 1910 max_cmd = instance->max_fw_cmds; 1911 1912 /* reserve 1 more slot for flush_cache */ 1913 sz = sizeof (struct mrsas_cmd *) * (max_cmd+1); 1914 1915 /* 1916 * instance->cmd_list is an array of struct mrsas_cmd pointers. 1917 * Allocate the dynamic array first and then allocate individual 1918 * commands. 1919 */ 1920 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); 1921 ASSERT(instance->cmd_list); 1922 1923 for (i = 0; i < max_cmd+1; i++) { 1924 instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd), 1925 KM_SLEEP); 1926 ASSERT(instance->cmd_list[i]); 1927 } 1928 1929 INIT_LIST_HEAD(&instance->cmd_pool_list); 1930 1931 /* add all the commands to command pool (instance->cmd_pool) */ 1932 for (i = 0; i < max_cmd; i++) { 1933 cmd = instance->cmd_list[i]; 1934 cmd->index = i; 1935 1936 mlist_add_tail(&cmd->list, &instance->cmd_pool_list); 1937 } 1938 1939 /* single slot for flush_cache won't be added in command pool */ 1940 cmd = instance->cmd_list[max_cmd]; 1941 cmd->index = i; 1942 1943 /* create a frame pool and assign one frame to each cmd */ 1944 if (create_mfi_frame_pool(instance)) { 1945 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool")); 1946 return (DDI_FAILURE); 1947 } 1948 1949 /* create a frame pool and assign one frame to each cmd */ 1950 if (alloc_additional_dma_buffer(instance)) { 1951 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool")); 1952 return (DDI_FAILURE); 1953 } 1954 1955 return (DDI_SUCCESS); 1956 } 1957 1958 /* 1959 * get_ctrl_info 1960 */ 1961 static int 1962 get_ctrl_info(struct mrsas_instance *instance, 1963 struct mrsas_ctrl_info *ctrl_info) 1964 { 1965 int ret = 0; 1966 1967 struct mrsas_cmd *cmd; 1968 struct mrsas_dcmd_frame *dcmd; 1969 struct mrsas_ctrl_info *ci; 1970 1971 cmd = get_mfi_pkt(instance); 1972 1973 if (!cmd) { 1974 con_log(CL_ANN, (CE_WARN, 1975 "Failed to get a cmd for ctrl info")); 1976 
		DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* controller info lands in the instance's internal scratch buffer */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* zero the DCMD mailbox before filling in the request */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* build the GET_INFO DCMD: one read SGE into the scratch buffer */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	/* issue_cmd_in_poll_mode returns 0 on success */
	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
ddi_rep_get8(cmd->frame_dma_obj.acc_handle, 2026 (uint8_t *)(ctrl_info->product_name), 2027 (uint8_t *)(ci->product_name), 80 * sizeof (char), 2028 DDI_DEV_AUTOINCR); 2029 /* should get more members of ci with ddi_get when needed */ 2030 } else { 2031 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed")); 2032 ret = -1; 2033 } 2034 2035 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2036 ret = -1; 2037 } 2038 return_mfi_pkt(instance, cmd); 2039 2040 return (ret); 2041 } 2042 2043 /* 2044 * abort_aen_cmd 2045 */ 2046 static int 2047 abort_aen_cmd(struct mrsas_instance *instance, 2048 struct mrsas_cmd *cmd_to_abort) 2049 { 2050 int ret = 0; 2051 2052 struct mrsas_cmd *cmd; 2053 struct mrsas_abort_frame *abort_fr; 2054 2055 cmd = get_mfi_pkt(instance); 2056 2057 if (!cmd) { 2058 con_log(CL_ANN, (CE_WARN, 2059 "Failed to get a cmd for ctrl info")); 2060 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding, 2061 uint16_t, instance->max_fw_cmds); 2062 return (DDI_FAILURE); 2063 } 2064 /* Clear the frame buffer and assign back the context id */ 2065 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2066 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2067 cmd->index); 2068 2069 abort_fr = &cmd->frame->abort; 2070 2071 /* prepare and issue the abort frame */ 2072 ddi_put8(cmd->frame_dma_obj.acc_handle, 2073 &abort_fr->cmd, MFI_CMD_OP_ABORT); 2074 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status, 2075 MFI_CMD_STATUS_SYNC_MODE); 2076 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0); 2077 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context, 2078 cmd_to_abort->index); 2079 ddi_put32(cmd->frame_dma_obj.acc_handle, 2080 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr); 2081 ddi_put32(cmd->frame_dma_obj.acc_handle, 2082 &abort_fr->abort_mfi_phys_addr_hi, 0); 2083 2084 instance->aen_cmd->abort_aen = 1; 2085 2086 cmd->sync_cmd = MRSAS_TRUE; 2087 
cmd->frame_count = 1; 2088 2089 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2090 con_log(CL_ANN, (CE_WARN, 2091 "abort_aen_cmd: issue_cmd_in_sync_mode failed")); 2092 ret = -1; 2093 } else { 2094 ret = 0; 2095 } 2096 2097 instance->aen_cmd->abort_aen = 1; 2098 instance->aen_cmd = 0; 2099 2100 (void) mrsas_common_check(instance, cmd); 2101 2102 return_mfi_pkt(instance, cmd); 2103 2104 return (ret); 2105 } 2106 2107 /* 2108 * init_mfi 2109 */ 2110 static int 2111 init_mfi(struct mrsas_instance *instance) 2112 { 2113 struct mrsas_cmd *cmd; 2114 struct mrsas_ctrl_info ctrl_info; 2115 struct mrsas_init_frame *init_frame; 2116 struct mrsas_init_queue_info *initq_info; 2117 2118 /* we expect the FW state to be READY */ 2119 if (mfi_state_transition_to_ready(instance)) { 2120 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); 2121 goto fail_ready_state; 2122 } 2123 2124 /* get various operational parameters from status register */ 2125 instance->max_num_sge = 2126 (instance->func_ptr->read_fw_status_reg(instance) & 2127 0xFF0000) >> 0x10; 2128 /* 2129 * Reduce the max supported cmds by 1. This is to ensure that the 2130 * reply_q_sz (1 more than the max cmd that driver may send) 2131 * does not exceed max cmds that the FW can support 2132 */ 2133 instance->max_fw_cmds = 2134 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 2135 instance->max_fw_cmds = instance->max_fw_cmds - 1; 2136 2137 instance->max_num_sge = 2138 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? 2139 MRSAS_MAX_SGE_CNT : instance->max_num_sge; 2140 2141 /* create a pool of commands */ 2142 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) 2143 goto fail_alloc_fw_space; 2144 2145 /* 2146 * Prepare a init frame. Note the init frame points to queue info 2147 * structure. Each frame has SGL allocated after first 64 bytes. 
For 2148 * this frame - since we don't need any SGL - we use SGL's space as 2149 * queue info structure 2150 */ 2151 cmd = get_mfi_pkt(instance); 2152 /* Clear the frame buffer and assign back the context id */ 2153 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2154 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2155 cmd->index); 2156 2157 init_frame = (struct mrsas_init_frame *)cmd->frame; 2158 initq_info = (struct mrsas_init_queue_info *) 2159 ((unsigned long)init_frame + 64); 2160 2161 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 2162 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 2163 2164 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 2165 2166 ddi_put32(cmd->frame_dma_obj.acc_handle, 2167 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 2168 2169 ddi_put32(cmd->frame_dma_obj.acc_handle, 2170 &initq_info->producer_index_phys_addr_hi, 0); 2171 ddi_put32(cmd->frame_dma_obj.acc_handle, 2172 &initq_info->producer_index_phys_addr_lo, 2173 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 2174 2175 ddi_put32(cmd->frame_dma_obj.acc_handle, 2176 &initq_info->consumer_index_phys_addr_hi, 0); 2177 ddi_put32(cmd->frame_dma_obj.acc_handle, 2178 &initq_info->consumer_index_phys_addr_lo, 2179 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 2180 2181 ddi_put32(cmd->frame_dma_obj.acc_handle, 2182 &initq_info->reply_queue_start_phys_addr_hi, 0); 2183 ddi_put32(cmd->frame_dma_obj.acc_handle, 2184 &initq_info->reply_queue_start_phys_addr_lo, 2185 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 2186 2187 ddi_put8(cmd->frame_dma_obj.acc_handle, 2188 &init_frame->cmd, MFI_CMD_OP_INIT); 2189 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 2190 MFI_CMD_STATUS_POLL_MODE); 2191 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 2192 ddi_put32(cmd->frame_dma_obj.acc_handle, 2193 
&init_frame->queue_info_new_phys_addr_lo, 2194 cmd->frame_phys_addr + 64); 2195 ddi_put32(cmd->frame_dma_obj.acc_handle, 2196 &init_frame->queue_info_new_phys_addr_hi, 0); 2197 2198 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 2199 sizeof (struct mrsas_init_queue_info)); 2200 2201 cmd->frame_count = 1; 2202 2203 /* issue the init frame in polled mode */ 2204 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2205 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 2206 return_mfi_pkt(instance, cmd); 2207 goto fail_fw_init; 2208 } 2209 2210 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2211 return_mfi_pkt(instance, cmd); 2212 goto fail_fw_init; 2213 } 2214 2215 return_mfi_pkt(instance, cmd); 2216 2217 /* gather misc FW related information */ 2218 if (!get_ctrl_info(instance, &ctrl_info)) { 2219 instance->max_sectors_per_req = ctrl_info.max_request_size; 2220 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d", 2221 ctrl_info.product_name, ctrl_info.ld_present_count)); 2222 } else { 2223 instance->max_sectors_per_req = instance->max_num_sge * 2224 PAGESIZE / 512; 2225 } 2226 2227 return (DDI_SUCCESS); 2228 2229 fail_fw_init: 2230 fail_alloc_fw_space: 2231 2232 free_space_for_mfi(instance); 2233 2234 fail_ready_state: 2235 ddi_regs_map_free(&instance->regmap_handle); 2236 2237 fail_mfi_reg_setup: 2238 return (DDI_FAILURE); 2239 } 2240 2241 /* 2242 * mfi_state_transition_to_ready : Move the FW to READY state 2243 * 2244 * @reg_set : MFI register set 2245 */ 2246 static int 2247 mfi_state_transition_to_ready(struct mrsas_instance *instance) 2248 { 2249 int i; 2250 uint8_t max_wait; 2251 uint32_t fw_ctrl; 2252 uint32_t fw_state; 2253 uint32_t cur_state; 2254 2255 fw_state = 2256 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK; 2257 con_log(CL_ANN1, (CE_NOTE, 2258 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2259 2260 while (fw_state != MFI_STATE_READY) { 2261 con_log(CL_ANN, 
(CE_NOTE, 2262 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2263 2264 switch (fw_state) { 2265 case MFI_STATE_FAULT: 2266 con_log(CL_ANN, (CE_NOTE, 2267 "mr_sas: FW in FAULT state!!")); 2268 2269 return (ENODEV); 2270 case MFI_STATE_WAIT_HANDSHAKE: 2271 /* set the CLR bit in IMR0 */ 2272 con_log(CL_ANN, (CE_NOTE, 2273 "mr_sas: FW waiting for HANDSHAKE")); 2274 /* 2275 * PCI_Hot Plug: MFI F/W requires 2276 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2277 * to be set 2278 */ 2279 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 2280 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 2281 MFI_INIT_HOTPLUG, instance); 2282 2283 max_wait = 2; 2284 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2285 break; 2286 case MFI_STATE_BOOT_MESSAGE_PENDING: 2287 /* set the CLR bit in IMR0 */ 2288 con_log(CL_ANN, (CE_NOTE, 2289 "mr_sas: FW state boot message pending")); 2290 /* 2291 * PCI_Hot Plug: MFI F/W requires 2292 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2293 * to be set 2294 */ 2295 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 2296 2297 max_wait = 10; 2298 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2299 break; 2300 case MFI_STATE_OPERATIONAL: 2301 /* bring it to READY state; assuming max wait 2 secs */ 2302 instance->func_ptr->disable_intr(instance); 2303 con_log(CL_ANN1, (CE_NOTE, 2304 "mr_sas: FW in OPERATIONAL state")); 2305 /* 2306 * PCI_Hot Plug: MFI F/W requires 2307 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT) 2308 * to be set 2309 */ 2310 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 2311 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 2312 2313 max_wait = 10; 2314 cur_state = MFI_STATE_OPERATIONAL; 2315 break; 2316 case MFI_STATE_UNDEFINED: 2317 /* this state should not last for more than 2 seconds */ 2318 con_log(CL_ANN, (CE_NOTE, "FW state undefined")); 2319 2320 max_wait = 2; 2321 cur_state = MFI_STATE_UNDEFINED; 2322 break; 2323 case MFI_STATE_BB_INIT: 2324 max_wait = 2; 2325 cur_state = MFI_STATE_BB_INIT; 2326 break; 2327 case MFI_STATE_FW_INIT: 
2328 max_wait = 2; 2329 cur_state = MFI_STATE_FW_INIT; 2330 break; 2331 case MFI_STATE_DEVICE_SCAN: 2332 max_wait = 10; 2333 cur_state = MFI_STATE_DEVICE_SCAN; 2334 break; 2335 default: 2336 con_log(CL_ANN, (CE_NOTE, 2337 "mr_sas: Unknown state 0x%x", fw_state)); 2338 return (ENODEV); 2339 } 2340 2341 /* the cur_state should not last for more than max_wait secs */ 2342 for (i = 0; i < (max_wait * MILLISEC); i++) { 2343 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 2344 fw_state = 2345 instance->func_ptr->read_fw_status_reg(instance) & 2346 MFI_STATE_MASK; 2347 2348 if (fw_state == cur_state) { 2349 delay(1 * drv_usectohz(MILLISEC)); 2350 } else { 2351 break; 2352 } 2353 } 2354 2355 /* return error if fw_state hasn't changed after max_wait */ 2356 if (fw_state == cur_state) { 2357 con_log(CL_ANN, (CE_NOTE, 2358 "FW state hasn't changed in %d secs", max_wait)); 2359 return (ENODEV); 2360 } 2361 }; 2362 2363 fw_ctrl = RD_IB_DOORBELL(instance); 2364 2365 con_log(CL_ANN1, (CE_NOTE, 2366 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2367 2368 /* 2369 * Write 0xF to the doorbell register to do the following. 2370 * - Abort all outstanding commands (bit 0). 2371 * - Transition from OPERATIONAL to READY state (bit 1). 2372 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2373 * - Set to release FW to continue running (i.e. BIOS handshake 2374 * (bit 3). 
2375 */ 2376 WR_IB_DOORBELL(0xF, instance); 2377 2378 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2379 return (ENODEV); 2380 } 2381 return (DDI_SUCCESS); 2382 } 2383 2384 /* 2385 * get_seq_num 2386 */ 2387 static int 2388 get_seq_num(struct mrsas_instance *instance, 2389 struct mrsas_evt_log_info *eli) 2390 { 2391 int ret = DDI_SUCCESS; 2392 2393 dma_obj_t dcmd_dma_obj; 2394 struct mrsas_cmd *cmd; 2395 struct mrsas_dcmd_frame *dcmd; 2396 struct mrsas_evt_log_info *eli_tmp; 2397 cmd = get_mfi_pkt(instance); 2398 2399 if (!cmd) { 2400 cmn_err(CE_WARN, "mr_sas: failed to get a cmd"); 2401 DTRACE_PROBE2(seq_num_mfi_err, uint16_t, 2402 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 2403 return (ENOMEM); 2404 } 2405 /* Clear the frame buffer and assign back the context id */ 2406 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2407 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2408 cmd->index); 2409 2410 dcmd = &cmd->frame->dcmd; 2411 2412 /* allocate the data transfer buffer */ 2413 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); 2414 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 2415 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2416 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2417 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2418 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2419 2420 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 2421 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 2422 con_log(CL_ANN, (CE_WARN, 2423 "get_seq_num: could not allocate data transfer buffer.")); 2424 return (DDI_FAILURE); 2425 } 2426 2427 (void) memset(dcmd_dma_obj.buffer, 0, 2428 sizeof (struct mrsas_evt_log_info)); 2429 2430 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2431 2432 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 2433 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); 2434 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 2435 
    /* direction: firmware DMAs the event-log info back to us */
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
        MFI_FRAME_DIR_READ);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
        sizeof (struct mrsas_evt_log_info));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
        MR_DCMD_CTRL_EVENT_GET_INFO);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
        sizeof (struct mrsas_evt_log_info));
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
        dcmd_dma_obj.dma_cookie[0].dmac_address);

    cmd->sync_cmd = MRSAS_TRUE;
    cmd->frame_count = 1;

    if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
        cmn_err(CE_WARN, "get_seq_num: "
            "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
        ret = DDI_FAILURE;
    } else {
        /* copy the newest sequence number out of the DMA buffer */
        eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
        eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
            &eli_tmp->newest_seq_num);
        ret = DDI_SUCCESS;
    }

    /* cleanup failures downgrade an otherwise-successful result */
    if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
        ret = DDI_FAILURE;

    if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
        ret = DDI_FAILURE;
    }

    return_mfi_pkt(instance, cmd);

    return (ret);
}

/*
 * start_mfi_aen : register for asynchronous event notifications (AEN)
 *
 * Fetches the firmware's newest event sequence number, then registers an
 * AEN listener starting at that sequence number plus one.  Returns 0 on
 * success, -1 on failure.
 */
static int
start_mfi_aen(struct mrsas_instance *instance)
{
    int ret = 0;

    struct mrsas_evt_log_info     eli;
    union mrsas_evt_class_locale  class_locale;

    /* get the latest sequence number from FW */
    (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));

    if (get_seq_num(instance, &eli)) {
        cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
        return (-1);
    }

    /* register AEN with FW for latest sequence number plus 1 */
    class_locale.members.reserved = 0;
    class_locale.members.locale   = LE_16(MR_EVT_LOCALE_ALL);
    class_locale.members.class = MR_EVT_CLASS_INFO;
    /* whole word is passed to the firmware little-endian */
    class_locale.word = LE_32(class_locale.word);
    ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
        class_locale.word);

    if (ret) {
        cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
        return (-1);
    }

    return (ret);
}

/*
 * flush_cache : ask the controller to flush its (and the disks') cache
 *
 * Uses the dedicated command slot at cmd_list[max_fw_cmds] — presumably a
 * reserved internal packet (TODO confirm against the allocation code) — so
 * it can run even when the regular pool is exhausted, e.g. at shutdown.
 * Best-effort: a failed poll-mode issue is only logged.
 */
static void
flush_cache(struct mrsas_instance *instance)
{
    struct mrsas_cmd        *cmd = NULL;
    struct mrsas_dcmd_frame *dcmd;
    uint32_t max_cmd = instance->max_fw_cmds;

    cmd = instance->cmd_list[max_cmd];

    if (cmd == NULL)
        return;

    dcmd = &cmd->frame->dcmd;

    (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
        MFI_FRAME_DIR_NONE);
    ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
    ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
        MR_DCMD_CTRL_CACHE_FLUSH);
    ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
        MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

    cmd->frame_count = 1;

    if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
        con_log(CL_ANN1, (CE_WARN,
            "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
    }
    con_log(CL_DLEVEL1, (CE_NOTE, "done"));
}

/*
 * service_mfi_aen- Completes an AEN command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * Logs the event to the sysevent queue, (un)configures logical-drive
 * targets affected by the event, then re-arms the AEN command with the
 * next sequence number and re-issues it to the firmware.
 */
static void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
    uint32_t seq_num;
    struct mrsas_evt_detail *evt_detail =
        (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
    int rval = 0;
    int tgt = 0;
    ddi_acc_handle_t acc_handle;

    acc_handle = cmd->frame_dma_obj.acc_handle;

    cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);

    /* firmware reports ENODATA when no event was pending; not an error */
    if (cmd->cmd_status == ENODATA) {
        cmd->cmd_status = 0;
    }

    /*
     * log the MFI AEN event to the sysevent queue so that
     * application will get noticed
     */
    if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
        NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
        int instance_no = ddi_get_instance(instance->dip);
        con_log(CL_ANN, (CE_WARN,
            "mr_sas%d: Failed to log AEN event", instance_no));
    }
    /*
     * Check for any ld devices that has changed state. i.e. online
     * or offline.
     */
    con_log(CL_ANN1, (CE_NOTE,
        "AEN: code = %x class = %x locale = %x args = %x",
        ddi_get32(acc_handle, &evt_detail->code),
        evt_detail->cl.members.class,
        ddi_get16(acc_handle, &evt_detail->cl.members.locale),
        ddi_get8(acc_handle, &evt_detail->arg_type)));

    switch (ddi_get32(acc_handle, &evt_detail->code)) {
    case MR_EVT_CFG_CLEARED: {
        /* configuration wiped: unconfigure every attached LD target */
        for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
            if (instance->mr_ld_list[tgt].dip != NULL) {
                rval = mrsas_service_evt(instance, tgt, 0,
                    MRSAS_EVT_UNCONFIG_TGT, NULL);
                con_log(CL_ANN1, (CE_WARN,
                    "mr_sas: CFG CLEARED AEN rval = %d "
                    "tgt id = %d", rval, tgt));
            }
        }
        break;
    }

    case MR_EVT_LD_DELETED: {
        rval = mrsas_service_evt(instance,
            ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
            MRSAS_EVT_UNCONFIG_TGT, NULL);
        con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
            "tgt id = %d index = %d", rval,
            ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
            ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
        break;
    } /* End of MR_EVT_LD_DELETED */

    case MR_EVT_LD_CREATED: {
        rval = mrsas_service_evt(instance,
            ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
            MRSAS_EVT_CONFIG_TGT, NULL);
        con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
            "tgt id = %d index = %d", rval,
            ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
            ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
        break;
    } /* End of MR_EVT_LD_CREATED */
    } /* End of Main Switch */

    /* get copy of seq_num and class/locale for re-registration */
    seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
    seq_num++;
    (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
        sizeof (struct mrsas_evt_detail));

    /* re-arm the same frame: clear status, ask for the next event */
    ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
    ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

    instance->aen_seq_num = seq_num;

    cmd->frame_count = 1;

    /* Issue the aen registration frame */
    instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode - Completes an internal command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
    cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
        &cmd->frame->io.cmd_status);

    cmd->sync_cmd = MRSAS_FALSE;

    /* ENODATA from firmware is treated as success here */
    if (cmd->cmd_status == ENODATA) {
        cmd->cmd_status = 0;
    }

    /* wake the thread blocked in issue_cmd_in_sync_mode() */
    cv_broadcast(&instance->int_cmd_cv);
}

/*
 * mrsas_softintr - The Software ISR
 * @param arg : HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
    struct scsi_pkt        *pkt;
    struct scsa_cmd        *acmd;
    struct mrsas_cmd       *cmd;
    struct mlist_head      *pos, *next;
    mlist_t                process_list;
    struct mrsas_header    *hdr;
    struct scsi_arq_status *arqstat;

    con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called"));

    ASSERT(instance);
    mutex_enter(&instance->completed_pool_mtx);

    if (mlist_empty(&instance->completed_pool_list)) {
        mutex_exit(&instance->completed_pool_mtx);
        return (DDI_INTR_CLAIMED);
    }

    instance->softint_running = 1;

    /*
     * Splice the completed commands onto a private list so the pool
     * mutex can be dropped while the completions are processed.
     */
    INIT_LIST_HEAD(&process_list);
    mlist_splice(&instance->completed_pool_list, &process_list);
    INIT_LIST_HEAD(&instance->completed_pool_list);

    mutex_exit(&instance->completed_pool_mtx);

    /* perform all callbacks first, before releasing the SCBs */
    mlist_for_each_safe(pos, next, &process_list) {
        cmd = mlist_entry(pos, struct mrsas_cmd, list);

        /* syncronize the Cmd frame for the controller */
        (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
            0, 0, DDI_DMA_SYNC_FORCPU);

        if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
            DDI_SUCCESS) {
            mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
            ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
            /*
             * NOTE(review): this bails out mid-list on a faulted DMA
             * handle, leaving softint_running set and the remaining
             * entries unprocessed — presumably acceptable because the
             * service is already marked lost; confirm before changing.
             */
            return (DDI_INTR_CLAIMED);
        }

        hdr = &cmd->frame->hdr;

        /* remove the internal command from the process list */
        mlist_del_init(&cmd->list);

        switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
        case MFI_CMD_OP_PD_SCSI:
        case MFI_CMD_OP_LD_SCSI:
        case MFI_CMD_OP_LD_READ:
        case MFI_CMD_OP_LD_WRITE:
            /*
             * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
             * could have been issued either through an
             * IO path or an IOCTL path. If it was via IOCTL,
             * we will send it to internal completion.
             */
            if (cmd->sync_cmd == MRSAS_TRUE) {
                complete_cmd_in_sync_mode(instance, cmd);
                break;
            }

            /* regular commands */
            acmd = cmd->cmd;
            pkt = CMD2PKT(acmd);

            if (acmd->cmd_flags & CFLAG_DMAVALID) {
                if (acmd->cmd_flags & CFLAG_CONSISTENT) {
                    (void) ddi_dma_sync(acmd->cmd_dmahandle,
                        acmd->cmd_dma_offset,
                        acmd->cmd_dma_len,
                        DDI_DMA_SYNC_FORCPU);
                }
            }

            pkt->pkt_reason = CMD_CMPLT;
            pkt->pkt_statistics = 0;
            pkt->pkt_state = STATE_GOT_BUS
                | STATE_GOT_TARGET | STATE_SENT_CMD
                | STATE_XFERRED_DATA | STATE_GOT_STATUS;

            con_log(CL_ANN1, (CE_CONT,
                "CDB[0] = %x completed for %s: size %lx context %x",
                pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
                acmd->cmd_dmacount, hdr->context));
            DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
                uint_t, acmd->cmd_cdblen, ulong_t,
                acmd->cmd_dmacount);

            if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
                struct scsi_inquiry *inq;

                if (acmd->cmd_dmacount != 0) {
                    bp_mapin(acmd->cmd_buf);
                    inq = (struct scsi_inquiry *)
                        acmd->cmd_buf->b_un.b_addr;

                    /* don't expose physical drives to OS */
                    if (acmd->islogical &&
                        (hdr->cmd_status == MFI_STAT_OK)) {
                        display_scsi_inquiry(
                            (caddr_t)inq);
                    } else if ((hdr->cmd_status ==
                        MFI_STAT_OK) && inq->inq_dtype ==
                        DTYPE_DIRECT) {

                        display_scsi_inquiry(
                            (caddr_t)inq);

                        /* for physical disk */
                        hdr->cmd_status =
                            MFI_STAT_DEVICE_NOT_FOUND;
                    }
                }
            }

            DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
                uint8_t, hdr->cmd_status);

            /* map firmware completion status onto SCSA pkt fields */
            switch (hdr->cmd_status) {
            case MFI_STAT_OK:
                pkt->pkt_scbp[0] = STATUS_GOOD;
                break;
            case MFI_STAT_LD_CC_IN_PROGRESS:
            case MFI_STAT_LD_RECON_IN_PROGRESS:
                pkt->pkt_scbp[0] = STATUS_GOOD;
                break;
            case MFI_STAT_LD_INIT_IN_PROGRESS:
                con_log(CL_ANN,
                    (CE_WARN, "Initialization in Progress"));
                pkt->pkt_reason = CMD_TRAN_ERR;

                break;
            case MFI_STAT_SCSI_DONE_WITH_ERROR:
                con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

                pkt->pkt_reason = CMD_CMPLT;
                ((struct scsi_status *)
                    pkt->pkt_scbp)->sts_chk = 1;

                if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

                    con_log(CL_ANN,
                        (CE_WARN, "TEST_UNIT_READY fail"));

                } else {
                    /* hand back auto-request-sense data */
                    pkt->pkt_state |= STATE_ARQ_DONE;
                    arqstat = (void *)(pkt->pkt_scbp);
                    arqstat->sts_rqpkt_reason = CMD_CMPLT;
                    arqstat->sts_rqpkt_resid = 0;
                    arqstat->sts_rqpkt_state |=
                        STATE_GOT_BUS | STATE_GOT_TARGET
                        | STATE_SENT_CMD
                        | STATE_XFERRED_DATA;
                    *(uint8_t *)&arqstat->sts_rqpkt_status =
                        STATUS_GOOD;
                    ddi_rep_get8(
                        cmd->frame_dma_obj.acc_handle,
                        (uint8_t *)
                        &(arqstat->sts_sensedata),
                        cmd->sense,
                        acmd->cmd_scblen -
                        offsetof(struct scsi_arq_status,
                        sts_sensedata), DDI_DEV_AUTOINCR);
                }
                break;
            case MFI_STAT_LD_OFFLINE:
            case MFI_STAT_DEVICE_NOT_FOUND:
                con_log(CL_ANN1, (CE_CONT,
                    "device not found error"));
                pkt->pkt_reason = CMD_DEV_GONE;
                pkt->pkt_statistics = STAT_DISCON;
                break;
            case MFI_STAT_LD_LBA_OUT_OF_RANGE:
                pkt->pkt_state |= STATE_ARQ_DONE;
                pkt->pkt_reason = CMD_CMPLT;
                ((struct scsi_status *)
                    pkt->pkt_scbp)->sts_chk = 1;

                arqstat = (void *)(pkt->pkt_scbp);
                arqstat->sts_rqpkt_reason = CMD_CMPLT;
                arqstat->sts_rqpkt_resid = 0;
                arqstat->sts_rqpkt_state |= STATE_GOT_BUS
                    | STATE_GOT_TARGET | STATE_SENT_CMD
                    | STATE_XFERRED_DATA;
                *(uint8_t *)&arqstat->sts_rqpkt_status =
                    STATUS_GOOD;

                arqstat->sts_sensedata.es_valid = 1;
                arqstat->sts_sensedata.es_key =
                    KEY_ILLEGAL_REQUEST;
                arqstat->sts_sensedata.es_class =
                    CLASS_EXTENDED_SENSE;

                /*
                 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
                 * ASC: 0x21h; ASCQ: 0x00h;
                 */
                arqstat->sts_sensedata.es_add_code = 0x21;
                arqstat->sts_sensedata.es_qual_code = 0x00;

                break;

            default:
                con_log(CL_ANN, (CE_CONT, "Unknown status!"));
                pkt->pkt_reason = CMD_TRAN_ERR;

                break;
            }

            atomic_add_16(&instance->fw_outstanding, (-1));

            (void) mrsas_common_check(instance, cmd);

            if (acmd->cmd_dmahandle) {
                if (mrsas_check_dma_handle(
                    acmd->cmd_dmahandle) != DDI_SUCCESS) {
                    ddi_fm_service_impact(instance->dip,
                        DDI_SERVICE_UNAFFECTED);
                    pkt->pkt_reason = CMD_TRAN_ERR;
                    pkt->pkt_statistics = 0;
                }
            }

            return_mfi_pkt(instance, cmd);

            /* Call the callback routine */
            if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
                pkt->pkt_comp) {
                (*pkt->pkt_comp)(pkt);
            }

            break;
        case MFI_CMD_OP_SMP:
        case MFI_CMD_OP_STP:
            complete_cmd_in_sync_mode(instance, cmd);
            break;
        case MFI_CMD_OP_DCMD:
            /* see if got an event notification */
            if (ddi_get32(cmd->frame_dma_obj.acc_handle,
                &cmd->frame->dcmd.opcode) ==
                MR_DCMD_CTRL_EVENT_WAIT) {
                if ((instance->aen_cmd == cmd) &&
                    (instance->aen_cmd->abort_aen)) {
                    con_log(CL_ANN, (CE_WARN,
                        "mrsas_softintr: "
                        "aborted_aen returned"));
                } else {
                    atomic_add_16(&instance->fw_outstanding,
                        (-1));
                    /* re-arms and re-issues the AEN frame */
                    service_mfi_aen(instance, cmd);
                }
            } else {
                complete_cmd_in_sync_mode(instance, cmd);
            }

            break;
        case MFI_CMD_OP_ABORT:
            con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
            /*
             * MFI_CMD_OP_ABORT successfully completed
             * in the synchronous mode
             */
            complete_cmd_in_sync_mode(instance, cmd);
            break;
        default:
            /* unrecognized frame type: report and complete the pkt */
            mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
            ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

            if (cmd->pkt != NULL) {
                pkt = cmd->pkt;
                if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
                    pkt->pkt_comp) {
                    (*pkt->pkt_comp)(pkt);
                }
            }
            con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
            break;
        }
    }

    instance->softint_running = 0;

    return (DDI_INTR_CLAIMED);
}

/*
 * mrsas_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 *
 * Allocates a DMA handle, DMA-able memory of obj->size bytes with the
 * requested endianness, and binds the memory, storing the first cookie in
 * obj->dma_cookie[0].  Returns the cookie count on success, -1 on failure.
 */
static int
mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
    uchar_t endian_flags)
{
    int    i;
    size_t alen = 0;
    uint_t cookie_cnt;
    struct ddi_device_acc_attr tmp_endian_attr;

    tmp_endian_attr = endian_attr;
    tmp_endian_attr.devacc_attr_endian_flags = endian_flags;

    i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
        DDI_DMA_SLEEP, NULL, &obj->dma_handle);
    if (i != DDI_SUCCESS) {

        switch (i) {
        case DDI_DMA_BADATTR:
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle- Bad attribute"));
            break;
        case DDI_DMA_NORESOURCES:
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle- No Resources"));
            break;
        default:
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle: "
                "unknown status %d", i));
            break;
        }

        return (-1);
    }

    if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
        DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
        &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
        alen < obj->size) {

        ddi_dma_free_handle(&obj->dma_handle);

        con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

        return (-1);
    }

    if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
        obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
        NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

        ddi_dma_mem_free(&obj->acc_handle);
        ddi_dma_free_handle(&obj->dma_handle);

        con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

        return (-1);
    }

    /* FMA: verify both handles before handing the object back */
    if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (-1);
    }

    if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (-1);
    }

    return (cookie_cnt);
}

/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mrsas_alloc_dma_obj()
 */
static int
mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
{

    /* FMA check both handles before tearing anything down */
    if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        return (DDI_FAILURE);
    }

    if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        return (DDI_FAILURE);
    }

    /* unwind in reverse order of mrsas_alloc_dma_obj() */
    (void) ddi_dma_unbind_handle(obj.dma_handle);
    ddi_dma_mem_free(&obj.acc_handle);
    ddi_dma_free_handle(&obj.dma_handle);

    return (DDI_SUCCESS);
}

/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 */
static int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
    int dma_flags;
    int (*cb)(caddr_t);
    int i;

    ddi_dma_attr_t  tmp_dma_attr = mrsas_generic_dma_attr;
    struct scsa_cmd *acmd = PKT2CMD(pkt);

    acmd->cmd_buf = bp;

    /* transfer direction comes from the buf, not the pkt */
    if (bp->b_flags & B_READ) {
        acmd->cmd_flags &= ~CFLAG_DMASEND;
        dma_flags = DDI_DMA_READ;
    } else {
        acmd->cmd_flags |= CFLAG_DMASEND;
        dma_flags = DDI_DMA_WRITE;
    }

    if (flags & PKT_CONSISTENT) {
        acmd->cmd_flags |= CFLAG_CONSISTENT;
        dma_flags |= DDI_DMA_CONSISTENT;
    }

    if (flags & PKT_DMA_PARTIAL) {
        dma_flags |= DDI_DMA_PARTIAL;
    }

    dma_flags |= DDI_DMA_REDZONE;

    /* honor the target driver's callback choice for resource waits */
    cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

    tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
    tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

    if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
        cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
        switch (i) {
        case DDI_DMA_BADATTR:
            bioerror(bp, EFAULT);
            return (DDI_FAILURE);

        case DDI_DMA_NORESOURCES:
            bioerror(bp, 0);
            return (DDI_FAILURE);

        default:
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
                "impossible result (0x%x)", i));
            bioerror(bp, EFAULT);
            return (DDI_FAILURE);
        }
    }

    i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
        cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

    switch (i) {
    case DDI_DMA_PARTIAL_MAP:
        if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
                "DDI_DMA_PARTIAL_MAP impossible"));
            goto no_dma_cookies;
        }

        if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
            DDI_FAILURE) {
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
            goto no_dma_cookies;
        }

        if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
            &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
            &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
            DDI_FAILURE) {

            con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
            goto no_dma_cookies;
        }

        goto get_dma_cookies;
    case DDI_DMA_MAPPED:
        acmd->cmd_nwin = 1;
        acmd->cmd_dma_len = 0;
        acmd->cmd_dma_offset = 0;

get_dma_cookies:
        /* gather cookies up to the adapter's SGE limit */
        i = 0;
        acmd->cmd_dmacount = 0;
        for (;;) {
            acmd->cmd_dmacount +=
                acmd->cmd_dmacookies[i++].dmac_size;

            if (i == instance->max_num_sge ||
                i == acmd->cmd_ncookies)
                break;

            ddi_dma_nextcookie(acmd->cmd_dmahandle,
                &acmd->cmd_dmacookies[i]);
        }

        acmd->cmd_cookie = i;
        acmd->cmd_cookiecnt = i;

        acmd->cmd_flags |= CFLAG_DMAVALID;

        /* anything not mapped this window is left as pkt_resid */
        if (bp->b_bcount >= acmd->cmd_dmacount) {
            pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
        } else {
            pkt->pkt_resid = 0;
        }

        return (DDI_SUCCESS);
    case DDI_DMA_NORESOURCES:
        bioerror(bp, 0);
        break;
    case DDI_DMA_NOMAPPING:
        bioerror(bp, EFAULT);
        break;
    case DDI_DMA_TOOBIG:
        bioerror(bp, EINVAL);
        break;
    case DDI_DMA_INUSE:
        con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
            " DDI_DMA_INUSE impossible"));
        break;
    default:
        con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
            "impossible result (0x%x)", i));
        break;
    }

no_dma_cookies:
    ddi_dma_free_handle(&acmd->cmd_dmahandle);
    acmd->cmd_dmahandle = NULL;
    acmd->cmd_flags &= ~CFLAG_DMAVALID;
    return (DDI_FAILURE);
}

/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
    int i = 0;

    struct scsa_cmd *acmd = PKT2CMD(pkt);

    /*
     * If there are no more cookies remaining in this window,
     * must move to the next window first.
     */
    if (acmd->cmd_cookie == acmd->cmd_ncookies) {
        /* single fully-consumed window: nothing left to move */
        if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
            return (DDI_SUCCESS);
        }

        /* at last window, cannot move */
        if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
            return (DDI_FAILURE);
        }

        if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
            &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
            &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
            DDI_FAILURE) {
            return (DDI_FAILURE);
        }

        acmd->cmd_cookie = 0;
    } else {
        /* still more cookies in this window - get the next one */
        ddi_dma_nextcookie(acmd->cmd_dmahandle,
            &acmd->cmd_dmacookies[0]);
    }

    /* get remaining cookies in this window, up to our maximum */
    for (;;) {
        acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
        acmd->cmd_cookie++;

        if (i == instance->max_num_sge ||
            acmd->cmd_cookie == acmd->cmd_ncookies) {
            break;
        }

        ddi_dma_nextcookie(acmd->cmd_dmahandle,
            &acmd->cmd_dmacookies[i]);
    }

    acmd->cmd_cookiecnt = i;

    if (bp->b_bcount >= acmd->cmd_dmacount) {
        pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
    } else {
        pkt->pkt_resid = 0;
    }

    return (DDI_SUCCESS);
}

/*
 * build_cmd : translate a SCSA packet into an MFI frame
 *
 * Returns the prepared MFI command, or NULL (with *cmd_done set when the
 * request was fully handled in the driver, e.g. emulated MODE SENSE).
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
    uint16_t flags = 0;
    uint32_t i;
    uint32_t context;
    uint32_t sge_bytes;
    ddi_acc_handle_t acc_handle;
    struct mrsas_cmd         *cmd;
    struct mrsas_sge64       *mfi_sgl;
    struct scsa_cmd          *acmd = PKT2CMD(pkt);
    struct mrsas_pthru_frame *pthru;
    struct mrsas_io_frame    *ldio;

    /* find out if this is logical or physical drive command.
*/ 3305 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3306 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3307 *cmd_done = 0; 3308 3309 /* get the command packet */ 3310 if (!(cmd = get_mfi_pkt(instance))) { 3311 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t, 3312 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3313 return (NULL); 3314 } 3315 3316 acc_handle = cmd->frame_dma_obj.acc_handle; 3317 3318 /* Clear the frame buffer and assign back the context id */ 3319 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3320 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3321 3322 cmd->pkt = pkt; 3323 cmd->cmd = acmd; 3324 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0], 3325 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len); 3326 3327 /* lets get the command directions */ 3328 if (acmd->cmd_flags & CFLAG_DMASEND) { 3329 flags = MFI_FRAME_DIR_WRITE; 3330 3331 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3332 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3333 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3334 DDI_DMA_SYNC_FORDEV); 3335 } 3336 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3337 flags = MFI_FRAME_DIR_READ; 3338 3339 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3340 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3341 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3342 DDI_DMA_SYNC_FORCPU); 3343 } 3344 } else { 3345 flags = MFI_FRAME_DIR_NONE; 3346 } 3347 3348 flags |= MFI_FRAME_SGL64; 3349 3350 switch (pkt->pkt_cdbp[0]) { 3351 3352 /* 3353 * case SCMD_SYNCHRONIZE_CACHE: 3354 * flush_cache(instance); 3355 * return_mfi_pkt(instance, cmd); 3356 * *cmd_done = 1; 3357 * 3358 * return (NULL); 3359 */ 3360 3361 case SCMD_READ: 3362 case SCMD_WRITE: 3363 case SCMD_READ_G1: 3364 case SCMD_WRITE_G1: 3365 if (acmd->islogical) { 3366 ldio = (struct mrsas_io_frame *)cmd->frame; 3367 3368 /* 3369 * preare the Logical IO frame: 3370 * 2nd bit is zero for all read cmds 3371 */ 3372 ddi_put8(acc_handle, &ldio->cmd, 3373 (pkt->pkt_cdbp[0] & 0x02) ? 
MFI_CMD_OP_LD_WRITE 3374 : MFI_CMD_OP_LD_READ); 3375 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3376 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3377 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3378 ddi_put16(acc_handle, &ldio->timeout, 0); 3379 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3380 ddi_put16(acc_handle, &ldio->pad_0, 0); 3381 ddi_put16(acc_handle, &ldio->flags, flags); 3382 3383 /* Initialize sense Information */ 3384 bzero(cmd->sense, SENSE_LENGTH); 3385 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3386 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3387 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3388 cmd->sense_phys_addr); 3389 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3390 ddi_put8(acc_handle, &ldio->access_byte, 3391 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 3392 ddi_put8(acc_handle, &ldio->sge_count, 3393 acmd->cmd_cookiecnt); 3394 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 3395 3396 context = ddi_get32(acc_handle, &ldio->context); 3397 3398 if (acmd->cmd_cdblen == CDB_GROUP0) { 3399 ddi_put32(acc_handle, &ldio->lba_count, ( 3400 (uint16_t)(pkt->pkt_cdbp[4]))); 3401 3402 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3403 ((uint32_t)(pkt->pkt_cdbp[3])) | 3404 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3405 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3406 << 16))); 3407 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3408 ddi_put32(acc_handle, &ldio->lba_count, ( 3409 ((uint16_t)(pkt->pkt_cdbp[8])) | 3410 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 3411 3412 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3413 ((uint32_t)(pkt->pkt_cdbp[5])) | 3414 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3415 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3416 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3417 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3418 ddi_put32(acc_handle, &ldio->lba_count, ( 3419 ((uint16_t)(pkt->pkt_cdbp[9])) | 3420 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3421 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3422 
((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 3423 3424 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3425 ((uint32_t)(pkt->pkt_cdbp[5])) | 3426 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3427 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3428 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3429 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3430 ddi_put32(acc_handle, &ldio->lba_count, ( 3431 ((uint16_t)(pkt->pkt_cdbp[13])) | 3432 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3433 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3434 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 3435 3436 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3437 ((uint32_t)(pkt->pkt_cdbp[9])) | 3438 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3439 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3440 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 3441 3442 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3443 ((uint32_t)(pkt->pkt_cdbp[5])) | 3444 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3445 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3446 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3447 } 3448 3449 break; 3450 } 3451 /* fall through For all non-rd/wr cmds */ 3452 default: 3453 3454 switch (pkt->pkt_cdbp[0]) { 3455 case SCMD_MODE_SENSE: 3456 case SCMD_MODE_SENSE_G1: { 3457 union scsi_cdb *cdbp; 3458 uint16_t page_code; 3459 3460 cdbp = (void *)pkt->pkt_cdbp; 3461 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 3462 switch (page_code) { 3463 case 0x3: 3464 case 0x4: 3465 (void) mrsas_mode_sense_build(pkt); 3466 return_mfi_pkt(instance, cmd); 3467 *cmd_done = 1; 3468 return (NULL); 3469 } 3470 break; 3471 } 3472 default: 3473 break; 3474 } 3475 3476 pthru = (struct mrsas_pthru_frame *)cmd->frame; 3477 3478 /* prepare the DCDB frame */ 3479 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
3480 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 3481 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 3482 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 3483 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 3484 ddi_put8(acc_handle, &pthru->lun, 0); 3485 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 3486 ddi_put16(acc_handle, &pthru->timeout, 0); 3487 ddi_put16(acc_handle, &pthru->flags, flags); 3488 ddi_put32(acc_handle, &pthru->data_xfer_len, 3489 acmd->cmd_dmacount); 3490 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 3491 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 3492 3493 bzero(cmd->sense, SENSE_LENGTH); 3494 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 3495 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3496 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 3497 cmd->sense_phys_addr); 3498 3499 context = ddi_get32(acc_handle, &pthru->context); 3500 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 3501 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 3502 3503 break; 3504 } 3505 #ifdef lint 3506 context = context; 3507 #endif 3508 /* prepare the scatter-gather list for the firmware */ 3509 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3510 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 3511 acmd->cmd_dmacookies[i].dmac_laddress); 3512 ddi_put32(acc_handle, &mfi_sgl->length, 3513 acmd->cmd_dmacookies[i].dmac_size); 3514 } 3515 3516 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 3517 3518 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 3519 ((sge_bytes % MRMFI_FRAME_SIZE) ? 
1 : 0) + 1; 3520 3521 if (cmd->frame_count >= 8) { 3522 cmd->frame_count = 8; 3523 } 3524 3525 return (cmd); 3526 } 3527 3528 /* 3529 * issue_mfi_pthru 3530 */ 3531 static int 3532 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3533 struct mrsas_cmd *cmd, int mode) 3534 { 3535 void *ubuf; 3536 uint32_t kphys_addr = 0; 3537 uint32_t xferlen = 0; 3538 uint_t model; 3539 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3540 dma_obj_t pthru_dma_obj; 3541 struct mrsas_pthru_frame *kpthru; 3542 struct mrsas_pthru_frame *pthru; 3543 int i; 3544 pthru = &cmd->frame->pthru; 3545 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 3546 3547 model = ddi_model_convert_from(mode & FMODELS); 3548 if (model == DDI_MODEL_ILP32) { 3549 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3550 3551 xferlen = kpthru->sgl.sge32[0].length; 3552 3553 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3554 } else { 3555 #ifdef _ILP32 3556 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3557 xferlen = kpthru->sgl.sge32[0].length; 3558 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3559 #else 3560 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3561 xferlen = kpthru->sgl.sge64[0].length; 3562 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3563 #endif 3564 } 3565 3566 if (xferlen) { 3567 /* means IOCTL requires DMA */ 3568 /* allocate the data transfer buffer */ 3569 pthru_dma_obj.size = xferlen; 3570 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 3571 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3572 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3573 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3574 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3575 3576 /* allocate kernel buffer for DMA */ 3577 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 3578 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3579 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3580 "could not allocate 
data transfer buffer.")); 3581 return (DDI_FAILURE); 3582 } 3583 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 3584 3585 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3586 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3587 for (i = 0; i < xferlen; i++) { 3588 if (ddi_copyin((uint8_t *)ubuf+i, 3589 (uint8_t *)pthru_dma_obj.buffer+i, 3590 1, mode)) { 3591 con_log(CL_ANN, (CE_WARN, 3592 "issue_mfi_pthru : " 3593 "copy from user space failed")); 3594 return (DDI_FAILURE); 3595 } 3596 } 3597 } 3598 3599 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3600 } 3601 3602 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 3603 ddi_put8(acc_handle, &pthru->sense_len, 0); 3604 ddi_put8(acc_handle, &pthru->cmd_status, 0); 3605 ddi_put8(acc_handle, &pthru->scsi_status, 0); 3606 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 3607 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 3608 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 3609 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 3610 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 3611 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 3612 3613 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3614 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3615 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 3616 3617 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 3618 pthru->cdb_len, DDI_DEV_AUTOINCR); 3619 3620 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 3621 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 3622 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 3623 3624 cmd->sync_cmd = MRSAS_TRUE; 3625 cmd->frame_count = 1; 3626 3627 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3628 con_log(CL_ANN, (CE_WARN, 3629 "issue_mfi_pthru: fw_ioctl failed")); 3630 } else { 3631 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 
3632 for (i = 0; i < xferlen; i++) { 3633 if (ddi_copyout( 3634 (uint8_t *)pthru_dma_obj.buffer+i, 3635 (uint8_t *)ubuf+i, 1, mode)) { 3636 con_log(CL_ANN, (CE_WARN, 3637 "issue_mfi_pthru : " 3638 "copy to user space failed")); 3639 return (DDI_FAILURE); 3640 } 3641 } 3642 } 3643 } 3644 3645 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 3646 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 3647 3648 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3649 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 3650 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, 3651 kpthru->cmd_status, uint8_t, kpthru->scsi_status); 3652 3653 if (xferlen) { 3654 /* free kernel buffer */ 3655 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3656 return (DDI_FAILURE); 3657 } 3658 3659 return (DDI_SUCCESS); 3660 } 3661 3662 /* 3663 * issue_mfi_dcmd 3664 */ 3665 static int 3666 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3667 struct mrsas_cmd *cmd, int mode) 3668 { 3669 void *ubuf; 3670 uint32_t kphys_addr = 0; 3671 uint32_t xferlen = 0; 3672 uint32_t model; 3673 dma_obj_t dcmd_dma_obj; 3674 struct mrsas_dcmd_frame *kdcmd; 3675 struct mrsas_dcmd_frame *dcmd; 3676 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3677 int i; 3678 dcmd = &cmd->frame->dcmd; 3679 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 3680 3681 model = ddi_model_convert_from(mode & FMODELS); 3682 if (model == DDI_MODEL_ILP32) { 3683 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3684 3685 xferlen = kdcmd->sgl.sge32[0].length; 3686 3687 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3688 } else { 3689 #ifdef _ILP32 3690 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3691 xferlen = kdcmd->sgl.sge32[0].length; 3692 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3693 #else 3694 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3695 
xferlen = kdcmd->sgl.sge64[0].length; 3696 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 3697 #endif 3698 } 3699 if (xferlen) { 3700 /* means IOCTL requires DMA */ 3701 /* allocate the data transfer buffer */ 3702 dcmd_dma_obj.size = xferlen; 3703 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 3704 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3705 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3706 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3707 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3708 3709 /* allocate kernel buffer for DMA */ 3710 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 3711 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3712 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3713 "could not allocate data transfer buffer.")); 3714 return (DDI_FAILURE); 3715 } 3716 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 3717 3718 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3719 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3720 for (i = 0; i < xferlen; i++) { 3721 if (ddi_copyin((uint8_t *)ubuf + i, 3722 (uint8_t *)dcmd_dma_obj.buffer + i, 3723 1, mode)) { 3724 con_log(CL_ANN, (CE_WARN, 3725 "issue_mfi_dcmd : " 3726 "copy from user space failed")); 3727 return (DDI_FAILURE); 3728 } 3729 } 3730 } 3731 3732 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3733 } 3734 3735 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 3736 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 3737 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 3738 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 3739 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 3740 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 3741 3742 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 3743 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 3744 3745 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 3746 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 3747 ddi_put32(acc_handle, 
&dcmd->sgl.sge32[0].phys_addr, kphys_addr); 3748 3749 cmd->sync_cmd = MRSAS_TRUE; 3750 cmd->frame_count = 1; 3751 3752 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3753 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); 3754 } else { 3755 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3756 for (i = 0; i < xferlen; i++) { 3757 if (ddi_copyout( 3758 (uint8_t *)dcmd_dma_obj.buffer + i, 3759 (uint8_t *)ubuf + i, 3760 1, mode)) { 3761 con_log(CL_ANN, (CE_WARN, 3762 "issue_mfi_dcmd : " 3763 "copy to user space failed")); 3764 return (DDI_FAILURE); 3765 } 3766 } 3767 } 3768 } 3769 3770 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 3771 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t, 3772 kdcmd->cmd, uint8_t, kdcmd->cmd_status); 3773 3774 if (xferlen) { 3775 /* free kernel buffer */ 3776 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3777 return (DDI_FAILURE); 3778 } 3779 3780 return (DDI_SUCCESS); 3781 } 3782 3783 /* 3784 * issue_mfi_smp 3785 */ 3786 static int 3787 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3788 struct mrsas_cmd *cmd, int mode) 3789 { 3790 void *request_ubuf; 3791 void *response_ubuf; 3792 uint32_t request_xferlen = 0; 3793 uint32_t response_xferlen = 0; 3794 uint_t model; 3795 dma_obj_t request_dma_obj; 3796 dma_obj_t response_dma_obj; 3797 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3798 struct mrsas_smp_frame *ksmp; 3799 struct mrsas_smp_frame *smp; 3800 struct mrsas_sge32 *sge32; 3801 #ifndef _ILP32 3802 struct mrsas_sge64 *sge64; 3803 #endif 3804 int i; 3805 uint64_t tmp_sas_addr; 3806 3807 smp = &cmd->frame->smp; 3808 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; 3809 3810 model = ddi_model_convert_from(mode & FMODELS); 3811 if (model == DDI_MODEL_ILP32) { 3812 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3813 3814 sge32 = &ksmp->sgl[0].sge32[0]; 3815 response_xferlen = sge32[0].length; 3816 
request_xferlen = sge32[1].length; 3817 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3818 "response_xferlen = %x, request_xferlen = %x", 3819 response_xferlen, request_xferlen)); 3820 3821 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3822 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3823 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3824 "response_ubuf = %p, request_ubuf = %p", 3825 response_ubuf, request_ubuf)); 3826 } else { 3827 #ifdef _ILP32 3828 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3829 3830 sge32 = &ksmp->sgl[0].sge32[0]; 3831 response_xferlen = sge32[0].length; 3832 request_xferlen = sge32[1].length; 3833 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3834 "response_xferlen = %x, request_xferlen = %x", 3835 response_xferlen, request_xferlen)); 3836 3837 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3838 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3839 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3840 "response_ubuf = %p, request_ubuf = %p", 3841 response_ubuf, request_ubuf)); 3842 #else 3843 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 3844 3845 sge64 = &ksmp->sgl[0].sge64[0]; 3846 response_xferlen = sge64[0].length; 3847 request_xferlen = sge64[1].length; 3848 3849 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 3850 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 3851 #endif 3852 } 3853 if (request_xferlen) { 3854 /* means IOCTL requires DMA */ 3855 /* allocate the data transfer buffer */ 3856 request_dma_obj.size = request_xferlen; 3857 request_dma_obj.dma_attr = mrsas_generic_dma_attr; 3858 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3859 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3860 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 3861 request_dma_obj.dma_attr.dma_attr_align = 1; 3862 3863 /* allocate kernel buffer for DMA */ 3864 if (mrsas_alloc_dma_obj(instance, &request_dma_obj, 3865 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3866 con_log(CL_ANN, 
(CE_WARN, "issue_mfi_smp: " 3867 "could not allocate data transfer buffer.")); 3868 return (DDI_FAILURE); 3869 } 3870 (void) memset(request_dma_obj.buffer, 0, request_xferlen); 3871 3872 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3873 for (i = 0; i < request_xferlen; i++) { 3874 if (ddi_copyin((uint8_t *)request_ubuf + i, 3875 (uint8_t *)request_dma_obj.buffer + i, 3876 1, mode)) { 3877 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3878 "copy from user space failed")); 3879 return (DDI_FAILURE); 3880 } 3881 } 3882 } 3883 3884 if (response_xferlen) { 3885 /* means IOCTL requires DMA */ 3886 /* allocate the data transfer buffer */ 3887 response_dma_obj.size = response_xferlen; 3888 response_dma_obj.dma_attr = mrsas_generic_dma_attr; 3889 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3890 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3891 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 3892 response_dma_obj.dma_attr.dma_attr_align = 1; 3893 3894 /* allocate kernel buffer for DMA */ 3895 if (mrsas_alloc_dma_obj(instance, &response_dma_obj, 3896 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3897 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3898 "could not allocate data transfer buffer.")); 3899 return (DDI_FAILURE); 3900 } 3901 (void) memset(response_dma_obj.buffer, 0, response_xferlen); 3902 3903 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3904 for (i = 0; i < response_xferlen; i++) { 3905 if (ddi_copyin((uint8_t *)response_ubuf + i, 3906 (uint8_t *)response_dma_obj.buffer + i, 3907 1, mode)) { 3908 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3909 "copy from user space failed")); 3910 return (DDI_FAILURE); 3911 } 3912 } 3913 } 3914 3915 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd); 3916 ddi_put8(acc_handle, &smp->cmd_status, 0); 3917 ddi_put8(acc_handle, &smp->connection_status, 0); 3918 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count); 3919 /* smp->context = ksmp->context; */ 3920 ddi_put16(acc_handle, 
&smp->timeout, ksmp->timeout); 3921 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len); 3922 3923 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr, 3924 sizeof (uint64_t)); 3925 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr); 3926 3927 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64); 3928 3929 model = ddi_model_convert_from(mode & FMODELS); 3930 if (model == DDI_MODEL_ILP32) { 3931 con_log(CL_ANN1, (CE_NOTE, 3932 "issue_mfi_smp: DDI_MODEL_ILP32")); 3933 3934 sge32 = &smp->sgl[0].sge32[0]; 3935 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3936 ddi_put32(acc_handle, &sge32[0].phys_addr, 3937 response_dma_obj.dma_cookie[0].dmac_address); 3938 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3939 ddi_put32(acc_handle, &sge32[1].phys_addr, 3940 request_dma_obj.dma_cookie[0].dmac_address); 3941 } else { 3942 #ifdef _ILP32 3943 con_log(CL_ANN1, (CE_NOTE, 3944 "issue_mfi_smp: DDI_MODEL_ILP32")); 3945 sge32 = &smp->sgl[0].sge32[0]; 3946 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3947 ddi_put32(acc_handle, &sge32[0].phys_addr, 3948 response_dma_obj.dma_cookie[0].dmac_address); 3949 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3950 ddi_put32(acc_handle, &sge32[1].phys_addr, 3951 request_dma_obj.dma_cookie[0].dmac_address); 3952 #else 3953 con_log(CL_ANN1, (CE_NOTE, 3954 "issue_mfi_smp: DDI_MODEL_LP64")); 3955 sge64 = &smp->sgl[0].sge64[0]; 3956 ddi_put32(acc_handle, &sge64[0].length, response_xferlen); 3957 ddi_put64(acc_handle, &sge64[0].phys_addr, 3958 response_dma_obj.dma_cookie[0].dmac_address); 3959 ddi_put32(acc_handle, &sge64[1].length, request_xferlen); 3960 ddi_put64(acc_handle, &sge64[1].phys_addr, 3961 request_dma_obj.dma_cookie[0].dmac_address); 3962 #endif 3963 } 3964 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : " 3965 "smp->response_xferlen = %d, smp->request_xferlen = %d " 3966 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), 3967 ddi_get32(acc_handle, 
&sge32[1].length), 3968 ddi_get32(acc_handle, &smp->data_xfer_len))); 3969 3970 cmd->sync_cmd = MRSAS_TRUE; 3971 cmd->frame_count = 1; 3972 3973 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3974 con_log(CL_ANN, (CE_WARN, 3975 "issue_mfi_smp: fw_ioctl failed")); 3976 } else { 3977 con_log(CL_ANN1, (CE_NOTE, 3978 "issue_mfi_smp: copy to user space")); 3979 3980 if (request_xferlen) { 3981 for (i = 0; i < request_xferlen; i++) { 3982 if (ddi_copyout( 3983 (uint8_t *)request_dma_obj.buffer + 3984 i, (uint8_t *)request_ubuf + i, 3985 1, mode)) { 3986 con_log(CL_ANN, (CE_WARN, 3987 "issue_mfi_smp : copy to user space" 3988 " failed")); 3989 return (DDI_FAILURE); 3990 } 3991 } 3992 } 3993 3994 if (response_xferlen) { 3995 for (i = 0; i < response_xferlen; i++) { 3996 if (ddi_copyout( 3997 (uint8_t *)response_dma_obj.buffer 3998 + i, (uint8_t *)response_ubuf 3999 + i, 1, mode)) { 4000 con_log(CL_ANN, (CE_WARN, 4001 "issue_mfi_smp : copy to " 4002 "user space failed")); 4003 return (DDI_FAILURE); 4004 } 4005 } 4006 } 4007 } 4008 4009 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); 4010 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 4011 ddi_get8(acc_handle, &smp->cmd_status))); 4012 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status); 4013 4014 if (request_xferlen) { 4015 /* free kernel buffer */ 4016 if (mrsas_free_dma_obj(instance, request_dma_obj) != 4017 DDI_SUCCESS) 4018 return (DDI_FAILURE); 4019 } 4020 4021 if (response_xferlen) { 4022 /* free kernel buffer */ 4023 if (mrsas_free_dma_obj(instance, response_dma_obj) != 4024 DDI_SUCCESS) 4025 return (DDI_FAILURE); 4026 } 4027 4028 return (DDI_SUCCESS); 4029 } 4030 4031 /* 4032 * issue_mfi_stp 4033 */ 4034 static int 4035 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4036 struct mrsas_cmd *cmd, int mode) 4037 { 4038 void *fis_ubuf; 4039 void *data_ubuf; 4040 uint32_t fis_xferlen = 0; 4041 uint32_t data_xferlen = 0; 4042 
uint_t model; 4043 dma_obj_t fis_dma_obj; 4044 dma_obj_t data_dma_obj; 4045 struct mrsas_stp_frame *kstp; 4046 struct mrsas_stp_frame *stp; 4047 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4048 int i; 4049 4050 stp = &cmd->frame->stp; 4051 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0]; 4052 4053 model = ddi_model_convert_from(mode & FMODELS); 4054 if (model == DDI_MODEL_ILP32) { 4055 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4056 4057 fis_xferlen = kstp->sgl.sge32[0].length; 4058 data_xferlen = kstp->sgl.sge32[1].length; 4059 4060 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4061 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4062 } 4063 else 4064 { 4065 #ifdef _ILP32 4066 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4067 4068 fis_xferlen = kstp->sgl.sge32[0].length; 4069 data_xferlen = kstp->sgl.sge32[1].length; 4070 4071 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4072 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4073 #else 4074 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); 4075 4076 fis_xferlen = kstp->sgl.sge64[0].length; 4077 data_xferlen = kstp->sgl.sge64[1].length; 4078 4079 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 4080 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 4081 #endif 4082 } 4083 4084 4085 if (fis_xferlen) { 4086 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " 4087 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 4088 4089 /* means IOCTL requires DMA */ 4090 /* allocate the data transfer buffer */ 4091 fis_dma_obj.size = fis_xferlen; 4092 fis_dma_obj.dma_attr = mrsas_generic_dma_attr; 4093 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4094 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4095 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 4096 fis_dma_obj.dma_attr.dma_attr_align = 1; 4097 4098 /* allocate kernel buffer for DMA */ 4099 if (mrsas_alloc_dma_obj(instance, 
&fis_dma_obj, 4100 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4101 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : " 4102 "could not allocate data transfer buffer.")); 4103 return (DDI_FAILURE); 4104 } 4105 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen); 4106 4107 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4108 for (i = 0; i < fis_xferlen; i++) { 4109 if (ddi_copyin((uint8_t *)fis_ubuf + i, 4110 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) { 4111 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4112 "copy from user space failed")); 4113 return (DDI_FAILURE); 4114 } 4115 } 4116 } 4117 4118 if (data_xferlen) { 4119 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " 4120 "data_xferlen = %x", data_ubuf, data_xferlen)); 4121 4122 /* means IOCTL requires DMA */ 4123 /* allocate the data transfer buffer */ 4124 data_dma_obj.size = data_xferlen; 4125 data_dma_obj.dma_attr = mrsas_generic_dma_attr; 4126 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4127 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4128 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 4129 data_dma_obj.dma_attr.dma_attr_align = 1; 4130 4131 /* allocate kernel buffer for DMA */ 4132 if (mrsas_alloc_dma_obj(instance, &data_dma_obj, 4133 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4134 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4135 "could not allocate data transfer buffer.")); 4136 return (DDI_FAILURE); 4137 } 4138 (void) memset(data_dma_obj.buffer, 0, data_xferlen); 4139 4140 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4141 for (i = 0; i < data_xferlen; i++) { 4142 if (ddi_copyin((uint8_t *)data_ubuf + i, 4143 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) { 4144 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4145 "copy from user space failed")); 4146 return (DDI_FAILURE); 4147 } 4148 } 4149 } 4150 4151 ddi_put8(acc_handle, &stp->cmd, kstp->cmd); 4152 ddi_put8(acc_handle, &stp->cmd_status, 0); 4153 ddi_put8(acc_handle, &stp->connection_status, 0); 4154 
ddi_put8(acc_handle, &stp->target_id, kstp->target_id); 4155 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count); 4156 4157 ddi_put16(acc_handle, &stp->timeout, kstp->timeout); 4158 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len); 4159 4160 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10, 4161 DDI_DEV_AUTOINCR); 4162 4163 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64); 4164 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags); 4165 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen); 4166 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr, 4167 fis_dma_obj.dma_cookie[0].dmac_address); 4168 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen); 4169 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr, 4170 data_dma_obj.dma_cookie[0].dmac_address); 4171 4172 cmd->sync_cmd = MRSAS_TRUE; 4173 cmd->frame_count = 1; 4174 4175 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4176 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); 4177 } else { 4178 4179 if (fis_xferlen) { 4180 for (i = 0; i < fis_xferlen; i++) { 4181 if (ddi_copyout( 4182 (uint8_t *)fis_dma_obj.buffer + i, 4183 (uint8_t *)fis_ubuf + i, 1, mode)) { 4184 con_log(CL_ANN, (CE_WARN, 4185 "issue_mfi_stp : copy to " 4186 "user space failed")); 4187 return (DDI_FAILURE); 4188 } 4189 } 4190 } 4191 } 4192 if (data_xferlen) { 4193 for (i = 0; i < data_xferlen; i++) { 4194 if (ddi_copyout( 4195 (uint8_t *)data_dma_obj.buffer + i, 4196 (uint8_t *)data_ubuf + i, 1, mode)) { 4197 con_log(CL_ANN, (CE_WARN, 4198 "issue_mfi_stp : copy to" 4199 " user space failed")); 4200 return (DDI_FAILURE); 4201 } 4202 } 4203 } 4204 4205 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); 4206 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status); 4207 4208 if (fis_xferlen) { 4209 /* free kernel buffer */ 4210 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 4211 return (DDI_FAILURE); 4212 } 
4213 4214 if (data_xferlen) { 4215 /* free kernel buffer */ 4216 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 4217 return (DDI_FAILURE); 4218 } 4219 4220 return (DDI_SUCCESS); 4221 } 4222 4223 /* 4224 * fill_up_drv_ver 4225 */ 4226 static void 4227 fill_up_drv_ver(struct mrsas_drv_ver *dv) 4228 { 4229 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); 4230 4231 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4232 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4233 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas")); 4234 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); 4235 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, 4236 strlen(MRSAS_RELDATE)); 4237 } 4238 4239 /* 4240 * handle_drv_ioctl 4241 */ 4242 static int 4243 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4244 int mode) 4245 { 4246 int i; 4247 int rval = DDI_SUCCESS; 4248 int *props = NULL; 4249 void *ubuf; 4250 4251 uint8_t *pci_conf_buf; 4252 uint32_t xferlen; 4253 uint32_t num_props; 4254 uint_t model; 4255 struct mrsas_dcmd_frame *kdcmd; 4256 struct mrsas_drv_ver dv; 4257 struct mrsas_pci_information pi; 4258 4259 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4260 4261 model = ddi_model_convert_from(mode & FMODELS); 4262 if (model == DDI_MODEL_ILP32) { 4263 con_log(CL_ANN1, (CE_NOTE, 4264 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4265 4266 xferlen = kdcmd->sgl.sge32[0].length; 4267 4268 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4269 } else { 4270 #ifdef _ILP32 4271 con_log(CL_ANN1, (CE_NOTE, 4272 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4273 xferlen = kdcmd->sgl.sge32[0].length; 4274 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4275 #else 4276 con_log(CL_ANN1, (CE_NOTE, 4277 "handle_drv_ioctl: DDI_MODEL_LP64")); 4278 xferlen = kdcmd->sgl.sge64[0].length; 4279 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4280 #endif 4281 } 4282 con_log(CL_ANN1, (CE_NOTE, 
"handle_drv_ioctl: " 4283 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4284 4285 switch (kdcmd->opcode) { 4286 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: 4287 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4288 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); 4289 4290 fill_up_drv_ver(&dv); 4291 4292 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4293 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4294 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : " 4295 "copy to user space failed")); 4296 kdcmd->cmd_status = 1; 4297 rval = 1; 4298 } else { 4299 kdcmd->cmd_status = 0; 4300 } 4301 break; 4302 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION: 4303 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4304 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON")); 4305 4306 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 4307 0, "reg", &props, &num_props)) { 4308 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4309 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4310 "ddi_prop_look_int_array failed")); 4311 rval = DDI_FAILURE; 4312 } else { 4313 4314 pi.busNumber = (props[0] >> 16) & 0xFF; 4315 pi.deviceNumber = (props[0] >> 11) & 0x1f; 4316 pi.functionNumber = (props[0] >> 8) & 0x7; 4317 ddi_prop_free((void *)props); 4318 } 4319 4320 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 4321 4322 for (i = 0; i < (sizeof (struct mrsas_pci_information) - 4323 offsetof(struct mrsas_pci_information, pciHeaderInfo)); 4324 i++) { 4325 pci_conf_buf[i] = 4326 pci_config_get8(instance->pci_handle, i); 4327 } 4328 4329 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 4330 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4331 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4332 "copy to user space failed")); 4333 kdcmd->cmd_status = 1; 4334 rval = 1; 4335 } else { 4336 kdcmd->cmd_status = 0; 4337 } 4338 break; 4339 default: 4340 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4341 "invalid driver specific IOCTL opcode = 0x%x", 4342 kdcmd->opcode)); 4343 kdcmd->cmd_status = 1; 4344 rval = DDI_FAILURE; 4345 break; 4346 } 4347 4348 return (rval); 4349 } 
/*
 * handle_mfi_ioctl
 *
 * Dispatch a firmware-bound user ioctl.  A driver command packet is
 * obtained from the free pool, its frame is zeroed and tagged with the
 * packet's context id, and the request is routed to the appropriate
 * issue_mfi_* helper based on the command opcode in the user's frame.
 * The packet is always returned to the pool before this function exits.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int rval = DDI_SUCCESS;

	struct mrsas_header *hdr;
	struct mrsas_cmd *cmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		/* command pool exhausted; caller must retry */
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	/* hdr aliases the user-supplied frame embedded in the ioctl buffer */
	hdr = (struct mrsas_header *)&ioctl->frame[0];

	/*
	 * NOTE(review): hdr points into ioctl->frame (kernel heap), not
	 * into the DMA frame that this acc_handle maps; reading it with
	 * ddi_get8 through cmd's handle looks questionable — confirm
	 * whether a plain hdr->cmd read (as in the default case below)
	 * is the intended access.
	 */
	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* FMA check of the access/DMA handles used by the command */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	return_mfi_pkt(instance, cmd);

	return (rval);
}

/*
 * AEN
 *
 * handle_mfi_aen: register the caller's asynchronous-event-notification
 * request (class/locale word) with the firmware, using the instance's
 * current AEN sequence number.  The registration result is reflected
 * into aen->cmd_status and also returned.
 */
static int
handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
{
	int rval = 0;

	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}

static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num, 4427 uint32_t class_locale_word) 4428 { 4429 int ret_val; 4430 4431 struct mrsas_cmd *cmd, *aen_cmd; 4432 struct mrsas_dcmd_frame *dcmd; 4433 union mrsas_evt_class_locale curr_aen; 4434 union mrsas_evt_class_locale prev_aen; 4435 4436 /* 4437 * If there an AEN pending already (aen_cmd), check if the 4438 * class_locale of that pending AEN is inclusive of the new 4439 * AEN request we currently have. If it is, then we don't have 4440 * to do anything. In other words, whichever events the current 4441 * AEN request is subscribing to, have already been subscribed 4442 * to. 4443 * 4444 * If the old_cmd is _not_ inclusive, then we have to abort 4445 * that command, form a class_locale that is superset of both 4446 * old and current and re-issue to the FW 4447 */ 4448 4449 curr_aen.word = LE_32(class_locale_word); 4450 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4451 aen_cmd = instance->aen_cmd; 4452 if (aen_cmd) { 4453 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle, 4454 &aen_cmd->frame->dcmd.mbox.w[1]); 4455 prev_aen.word = LE_32(prev_aen.word); 4456 prev_aen.members.locale = LE_16(prev_aen.members.locale); 4457 /* 4458 * A class whose enum value is smaller is inclusive of all 4459 * higher values. If a PROGRESS (= -1) was previously 4460 * registered, then a new registration requests for higher 4461 * classes need not be sent to FW. They are automatically 4462 * included. 4463 * 4464 * Locale numbers don't have such hierarchy. They are bitmap 4465 * values 4466 */ 4467 if ((prev_aen.members.class <= curr_aen.members.class) && 4468 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4469 curr_aen.members.locale)) { 4470 /* 4471 * Previously issued event registration includes 4472 * current request. Nothing to do. 
4473 */ 4474 4475 return (0); 4476 } else { 4477 curr_aen.members.locale |= prev_aen.members.locale; 4478 4479 if (prev_aen.members.class < curr_aen.members.class) 4480 curr_aen.members.class = prev_aen.members.class; 4481 4482 ret_val = abort_aen_cmd(instance, aen_cmd); 4483 4484 if (ret_val) { 4485 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 4486 "failed to abort prevous AEN command")); 4487 4488 return (ret_val); 4489 } 4490 } 4491 } else { 4492 curr_aen.word = LE_32(class_locale_word); 4493 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4494 } 4495 4496 cmd = get_mfi_pkt(instance); 4497 4498 if (!cmd) { 4499 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding, 4500 uint16_t, instance->max_fw_cmds); 4501 return (ENOMEM); 4502 } 4503 /* Clear the frame buffer and assign back the context id */ 4504 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4505 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4506 cmd->index); 4507 4508 dcmd = &cmd->frame->dcmd; 4509 4510 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */ 4511 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 4512 4513 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4514 sizeof (struct mrsas_evt_detail)); 4515 4516 /* Prepare DCMD for aen registration */ 4517 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 4518 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 4519 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 4520 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 4521 MFI_FRAME_DIR_READ); 4522 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 4523 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 4524 sizeof (struct mrsas_evt_detail)); 4525 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 4526 MR_DCMD_CTRL_EVENT_WAIT); 4527 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num); 4528 curr_aen.members.locale = 
LE_16(curr_aen.members.locale); 4529 curr_aen.word = LE_32(curr_aen.word); 4530 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1], 4531 curr_aen.word); 4532 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 4533 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 4534 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 4535 sizeof (struct mrsas_evt_detail)); 4536 4537 instance->aen_seq_num = seq_num; 4538 4539 4540 /* 4541 * Store reference to the cmd used to register for AEN. When an 4542 * application wants us to register for AEN, we have to abort this 4543 * cmd and re-register with a new EVENT LOCALE supplied by that app 4544 */ 4545 instance->aen_cmd = cmd; 4546 4547 cmd->frame_count = 1; 4548 4549 /* Issue the aen registration frame */ 4550 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4551 instance->func_ptr->issue_cmd(cmd, instance); 4552 4553 return (0); 4554 } 4555 4556 static void 4557 display_scsi_inquiry(caddr_t scsi_inq) 4558 { 4559 #define MAX_SCSI_DEVICE_CODE 14 4560 int i; 4561 char inquiry_buf[256] = {0}; 4562 int len; 4563 const char *const scsi_device_types[] = { 4564 "Direct-Access ", 4565 "Sequential-Access", 4566 "Printer ", 4567 "Processor ", 4568 "WORM ", 4569 "CD-ROM ", 4570 "Scanner ", 4571 "Optical Device ", 4572 "Medium Changer ", 4573 "Communications ", 4574 "Unknown ", 4575 "Unknown ", 4576 "Unknown ", 4577 "Enclosure ", 4578 }; 4579 4580 len = 0; 4581 4582 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4583 for (i = 8; i < 16; i++) { 4584 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4585 scsi_inq[i]); 4586 } 4587 4588 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4589 4590 for (i = 16; i < 32; i++) { 4591 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4592 scsi_inq[i]); 4593 } 4594 4595 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4596 4597 for (i = 32; i < 36; i++) { 4598 len += snprintf(inquiry_buf + len, 265 - len, "%c", 
4599 scsi_inq[i]); 4600 } 4601 4602 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4603 4604 4605 i = scsi_inq[0] & 0x1f; 4606 4607 4608 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4609 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : 4610 "Unknown "); 4611 4612 4613 len += snprintf(inquiry_buf + len, 265 - len, 4614 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4615 4616 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4617 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4618 } else { 4619 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4620 } 4621 4622 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4623 } 4624 4625 static int 4626 read_fw_status_reg_ppc(struct mrsas_instance *instance) 4627 { 4628 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4629 } 4630 4631 static void 4632 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance) 4633 { 4634 atomic_add_16(&instance->fw_outstanding, 1); 4635 4636 /* Issue the command to the FW */ 4637 WR_IB_QPORT((cmd->frame_phys_addr) | 4638 (((cmd->frame_count - 1) << 1) | 1), instance); 4639 } 4640 4641 /* 4642 * issue_cmd_in_sync_mode 4643 */ 4644 static int 4645 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance, 4646 struct mrsas_cmd *cmd) 4647 { 4648 int i; 4649 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC); 4650 4651 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called")); 4652 4653 cmd->cmd_status = ENODATA; 4654 4655 WR_IB_QPORT((cmd->frame_phys_addr) | 4656 (((cmd->frame_count - 1) << 1) | 1), instance); 4657 4658 mutex_enter(&instance->int_cmd_mtx); 4659 4660 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 4661 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 4662 } 4663 4664 mutex_exit(&instance->int_cmd_mtx); 4665 4666 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done")); 4667 4668 if (i < (msecs -1)) { 4669 return (DDI_SUCCESS); 4670 } else { 4671 return (DDI_FAILURE); 4672 } 4673 } 4674 4675 
/* 4676 * issue_cmd_in_poll_mode 4677 */ 4678 static int 4679 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance, 4680 struct mrsas_cmd *cmd) 4681 { 4682 int i; 4683 uint16_t flags; 4684 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; 4685 struct mrsas_header *frame_hdr; 4686 4687 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called")); 4688 4689 frame_hdr = (struct mrsas_header *)cmd->frame; 4690 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, 4691 MFI_CMD_STATUS_POLL_MODE); 4692 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); 4693 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 4694 4695 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); 4696 4697 /* issue the frame using inbound queue port */ 4698 WR_IB_QPORT((cmd->frame_phys_addr) | 4699 (((cmd->frame_count - 1) << 1) | 1), instance); 4700 4701 /* wait for cmd_status to change from 0xFF */ 4702 for (i = 0; i < msecs && ( 4703 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4704 == MFI_CMD_STATUS_POLL_MODE); i++) { 4705 drv_usecwait(MILLISEC); /* wait for 1000 usecs */ 4706 } 4707 4708 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4709 == MFI_CMD_STATUS_POLL_MODE) { 4710 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 4711 "cmd polling timed out")); 4712 return (DDI_FAILURE); 4713 } 4714 4715 return (DDI_SUCCESS); 4716 } 4717 4718 static void 4719 enable_intr_ppc(struct mrsas_instance *instance) 4720 { 4721 uint32_t mask; 4722 4723 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called")); 4724 4725 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */ 4726 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance); 4727 4728 /* WR_OB_INTR_MASK(~0x80000000, instance); */ 4729 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance); 4730 4731 /* dummy read to force PCI flush */ 4732 mask = RD_OB_INTR_MASK(instance); 4733 4734 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: " 4735 "outbound_intr_mask = 
0x%x", mask)); 4736 } 4737 4738 static void 4739 disable_intr_ppc(struct mrsas_instance *instance) 4740 { 4741 uint32_t mask; 4742 4743 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called")); 4744 4745 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : " 4746 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4747 4748 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */ 4749 WR_OB_INTR_MASK(OB_INTR_MASK, instance); 4750 4751 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : " 4752 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4753 4754 /* dummy read to force PCI flush */ 4755 mask = RD_OB_INTR_MASK(instance); 4756 #ifdef lint 4757 mask = mask; 4758 #endif 4759 } 4760 4761 static int 4762 intr_ack_ppc(struct mrsas_instance *instance) 4763 { 4764 uint32_t status; 4765 int ret = DDI_INTR_CLAIMED; 4766 4767 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called")); 4768 4769 /* check if it is our interrupt */ 4770 status = RD_OB_INTR_STATUS(instance); 4771 4772 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status)); 4773 4774 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) { 4775 ret = DDI_INTR_UNCLAIMED; 4776 } 4777 4778 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4779 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4780 ret = DDI_INTR_UNCLAIMED; 4781 } 4782 4783 if (ret == DDI_INTR_UNCLAIMED) { 4784 return (ret); 4785 } 4786 /* clear the interrupt by writing back the same value */ 4787 WR_OB_DOORBELL_CLEAR(status, instance); 4788 4789 /* dummy READ */ 4790 status = RD_OB_INTR_STATUS(instance); 4791 4792 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared")); 4793 4794 return (ret); 4795 } 4796 4797 static int 4798 mrsas_common_check(struct mrsas_instance *instance, 4799 struct mrsas_cmd *cmd) 4800 { 4801 int ret = DDI_SUCCESS; 4802 4803 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 4804 DDI_SUCCESS) { 4805 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4806 if (cmd->pkt != 
NULL) { 4807 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4808 cmd->pkt->pkt_statistics = 0; 4809 } 4810 ret = DDI_FAILURE; 4811 } 4812 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 4813 != DDI_SUCCESS) { 4814 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4815 if (cmd->pkt != NULL) { 4816 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4817 cmd->pkt->pkt_statistics = 0; 4818 } 4819 ret = DDI_FAILURE; 4820 } 4821 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 4822 DDI_SUCCESS) { 4823 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4824 if (cmd->pkt != NULL) { 4825 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4826 cmd->pkt->pkt_statistics = 0; 4827 } 4828 ret = DDI_FAILURE; 4829 } 4830 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4831 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4832 4833 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 4834 4835 if (cmd->pkt != NULL) { 4836 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4837 cmd->pkt->pkt_statistics = 0; 4838 } 4839 ret = DDI_FAILURE; 4840 } 4841 4842 return (ret); 4843 } 4844 4845 /*ARGSUSED*/ 4846 static int 4847 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4848 { 4849 /* 4850 * as the driver can always deal with an error in any dma or 4851 * access handle, we can just return the fme_status value. 
4852 */ 4853 pci_ereport_post(dip, err, NULL); 4854 return (err->fme_status); 4855 } 4856 4857 static void 4858 mrsas_fm_init(struct mrsas_instance *instance) 4859 { 4860 /* Need to change iblock to priority for new MSI intr */ 4861 ddi_iblock_cookie_t fm_ibc; 4862 4863 /* Only register with IO Fault Services if we have some capability */ 4864 if (instance->fm_capabilities) { 4865 /* Adjust access and dma attributes for FMA */ 4866 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 4867 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 4868 4869 /* 4870 * Register capabilities with IO Fault Services. 4871 * fm_capabilities will be updated to indicate 4872 * capabilities actually supported (not requested.) 4873 */ 4874 4875 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc); 4876 4877 /* 4878 * Initialize pci ereport capabilities if ereport 4879 * capable (should always be.) 4880 */ 4881 4882 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4883 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4884 pci_ereport_setup(instance->dip); 4885 } 4886 4887 /* 4888 * Register error callback if error callback capable. 4889 */ 4890 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4891 ddi_fm_handler_register(instance->dip, 4892 mrsas_fm_error_cb, (void*) instance); 4893 } 4894 } else { 4895 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4896 mrsas_generic_dma_attr.dma_attr_flags = 0; 4897 } 4898 } 4899 4900 static void 4901 mrsas_fm_fini(struct mrsas_instance *instance) 4902 { 4903 /* Only unregister FMA capabilities if registered */ 4904 if (instance->fm_capabilities) { 4905 /* 4906 * Un-register error callback if error callback capable. 
4907 */ 4908 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4909 ddi_fm_handler_unregister(instance->dip); 4910 } 4911 4912 /* 4913 * Release any resources allocated by pci_ereport_setup() 4914 */ 4915 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4916 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4917 pci_ereport_teardown(instance->dip); 4918 } 4919 4920 /* Unregister from IO Fault Services */ 4921 ddi_fm_fini(instance->dip); 4922 4923 /* Adjust access and dma attributes for FMA */ 4924 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4925 mrsas_generic_dma_attr.dma_attr_flags = 0; 4926 } 4927 } 4928 4929 int 4930 mrsas_check_acc_handle(ddi_acc_handle_t handle) 4931 { 4932 ddi_fm_error_t de; 4933 4934 if (handle == NULL) { 4935 return (DDI_FAILURE); 4936 } 4937 4938 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 4939 4940 return (de.fme_status); 4941 } 4942 4943 int 4944 mrsas_check_dma_handle(ddi_dma_handle_t handle) 4945 { 4946 ddi_fm_error_t de; 4947 4948 if (handle == NULL) { 4949 return (DDI_FAILURE); 4950 } 4951 4952 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 4953 4954 return (de.fme_status); 4955 } 4956 4957 void 4958 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 4959 { 4960 uint64_t ena; 4961 char buf[FM_MAX_CLASS]; 4962 4963 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 4964 ena = fm_ena_generate(0, FM_ENA_FMT1); 4965 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 4966 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 4967 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 4968 } 4969 } 4970 4971 static int 4972 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 4973 { 4974 4975 dev_info_t *dip = instance->dip; 4976 int avail, actual, count; 4977 int i, flag, ret; 4978 4979 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", 4980 intr_type)); 4981 4982 /* Get number of interrupts */ 4983 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 4984 if 
((ret != DDI_SUCCESS) || (count == 0)) { 4985 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 4986 "ret %d count %d", ret, count)); 4987 4988 return (DDI_FAILURE); 4989 } 4990 4991 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); 4992 4993 /* Get number of available interrupts */ 4994 ret = ddi_intr_get_navail(dip, intr_type, &avail); 4995 if ((ret != DDI_SUCCESS) || (avail == 0)) { 4996 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 4997 "ret %d avail %d", ret, avail)); 4998 4999 return (DDI_FAILURE); 5000 } 5001 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); 5002 5003 /* Only one interrupt routine. So limit the count to 1 */ 5004 if (count > 1) { 5005 count = 1; 5006 } 5007 5008 /* 5009 * Allocate an array of interrupt handlers. Currently we support 5010 * only one interrupt. The framework can be extended later. 5011 */ 5012 instance->intr_size = count * sizeof (ddi_intr_handle_t); 5013 instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); 5014 ASSERT(instance->intr_htable); 5015 5016 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == 5017 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 5018 5019 /* Allocate interrupt */ 5020 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 5021 count, &actual, flag); 5022 5023 if ((ret != DDI_SUCCESS) || (actual == 0)) { 5024 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5025 "avail = %d", avail)); 5026 kmem_free(instance->intr_htable, instance->intr_size); 5027 return (DDI_FAILURE); 5028 } 5029 if (actual < count) { 5030 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5031 "Requested = %d Received = %d", count, actual)); 5032 } 5033 instance->intr_cnt = actual; 5034 5035 /* 5036 * Get the priority of the interrupt allocated. 
5037 */ 5038 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 5039 &instance->intr_pri)) != DDI_SUCCESS) { 5040 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5041 "get priority call failed")); 5042 5043 for (i = 0; i < actual; i++) { 5044 (void) ddi_intr_free(instance->intr_htable[i]); 5045 } 5046 kmem_free(instance->intr_htable, instance->intr_size); 5047 return (DDI_FAILURE); 5048 } 5049 5050 /* 5051 * Test for high level mutex. we don't support them. 5052 */ 5053 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 5054 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5055 "High level interrupts not supported.")); 5056 5057 for (i = 0; i < actual; i++) { 5058 (void) ddi_intr_free(instance->intr_htable[i]); 5059 } 5060 kmem_free(instance->intr_htable, instance->intr_size); 5061 return (DDI_FAILURE); 5062 } 5063 5064 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 5065 instance->intr_pri)); 5066 5067 /* Call ddi_intr_add_handler() */ 5068 for (i = 0; i < actual; i++) { 5069 ret = ddi_intr_add_handler(instance->intr_htable[i], 5070 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 5071 (caddr_t)(uintptr_t)i); 5072 5073 if (ret != DDI_SUCCESS) { 5074 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 5075 "failed %d", ret)); 5076 5077 for (i = 0; i < actual; i++) { 5078 (void) ddi_intr_free(instance->intr_htable[i]); 5079 } 5080 kmem_free(instance->intr_htable, instance->intr_size); 5081 return (DDI_FAILURE); 5082 } 5083 5084 } 5085 5086 con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); 5087 5088 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 5089 &instance->intr_cap)) != DDI_SUCCESS) { 5090 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 5091 ret)); 5092 5093 /* Free already allocated intr */ 5094 for (i = 0; i < actual; i++) { 5095 (void) ddi_intr_remove_handler( 5096 instance->intr_htable[i]); 5097 (void) ddi_intr_free(instance->intr_htable[i]); 5098 } 5099 kmem_free(instance->intr_htable, instance->intr_size); 
5100 return (DDI_FAILURE); 5101 } 5102 5103 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5104 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 5105 5106 (void) ddi_intr_block_enable(instance->intr_htable, 5107 instance->intr_cnt); 5108 } else { 5109 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 5110 5111 for (i = 0; i < instance->intr_cnt; i++) { 5112 (void) ddi_intr_enable(instance->intr_htable[i]); 5113 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns " 5114 "%d", i)); 5115 } 5116 } 5117 5118 return (DDI_SUCCESS); 5119 5120 } 5121 5122 5123 static void 5124 mrsas_rem_intrs(struct mrsas_instance *instance) 5125 { 5126 int i; 5127 5128 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 5129 5130 /* Disable all interrupts first */ 5131 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5132 (void) ddi_intr_block_disable(instance->intr_htable, 5133 instance->intr_cnt); 5134 } else { 5135 for (i = 0; i < instance->intr_cnt; i++) { 5136 (void) ddi_intr_disable(instance->intr_htable[i]); 5137 } 5138 } 5139 5140 /* Remove all the handlers */ 5141 5142 for (i = 0; i < instance->intr_cnt; i++) { 5143 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 5144 (void) ddi_intr_free(instance->intr_htable[i]); 5145 } 5146 5147 kmem_free(instance->intr_htable, instance->intr_size); 5148 } 5149 5150 static int 5151 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 5152 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 5153 { 5154 struct mrsas_instance *instance; 5155 int config; 5156 int rval; 5157 5158 char *ptr = NULL; 5159 int tgt, lun; 5160 5161 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 5162 5163 if ((instance = ddi_get_soft_state(mrsas_state, 5164 ddi_get_instance(parent))) == NULL) { 5165 return (NDI_FAILURE); 5166 } 5167 5168 /* Hold nexus during bus_config */ 5169 ndi_devi_enter(parent, &config); 5170 switch (op) { 5171 case BUS_CONFIG_ONE: { 5172 5173 /* parse wwid/target name out of name given */ 5174 
if ((ptr = strchr((char *)arg, '@')) == NULL) { 5175 rval = NDI_FAILURE; 5176 break; 5177 } 5178 ptr++; 5179 5180 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 5181 rval = NDI_FAILURE; 5182 break; 5183 } 5184 5185 if (lun == 0) { 5186 rval = mrsas_config_ld(instance, tgt, lun, childp); 5187 } else { 5188 rval = NDI_FAILURE; 5189 } 5190 5191 break; 5192 } 5193 case BUS_CONFIG_DRIVER: 5194 case BUS_CONFIG_ALL: { 5195 5196 rval = mrsas_config_all_devices(instance); 5197 5198 rval = NDI_SUCCESS; 5199 break; 5200 } 5201 } 5202 5203 if (rval == NDI_SUCCESS) { 5204 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 5205 5206 } 5207 ndi_devi_exit(parent, config); 5208 5209 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 5210 rval)); 5211 return (rval); 5212 } 5213 5214 static int 5215 mrsas_config_all_devices(struct mrsas_instance *instance) 5216 { 5217 int rval, tgt; 5218 5219 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 5220 (void) mrsas_config_ld(instance, tgt, 0, NULL); 5221 5222 } 5223 5224 rval = NDI_SUCCESS; 5225 return (rval); 5226 } 5227 5228 static int 5229 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 5230 { 5231 char devbuf[SCSI_MAXNAMELEN]; 5232 char *addr; 5233 char *p, *tp, *lp; 5234 long num; 5235 5236 /* Parse dev name and address */ 5237 (void) strcpy(devbuf, devnm); 5238 addr = ""; 5239 for (p = devbuf; *p != '\0'; p++) { 5240 if (*p == '@') { 5241 addr = p + 1; 5242 *p = '\0'; 5243 } else if (*p == ':') { 5244 *p = '\0'; 5245 break; 5246 } 5247 } 5248 5249 /* Parse target and lun */ 5250 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 5251 if (*p == ',') { 5252 lp = p + 1; 5253 *p = '\0'; 5254 break; 5255 } 5256 } 5257 if (tgt && tp) { 5258 if (ddi_strtol(tp, NULL, 0x10, &num)) { 5259 return (DDI_FAILURE); /* Can declare this as constant */ 5260 } 5261 *tgt = (int)num; 5262 } 5263 if (lun && lp) { 5264 if (ddi_strtol(lp, NULL, 0x10, &num)) { 5265 return (DDI_FAILURE); 5266 } 5267 *lun = (int)num; 5268 } 5269 return 
(DDI_SUCCESS); /* Success case */ 5270 } 5271 5272 static int 5273 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 5274 uint8_t lun, dev_info_t **ldip) 5275 { 5276 struct scsi_device *sd; 5277 dev_info_t *child; 5278 int rval; 5279 5280 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 5281 tgt, lun)); 5282 5283 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 5284 if (ldip) { 5285 *ldip = child; 5286 } 5287 con_log(CL_ANN1, (CE_NOTE, 5288 "mrsas_config_ld: Child = %p found t = %d l = %d", 5289 (void *)child, tgt, lun)); 5290 return (NDI_SUCCESS); 5291 } 5292 5293 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 5294 sd->sd_address.a_hba_tran = instance->tran; 5295 sd->sd_address.a_target = (uint16_t)tgt; 5296 sd->sd_address.a_lun = (uint8_t)lun; 5297 5298 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 5299 rval = mrsas_config_scsi_device(instance, sd, ldip); 5300 else 5301 rval = NDI_FAILURE; 5302 5303 /* sd_unprobe is blank now. Free buffer manually */ 5304 if (sd->sd_inq) { 5305 kmem_free(sd->sd_inq, SUN_INQSIZE); 5306 sd->sd_inq = (struct scsi_inquiry *)NULL; 5307 } 5308 5309 kmem_free(sd, sizeof (struct scsi_device)); 5310 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 5311 rval)); 5312 return (rval); 5313 } 5314 5315 static int 5316 mrsas_config_scsi_device(struct mrsas_instance *instance, 5317 struct scsi_device *sd, dev_info_t **dipp) 5318 { 5319 char *nodename = NULL; 5320 char **compatible = NULL; 5321 int ncompatible = 0; 5322 char *childname; 5323 dev_info_t *ldip = NULL; 5324 int tgt = sd->sd_address.a_target; 5325 int lun = sd->sd_address.a_lun; 5326 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 5327 int rval; 5328 5329 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 5330 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 5331 NULL, &nodename, &compatible, &ncompatible); 5332 5333 if (nodename == NULL) { 5334 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no 
compatible driver " 5335 "for t%dL%d", tgt, lun)); 5336 rval = NDI_FAILURE; 5337 goto finish; 5338 } 5339 5340 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; 5341 con_log(CL_ANN1, (CE_WARN, 5342 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 5343 5344 /* Create a dev node */ 5345 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 5346 con_log(CL_ANN1, (CE_WARN, 5347 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 5348 if (rval == NDI_SUCCESS) { 5349 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 5350 DDI_PROP_SUCCESS) { 5351 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5352 "property for t%dl%d target", tgt, lun)); 5353 rval = NDI_FAILURE; 5354 goto finish; 5355 } 5356 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 5357 DDI_PROP_SUCCESS) { 5358 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5359 "property for t%dl%d lun", tgt, lun)); 5360 rval = NDI_FAILURE; 5361 goto finish; 5362 } 5363 5364 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 5365 "compatible", compatible, ncompatible) != 5366 DDI_PROP_SUCCESS) { 5367 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5368 "property for t%dl%d compatible", tgt, lun)); 5369 rval = NDI_FAILURE; 5370 goto finish; 5371 } 5372 5373 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 5374 if (rval != NDI_SUCCESS) { 5375 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 5376 "t%dl%d", tgt, lun)); 5377 ndi_prop_remove_all(ldip); 5378 (void) ndi_devi_free(ldip); 5379 } else { 5380 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 5381 "0 t%dl%d", tgt, lun)); 5382 } 5383 5384 } 5385 finish: 5386 if (dipp) { 5387 *dipp = ldip; 5388 } 5389 5390 con_log(CL_DLEVEL1, (CE_WARN, 5391 "mr_sas: config_scsi_device rval = %d t%dL%d", 5392 rval, tgt, lun)); 5393 scsi_hba_nodename_compatible_free(nodename, compatible); 5394 return (rval); 5395 } 5396 5397 /*ARGSUSED*/ 5398 static int 5399 mrsas_service_evt(struct 
mrsas_instance *instance, int tgt, int lun, int event, 5400 uint64_t wwn) 5401 { 5402 struct mrsas_eventinfo *mrevt = NULL; 5403 5404 con_log(CL_ANN1, (CE_NOTE, 5405 "mrsas_service_evt called for t%dl%d event = %d", 5406 tgt, lun, event)); 5407 5408 if ((instance->taskq == NULL) || (mrevt = 5409 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 5410 return (ENOMEM); 5411 } 5412 5413 mrevt->instance = instance; 5414 mrevt->tgt = tgt; 5415 mrevt->lun = lun; 5416 mrevt->event = event; 5417 5418 if ((ddi_taskq_dispatch(instance->taskq, 5419 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 5420 DDI_SUCCESS) { 5421 con_log(CL_ANN1, (CE_NOTE, 5422 "mr_sas: Event task failed for t%dl%d event = %d", 5423 tgt, lun, event)); 5424 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5425 return (DDI_FAILURE); 5426 } 5427 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event); 5428 return (DDI_SUCCESS); 5429 } 5430 5431 static void 5432 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 5433 { 5434 struct mrsas_instance *instance = mrevt->instance; 5435 dev_info_t *dip, *pdip; 5436 int circ1 = 0; 5437 char *devname; 5438 5439 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 5440 " tgt %d lun %d event %d", 5441 mrevt->tgt, mrevt->lun, mrevt->event)); 5442 5443 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 5444 dip = instance->mr_ld_list[mrevt->tgt].dip; 5445 } else { 5446 return; 5447 } 5448 5449 ndi_devi_enter(instance->dip, &circ1); 5450 switch (mrevt->event) { 5451 case MRSAS_EVT_CONFIG_TGT: 5452 if (dip == NULL) { 5453 5454 if (mrevt->lun == 0) { 5455 (void) mrsas_config_ld(instance, mrevt->tgt, 5456 0, NULL); 5457 } 5458 con_log(CL_ANN1, (CE_NOTE, 5459 "mr_sas: EVT_CONFIG_TGT called:" 5460 " for tgt %d lun %d event %d", 5461 mrevt->tgt, mrevt->lun, mrevt->event)); 5462 5463 } else { 5464 con_log(CL_ANN1, (CE_NOTE, 5465 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 5466 " for tgt %d lun %d event %d", 5467 mrevt->tgt, 
mrevt->lun, mrevt->event)); 5468 } 5469 break; 5470 case MRSAS_EVT_UNCONFIG_TGT: 5471 if (dip) { 5472 if (i_ddi_devi_attached(dip)) { 5473 5474 pdip = ddi_get_parent(dip); 5475 5476 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP); 5477 (void) ddi_deviname(dip, devname); 5478 5479 (void) devfs_clean(pdip, devname + 1, 5480 DV_CLEAN_FORCE); 5481 kmem_free(devname, MAXNAMELEN + 1); 5482 } 5483 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 5484 con_log(CL_ANN1, (CE_NOTE, 5485 "mr_sas: EVT_UNCONFIG_TGT called:" 5486 " for tgt %d lun %d event %d", 5487 mrevt->tgt, mrevt->lun, mrevt->event)); 5488 } else { 5489 con_log(CL_ANN1, (CE_NOTE, 5490 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 5491 " for tgt %d lun %d event %d", 5492 mrevt->tgt, mrevt->lun, mrevt->event)); 5493 } 5494 break; 5495 } 5496 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5497 ndi_devi_exit(instance->dip, circ1); 5498 } 5499 5500 static int 5501 mrsas_mode_sense_build(struct scsi_pkt *pkt) 5502 { 5503 union scsi_cdb *cdbp; 5504 uint16_t page_code; 5505 struct scsa_cmd *acmd; 5506 struct buf *bp; 5507 struct mode_header *modehdrp; 5508 5509 cdbp = (void *)pkt->pkt_cdbp; 5510 page_code = cdbp->cdb_un.sg.scsi[0]; 5511 acmd = PKT2CMD(pkt); 5512 bp = acmd->cmd_buf; 5513 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 5514 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 5515 /* ADD pkt statistics as Command failed. 
*/ 5516 return (NULL); 5517 } 5518 5519 bp_mapin(bp); 5520 bzero(bp->b_un.b_addr, bp->b_bcount); 5521 5522 switch (page_code) { 5523 case 0x3: { 5524 struct mode_format *page3p = NULL; 5525 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5526 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5527 5528 page3p = (void *)((caddr_t)modehdrp + 5529 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5530 page3p->mode_page.code = 0x3; 5531 page3p->mode_page.length = 5532 (uchar_t)(sizeof (struct mode_format)); 5533 page3p->data_bytes_sect = 512; 5534 page3p->sect_track = 63; 5535 break; 5536 } 5537 case 0x4: { 5538 struct mode_geometry *page4p = NULL; 5539 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5540 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5541 5542 page4p = (void *)((caddr_t)modehdrp + 5543 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5544 page4p->mode_page.code = 0x4; 5545 page4p->mode_page.length = 5546 (uchar_t)(sizeof (struct mode_geometry)); 5547 page4p->heads = 255; 5548 page4p->rpm = 10000; 5549 break; 5550 } 5551 default: 5552 break; 5553 } 5554 return (NULL); 5555 } 5556