1 /* 2 * mr_sas.c: source for mr_sas driver 3 * 4 * MegaRAID device driver for SAS2.0 controllers 5 * Copyright (c) 2008-2009, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Arun Chandrashekhar 11 * Manju R 12 * Rajesh Prabhakaran 13 * Seokmann Ju 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions are met: 17 * 18 * 1. Redistributions of source code must retain the above copyright notice, 19 * this list of conditions and the following disclaimer. 20 * 21 * 2. Redistributions in binary form must reproduce the above copyright notice, 22 * this list of conditions and the following disclaimer in the documentation 23 * and/or other materials provided with the distribution. 24 * 25 * 3. Neither the name of the author nor the names of its contributors may be 26 * used to endorse or promote products derived from this software without 27 * specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 32 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 33 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 37 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 38 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 39 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 40 * DAMAGE. 41 */ 42 43 /* 44 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 45 * Use is subject to license terms. 
46 */ 47 48 #include <sys/types.h> 49 #include <sys/param.h> 50 #include <sys/file.h> 51 #include <sys/errno.h> 52 #include <sys/open.h> 53 #include <sys/cred.h> 54 #include <sys/modctl.h> 55 #include <sys/conf.h> 56 #include <sys/devops.h> 57 #include <sys/cmn_err.h> 58 #include <sys/kmem.h> 59 #include <sys/stat.h> 60 #include <sys/mkdev.h> 61 #include <sys/pci.h> 62 #include <sys/scsi/scsi.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/atomic.h> 66 #include <sys/signal.h> 67 #include <sys/byteorder.h> 68 #include <sys/fs/dv_node.h> /* devfs_clean */ 69 70 #include "mr_sas.h" 71 72 /* 73 * FMA header files 74 */ 75 #include <sys/ddifm.h> 76 #include <sys/fm/protocol.h> 77 #include <sys/fm/util.h> 78 #include <sys/fm/io/ddi.h> 79 80 /* 81 * Local static data 82 */ 83 static void *mrsas_state = NULL; 84 static int debug_level_g = CL_NONE; 85 boolean_t mrsas_relaxed_ordering = B_TRUE; 86 87 #pragma weak scsi_hba_open 88 #pragma weak scsi_hba_close 89 #pragma weak scsi_hba_ioctl 90 91 static ddi_dma_attr_t mrsas_generic_dma_attr = { 92 DMA_ATTR_V0, /* dma_attr_version */ 93 0, /* low DMA address range */ 94 0xFFFFFFFFU, /* high DMA address range */ 95 0xFFFFFFFFU, /* DMA counter register */ 96 8, /* DMA address alignment */ 97 0x07, /* DMA burstsizes */ 98 1, /* min DMA size */ 99 0xFFFFFFFFU, /* max DMA size */ 100 0xFFFFFFFFU, /* segment boundary */ 101 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 102 512, /* granularity of device */ 103 0 /* bus specific DMA flags */ 104 }; 105 106 int32_t mrsas_max_cap_maxxfer = 0x1000000; 107 108 /* 109 * cb_ops contains base level routines 110 */ 111 static struct cb_ops mrsas_cb_ops = { 112 mrsas_open, /* open */ 113 mrsas_close, /* close */ 114 nodev, /* strategy */ 115 nodev, /* print */ 116 nodev, /* dump */ 117 nodev, /* read */ 118 nodev, /* write */ 119 mrsas_ioctl, /* ioctl */ 120 nodev, /* devmap */ 121 nodev, /* mmap */ 122 nodev, /* segmap */ 123 nochpoll, /* poll */ 124 nodev, /* cb_prop_op */ 125 
0, /* streamtab */ 126 D_NEW | D_HOTPLUG, /* cb_flag */ 127 CB_REV, /* cb_rev */ 128 nodev, /* cb_aread */ 129 nodev /* cb_awrite */ 130 }; 131 132 /* 133 * dev_ops contains configuration routines 134 */ 135 static struct dev_ops mrsas_ops = { 136 DEVO_REV, /* rev, */ 137 0, /* refcnt */ 138 mrsas_getinfo, /* getinfo */ 139 nulldev, /* identify */ 140 nulldev, /* probe */ 141 mrsas_attach, /* attach */ 142 mrsas_detach, /* detach */ 143 mrsas_reset, /* reset */ 144 &mrsas_cb_ops, /* char/block ops */ 145 NULL, /* bus ops */ 146 NULL, /* power */ 147 ddi_quiesce_not_supported, /* quiesce */ 148 }; 149 150 char _depends_on[] = "misc/scsi"; 151 152 static struct modldrv modldrv = { 153 &mod_driverops, /* module type - driver */ 154 MRSAS_VERSION, 155 &mrsas_ops, /* driver ops */ 156 }; 157 158 static struct modlinkage modlinkage = { 159 MODREV_1, /* ml_rev - must be MODREV_1 */ 160 &modldrv, /* ml_linkage */ 161 NULL /* end of driver linkage */ 162 }; 163 164 static struct ddi_device_acc_attr endian_attr = { 165 DDI_DEVICE_ATTR_V0, 166 DDI_STRUCTURE_LE_ACC, 167 DDI_STRICTORDER_ACC 168 }; 169 170 171 /* 172 * ************************************************************************** * 173 * * 174 * common entry points - for loadable kernel modules * 175 * * 176 * ************************************************************************** * 177 */ 178 179 int 180 _init(void) 181 { 182 int ret; 183 184 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 185 186 ret = ddi_soft_state_init(&mrsas_state, 187 sizeof (struct mrsas_instance), 0); 188 189 if (ret != DDI_SUCCESS) { 190 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state")); 191 return (ret); 192 } 193 194 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 195 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba")); 196 ddi_soft_state_fini(&mrsas_state); 197 return (ret); 198 } 199 200 ret = mod_install(&modlinkage); 201 202 if (ret != DDI_SUCCESS) { 203 con_log(CL_ANN, (CE_WARN, 
"mr_sas: mod_install failed")); 204 scsi_hba_fini(&modlinkage); 205 ddi_soft_state_fini(&mrsas_state); 206 } 207 208 return (ret); 209 } 210 211 int 212 _info(struct modinfo *modinfop) 213 { 214 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 215 216 return (mod_info(&modlinkage, modinfop)); 217 } 218 219 int 220 _fini(void) 221 { 222 int ret; 223 224 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 225 226 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) 227 return (ret); 228 229 scsi_hba_fini(&modlinkage); 230 231 ddi_soft_state_fini(&mrsas_state); 232 233 return (ret); 234 } 235 236 237 /* 238 * ************************************************************************** * 239 * * 240 * common entry points - for autoconfiguration * 241 * * 242 * ************************************************************************** * 243 */ 244 245 static int 246 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 247 { 248 int instance_no; 249 int nregs; 250 uint8_t added_isr_f = 0; 251 uint8_t added_soft_isr_f = 0; 252 uint8_t create_devctl_node_f = 0; 253 uint8_t create_scsi_node_f = 0; 254 uint8_t create_ioc_node_f = 0; 255 uint8_t tran_alloc_f = 0; 256 uint8_t irq; 257 uint16_t vendor_id; 258 uint16_t device_id; 259 uint16_t subsysvid; 260 uint16_t subsysid; 261 uint16_t command; 262 off_t reglength = 0; 263 int intr_types = 0; 264 char *data; 265 int msi_enable = 0; 266 267 scsi_hba_tran_t *tran; 268 ddi_dma_attr_t tran_dma_attr; 269 struct mrsas_instance *instance; 270 271 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 272 273 /* CONSTCOND */ 274 ASSERT(NO_COMPETING_THREADS); 275 276 instance_no = ddi_get_instance(dip); 277 278 /* 279 * check to see whether this device is in a DMA-capable slot. 
280 */ 281 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 282 con_log(CL_ANN, (CE_WARN, 283 "mr_sas%d: Device in slave-only slot, unused", 284 instance_no)); 285 return (DDI_FAILURE); 286 } 287 288 switch (cmd) { 289 case DDI_ATTACH: 290 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH")); 291 /* allocate the soft state for the instance */ 292 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 293 != DDI_SUCCESS) { 294 con_log(CL_ANN, (CE_WARN, 295 "mr_sas%d: Failed to allocate soft state", 296 instance_no)); 297 298 return (DDI_FAILURE); 299 } 300 301 instance = (struct mrsas_instance *)ddi_get_soft_state 302 (mrsas_state, instance_no); 303 304 if (instance == NULL) { 305 con_log(CL_ANN, (CE_WARN, 306 "mr_sas%d: Bad soft state", instance_no)); 307 308 ddi_soft_state_free(mrsas_state, instance_no); 309 310 return (DDI_FAILURE); 311 } 312 313 bzero((caddr_t)instance, 314 sizeof (struct mrsas_instance)); 315 316 instance->func_ptr = kmem_zalloc( 317 sizeof (struct mrsas_func_ptr), KM_SLEEP); 318 ASSERT(instance->func_ptr); 319 320 /* Setup the PCI configuration space handles */ 321 if (pci_config_setup(dip, &instance->pci_handle) != 322 DDI_SUCCESS) { 323 con_log(CL_ANN, (CE_WARN, 324 "mr_sas%d: pci config setup failed ", 325 instance_no)); 326 327 kmem_free(instance->func_ptr, 328 sizeof (struct mrsas_func_ptr)); 329 ddi_soft_state_free(mrsas_state, instance_no); 330 331 return (DDI_FAILURE); 332 } 333 334 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 335 con_log(CL_ANN, (CE_WARN, 336 "mr_sas: failed to get registers.")); 337 338 pci_config_teardown(&instance->pci_handle); 339 kmem_free(instance->func_ptr, 340 sizeof (struct mrsas_func_ptr)); 341 ddi_soft_state_free(mrsas_state, instance_no); 342 343 return (DDI_FAILURE); 344 } 345 346 vendor_id = pci_config_get16(instance->pci_handle, 347 PCI_CONF_VENID); 348 device_id = pci_config_get16(instance->pci_handle, 349 PCI_CONF_DEVID); 350 351 subsysvid = pci_config_get16(instance->pci_handle, 352 PCI_CONF_SUBVENID); 353 
subsysid = pci_config_get16(instance->pci_handle, 354 PCI_CONF_SUBSYSID); 355 356 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 357 (pci_config_get16(instance->pci_handle, 358 PCI_CONF_COMM) | PCI_COMM_ME)); 359 irq = pci_config_get8(instance->pci_handle, 360 PCI_CONF_ILINE); 361 362 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 363 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s", 364 instance_no, vendor_id, device_id, subsysvid, 365 subsysid, irq, MRSAS_VERSION)); 366 367 /* enable bus-mastering */ 368 command = pci_config_get16(instance->pci_handle, 369 PCI_CONF_COMM); 370 371 if (!(command & PCI_COMM_ME)) { 372 command |= PCI_COMM_ME; 373 374 pci_config_put16(instance->pci_handle, 375 PCI_CONF_COMM, command); 376 377 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 378 "enable bus-mastering", instance_no)); 379 } else { 380 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 381 "bus-mastering already set", instance_no)); 382 } 383 384 /* initialize function pointers */ 385 if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || 386 (device_id == PCI_DEVICE_ID_LSI_2108V)) { 387 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 388 "2108V/DE detected", instance_no)); 389 instance->func_ptr->read_fw_status_reg = 390 read_fw_status_reg_ppc; 391 instance->func_ptr->issue_cmd = issue_cmd_ppc; 392 instance->func_ptr->issue_cmd_in_sync_mode = 393 issue_cmd_in_sync_mode_ppc; 394 instance->func_ptr->issue_cmd_in_poll_mode = 395 issue_cmd_in_poll_mode_ppc; 396 instance->func_ptr->enable_intr = 397 enable_intr_ppc; 398 instance->func_ptr->disable_intr = 399 disable_intr_ppc; 400 instance->func_ptr->intr_ack = intr_ack_ppc; 401 } else { 402 con_log(CL_ANN, (CE_WARN, 403 "mr_sas: Invalid device detected")); 404 405 pci_config_teardown(&instance->pci_handle); 406 kmem_free(instance->func_ptr, 407 sizeof (struct mrsas_func_ptr)); 408 ddi_soft_state_free(mrsas_state, instance_no); 409 410 return (DDI_FAILURE); 411 } 412 413 instance->baseaddress = pci_config_get32( 414 instance->pci_handle, PCI_CONF_BASE0); 415 
instance->baseaddress &= 0x0fffc; 416 417 instance->dip = dip; 418 instance->vendor_id = vendor_id; 419 instance->device_id = device_id; 420 instance->subsysvid = subsysvid; 421 instance->subsysid = subsysid; 422 instance->instance = instance_no; 423 424 /* Initialize FMA */ 425 instance->fm_capabilities = ddi_prop_get_int( 426 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 427 "fm-capable", DDI_FM_EREPORT_CAPABLE | 428 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 429 | DDI_FM_ERRCB_CAPABLE); 430 431 mrsas_fm_init(instance); 432 433 /* Initialize Interrupts */ 434 if ((ddi_dev_regsize(instance->dip, 435 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 436 reglength < MINIMUM_MFI_MEM_SZ) { 437 return (DDI_FAILURE); 438 } 439 if (reglength > DEFAULT_MFI_MEM_SZ) { 440 reglength = DEFAULT_MFI_MEM_SZ; 441 con_log(CL_DLEVEL1, (CE_NOTE, 442 "mr_sas: register length to map is " 443 "0x%lx bytes", reglength)); 444 } 445 if (ddi_regs_map_setup(instance->dip, 446 REGISTER_SET_IO_2108, &instance->regmap, 0, 447 reglength, &endian_attr, &instance->regmap_handle) 448 != DDI_SUCCESS) { 449 con_log(CL_ANN, (CE_NOTE, 450 "mr_sas: couldn't map control registers")); 451 goto fail_attach; 452 } 453 454 /* 455 * Disable Interrupt Now. 
456 * Setup Software interrupt 457 */ 458 instance->func_ptr->disable_intr(instance); 459 460 msi_enable = 0; 461 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 462 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 463 if (strncmp(data, "yes", 3) == 0) { 464 msi_enable = 1; 465 con_log(CL_ANN, (CE_WARN, 466 "msi_enable = %d ENABLED", 467 msi_enable)); 468 } 469 ddi_prop_free(data); 470 } 471 472 con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", 473 msi_enable)); 474 475 /* Check for all supported interrupt types */ 476 if (ddi_intr_get_supported_types( 477 dip, &intr_types) != DDI_SUCCESS) { 478 con_log(CL_ANN, (CE_WARN, 479 "ddi_intr_get_supported_types() failed")); 480 goto fail_attach; 481 } 482 483 con_log(CL_DLEVEL1, (CE_NOTE, 484 "ddi_intr_get_supported_types() ret: 0x%x", 485 intr_types)); 486 487 /* Initialize and Setup Interrupt handler */ 488 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 489 if (mrsas_add_intrs(instance, 490 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { 491 con_log(CL_ANN, (CE_WARN, 492 "MSIX interrupt query failed")); 493 goto fail_attach; 494 } 495 instance->intr_type = DDI_INTR_TYPE_MSIX; 496 } else if (msi_enable && (intr_types & 497 DDI_INTR_TYPE_MSI)) { 498 if (mrsas_add_intrs(instance, 499 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 500 con_log(CL_ANN, (CE_WARN, 501 "MSI interrupt query failed")); 502 goto fail_attach; 503 } 504 instance->intr_type = DDI_INTR_TYPE_MSI; 505 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 506 msi_enable = 0; 507 if (mrsas_add_intrs(instance, 508 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 509 con_log(CL_ANN, (CE_WARN, 510 "FIXED interrupt query failed")); 511 goto fail_attach; 512 } 513 instance->intr_type = DDI_INTR_TYPE_FIXED; 514 } else { 515 con_log(CL_ANN, (CE_WARN, "Device cannot " 516 "suppport either FIXED or MSI/X " 517 "interrupts")); 518 goto fail_attach; 519 } 520 521 added_isr_f = 1; 522 523 /* setup the mfi based low level driver */ 524 if (init_mfi(instance) != DDI_SUCCESS) { 525 con_log(CL_ANN, 
(CE_WARN, "mr_sas: " 526 "could not initialize the low level driver")); 527 528 goto fail_attach; 529 } 530 531 /* Initialize all Mutex */ 532 INIT_LIST_HEAD(&instance->completed_pool_list); 533 mutex_init(&instance->completed_pool_mtx, 534 "completed_pool_mtx", MUTEX_DRIVER, 535 DDI_INTR_PRI(instance->intr_pri)); 536 537 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 538 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 539 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 540 541 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", 542 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 543 544 /* Register our soft-isr for highlevel interrupts. */ 545 instance->isr_level = instance->intr_pri; 546 if (instance->isr_level == HIGH_LEVEL_INTR) { 547 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 548 &instance->soft_intr_id, NULL, NULL, 549 mrsas_softintr, (caddr_t)instance) != 550 DDI_SUCCESS) { 551 con_log(CL_ANN, (CE_WARN, 552 " Software ISR did not register")); 553 554 goto fail_attach; 555 } 556 557 added_soft_isr_f = 1; 558 } 559 560 /* Allocate a transport structure */ 561 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 562 563 if (tran == NULL) { 564 con_log(CL_ANN, (CE_WARN, 565 "scsi_hba_tran_alloc failed")); 566 goto fail_attach; 567 } 568 569 tran_alloc_f = 1; 570 571 instance->tran = tran; 572 573 tran->tran_hba_private = instance; 574 tran->tran_tgt_init = mrsas_tran_tgt_init; 575 tran->tran_tgt_probe = scsi_hba_probe; 576 tran->tran_tgt_free = mrsas_tran_tgt_free; 577 tran->tran_init_pkt = mrsas_tran_init_pkt; 578 tran->tran_start = mrsas_tran_start; 579 tran->tran_abort = mrsas_tran_abort; 580 tran->tran_reset = mrsas_tran_reset; 581 tran->tran_getcap = mrsas_tran_getcap; 582 tran->tran_setcap = mrsas_tran_setcap; 583 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 584 tran->tran_dmafree = mrsas_tran_dmafree; 585 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 586 tran->tran_bus_config = mrsas_tran_bus_config; 587 588 if (mrsas_relaxed_ordering) 589 
mrsas_generic_dma_attr.dma_attr_flags |= 590 DDI_DMA_RELAXED_ORDERING; 591 592 593 tran_dma_attr = mrsas_generic_dma_attr; 594 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 595 596 /* Attach this instance of the hba */ 597 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 598 != DDI_SUCCESS) { 599 con_log(CL_ANN, (CE_WARN, 600 "scsi_hba_attach failed")); 601 602 goto fail_attach; 603 } 604 605 /* create devctl node for cfgadm command */ 606 if (ddi_create_minor_node(dip, "devctl", 607 S_IFCHR, INST2DEVCTL(instance_no), 608 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 609 con_log(CL_ANN, (CE_WARN, 610 "mr_sas: failed to create devctl node.")); 611 612 goto fail_attach; 613 } 614 615 create_devctl_node_f = 1; 616 617 /* create scsi node for cfgadm command */ 618 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 619 INST2SCSI(instance_no), 620 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 621 DDI_FAILURE) { 622 con_log(CL_ANN, (CE_WARN, 623 "mr_sas: failed to create scsi node.")); 624 625 goto fail_attach; 626 } 627 628 create_scsi_node_f = 1; 629 630 (void) sprintf(instance->iocnode, "%d:lsirdctl", 631 instance_no); 632 633 /* 634 * Create a node for applications 635 * for issuing ioctl to the driver. 
636 */ 637 if (ddi_create_minor_node(dip, instance->iocnode, 638 S_IFCHR, INST2LSIRDCTL(instance_no), 639 DDI_PSEUDO, 0) == DDI_FAILURE) { 640 con_log(CL_ANN, (CE_WARN, 641 "mr_sas: failed to create ioctl node.")); 642 643 goto fail_attach; 644 } 645 646 create_ioc_node_f = 1; 647 648 /* Create a taskq to handle dr events */ 649 if ((instance->taskq = ddi_taskq_create(dip, 650 "mrsas_dr_taskq", 1, 651 TASKQ_DEFAULTPRI, 0)) == NULL) { 652 con_log(CL_ANN, (CE_WARN, 653 "mr_sas: failed to create taskq ")); 654 instance->taskq = NULL; 655 goto fail_attach; 656 } 657 658 /* enable interrupt */ 659 instance->func_ptr->enable_intr(instance); 660 661 /* initiate AEN */ 662 if (start_mfi_aen(instance)) { 663 con_log(CL_ANN, (CE_WARN, 664 "mr_sas: failed to initiate AEN.")); 665 goto fail_initiate_aen; 666 } 667 668 con_log(CL_DLEVEL1, (CE_NOTE, 669 "AEN started for instance %d.", instance_no)); 670 671 /* Finally! We are on the air. */ 672 ddi_report_dev(dip); 673 674 if (mrsas_check_acc_handle(instance->regmap_handle) != 675 DDI_SUCCESS) { 676 goto fail_attach; 677 } 678 if (mrsas_check_acc_handle(instance->pci_handle) != 679 DDI_SUCCESS) { 680 goto fail_attach; 681 } 682 instance->mr_ld_list = 683 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 684 KM_SLEEP); 685 break; 686 case DDI_PM_RESUME: 687 con_log(CL_ANN, (CE_NOTE, 688 "mr_sas: DDI_PM_RESUME")); 689 break; 690 case DDI_RESUME: 691 con_log(CL_ANN, (CE_NOTE, 692 "mr_sas: DDI_RESUME")); 693 break; 694 default: 695 con_log(CL_ANN, (CE_WARN, 696 "mr_sas: invalid attach cmd=%x", cmd)); 697 return (DDI_FAILURE); 698 } 699 700 return (DDI_SUCCESS); 701 702 fail_initiate_aen: 703 fail_attach: 704 if (create_devctl_node_f) { 705 ddi_remove_minor_node(dip, "devctl"); 706 } 707 708 if (create_scsi_node_f) { 709 ddi_remove_minor_node(dip, "scsi"); 710 } 711 712 if (create_ioc_node_f) { 713 ddi_remove_minor_node(dip, instance->iocnode); 714 } 715 716 if (tran_alloc_f) { 717 scsi_hba_tran_free(tran); 718 } 719 720 721 if 
(added_soft_isr_f) { 722 ddi_remove_softintr(instance->soft_intr_id); 723 } 724 725 if (added_isr_f) { 726 mrsas_rem_intrs(instance); 727 } 728 729 if (instance && instance->taskq) { 730 ddi_taskq_destroy(instance->taskq); 731 } 732 733 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 734 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 735 736 mrsas_fm_fini(instance); 737 738 pci_config_teardown(&instance->pci_handle); 739 740 ddi_soft_state_free(mrsas_state, instance_no); 741 742 con_log(CL_ANN, (CE_NOTE, 743 "mr_sas: return failure from mrsas_attach")); 744 745 return (DDI_FAILURE); 746 } 747 748 /*ARGSUSED*/ 749 static int 750 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 751 { 752 int rval; 753 int mrsas_minor = getminor((dev_t)arg); 754 755 struct mrsas_instance *instance; 756 757 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 758 759 switch (cmd) { 760 case DDI_INFO_DEVT2DEVINFO: 761 instance = (struct mrsas_instance *) 762 ddi_get_soft_state(mrsas_state, 763 MINOR2INST(mrsas_minor)); 764 765 if (instance == NULL) { 766 *resultp = NULL; 767 rval = DDI_FAILURE; 768 } else { 769 *resultp = instance->dip; 770 rval = DDI_SUCCESS; 771 } 772 break; 773 case DDI_INFO_DEVT2INSTANCE: 774 *resultp = (void *)instance; 775 rval = DDI_SUCCESS; 776 break; 777 default: 778 *resultp = NULL; 779 rval = DDI_FAILURE; 780 } 781 782 return (rval); 783 } 784 785 static int 786 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 787 { 788 int instance_no; 789 790 struct mrsas_instance *instance; 791 792 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 793 794 /* CONSTCOND */ 795 ASSERT(NO_COMPETING_THREADS); 796 797 instance_no = ddi_get_instance(dip); 798 799 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 800 instance_no); 801 802 if (!instance) { 803 con_log(CL_ANN, (CE_WARN, 804 "mr_sas:%d could not get instance in detach", 805 instance_no)); 806 807 return (DDI_FAILURE); 808 } 
809 810 con_log(CL_ANN, (CE_NOTE, 811 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x", 812 instance_no, instance->vendor_id, instance->device_id, 813 instance->subsysvid, instance->subsysid)); 814 815 switch (cmd) { 816 case DDI_DETACH: 817 con_log(CL_ANN, (CE_NOTE, 818 "mrsas_detach: DDI_DETACH")); 819 820 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 821 con_log(CL_ANN, (CE_WARN, 822 "mr_sas:%d failed to detach", 823 instance_no)); 824 825 return (DDI_FAILURE); 826 } 827 828 scsi_hba_tran_free(instance->tran); 829 830 flush_cache(instance); 831 832 if (abort_aen_cmd(instance, instance->aen_cmd)) { 833 con_log(CL_ANN, (CE_WARN, "mrsas_detach: " 834 "failed to abort prevous AEN command")); 835 836 return (DDI_FAILURE); 837 } 838 839 instance->func_ptr->disable_intr(instance); 840 841 if (instance->isr_level == HIGH_LEVEL_INTR) { 842 ddi_remove_softintr(instance->soft_intr_id); 843 } 844 845 mrsas_rem_intrs(instance); 846 847 if (instance->taskq) { 848 ddi_taskq_destroy(instance->taskq); 849 } 850 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 851 * sizeof (struct mrsas_ld)); 852 free_space_for_mfi(instance); 853 854 mrsas_fm_fini(instance); 855 856 pci_config_teardown(&instance->pci_handle); 857 858 kmem_free(instance->func_ptr, 859 sizeof (struct mrsas_func_ptr)); 860 861 ddi_soft_state_free(mrsas_state, instance_no); 862 break; 863 case DDI_PM_SUSPEND: 864 con_log(CL_ANN, (CE_NOTE, 865 "mrsas_detach: DDI_PM_SUSPEND")); 866 867 break; 868 case DDI_SUSPEND: 869 con_log(CL_ANN, (CE_NOTE, 870 "mrsas_detach: DDI_SUSPEND")); 871 872 break; 873 default: 874 con_log(CL_ANN, (CE_WARN, 875 "invalid detach command:0x%x", cmd)); 876 return (DDI_FAILURE); 877 } 878 879 return (DDI_SUCCESS); 880 } 881 882 /* 883 * ************************************************************************** * 884 * * 885 * common entry points - for character driver types * 886 * * 887 * ************************************************************************** * 888 */ 889 static int 890 
mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 891 { 892 int rval = 0; 893 894 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 895 896 /* Check root permissions */ 897 if (drv_priv(credp) != 0) { 898 con_log(CL_ANN, (CE_WARN, 899 "mr_sas: Non-root ioctl access denied!")); 900 return (EPERM); 901 } 902 903 /* Verify we are being opened as a character device */ 904 if (otyp != OTYP_CHR) { 905 con_log(CL_ANN, (CE_WARN, 906 "mr_sas: ioctl node must be a char node")); 907 return (EINVAL); 908 } 909 910 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 911 == NULL) { 912 return (ENXIO); 913 } 914 915 if (scsi_hba_open) { 916 rval = scsi_hba_open(dev, openflags, otyp, credp); 917 } 918 919 return (rval); 920 } 921 922 static int 923 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 924 { 925 int rval = 0; 926 927 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 928 929 /* no need for locks! */ 930 931 if (scsi_hba_close) { 932 rval = scsi_hba_close(dev, openflags, otyp, credp); 933 } 934 935 return (rval); 936 } 937 938 static int 939 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 940 int *rvalp) 941 { 942 int rval = 0; 943 944 struct mrsas_instance *instance; 945 struct mrsas_ioctl *ioctl; 946 struct mrsas_aen aen; 947 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 948 949 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 950 951 if (instance == NULL) { 952 /* invalid minor number */ 953 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 954 return (ENXIO); 955 } 956 957 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 958 KM_SLEEP); 959 ASSERT(ioctl); 960 961 switch ((uint_t)cmd) { 962 case MRSAS_IOCTL_FIRMWARE: 963 if (ddi_copyin((void *)arg, ioctl, 964 sizeof (struct mrsas_ioctl), mode)) { 965 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 966 "ERROR IOCTL copyin")); 967 kmem_free(ioctl, sizeof (struct 
mrsas_ioctl)); 968 return (EFAULT); 969 } 970 971 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 972 rval = handle_drv_ioctl(instance, ioctl, mode); 973 } else { 974 rval = handle_mfi_ioctl(instance, ioctl, mode); 975 } 976 977 if (ddi_copyout((void *)ioctl, (void *)arg, 978 (sizeof (struct mrsas_ioctl) - 1), mode)) { 979 con_log(CL_ANN, (CE_WARN, 980 "mrsas_ioctl: copy_to_user failed")); 981 rval = 1; 982 } 983 984 break; 985 case MRSAS_IOCTL_AEN: 986 if (ddi_copyin((void *) arg, &aen, 987 sizeof (struct mrsas_aen), mode)) { 988 con_log(CL_ANN, (CE_WARN, 989 "mrsas_ioctl: ERROR AEN copyin")); 990 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 991 return (EFAULT); 992 } 993 994 rval = handle_mfi_aen(instance, &aen); 995 996 if (ddi_copyout((void *) &aen, (void *)arg, 997 sizeof (struct mrsas_aen), mode)) { 998 con_log(CL_ANN, (CE_WARN, 999 "mrsas_ioctl: copy_to_user failed")); 1000 rval = 1; 1001 } 1002 1003 break; 1004 default: 1005 rval = scsi_hba_ioctl(dev, cmd, arg, 1006 mode, credp, rvalp); 1007 1008 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: " 1009 "scsi_hba_ioctl called, ret = %x.", rval)); 1010 } 1011 1012 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1013 return (rval); 1014 } 1015 1016 /* 1017 * ************************************************************************** * 1018 * * 1019 * common entry points - for block driver types * 1020 * * 1021 * ************************************************************************** * 1022 */ 1023 /*ARGSUSED*/ 1024 static int 1025 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1026 { 1027 int instance_no; 1028 1029 struct mrsas_instance *instance; 1030 1031 instance_no = ddi_get_instance(dip); 1032 instance = (struct mrsas_instance *)ddi_get_soft_state 1033 (mrsas_state, instance_no); 1034 1035 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1036 1037 if (!instance) { 1038 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter " 1039 "in reset", instance_no)); 1040 return 
(DDI_FAILURE); 1041 } 1042 1043 instance->func_ptr->disable_intr(instance); 1044 1045 con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", 1046 instance_no)); 1047 1048 flush_cache(instance); 1049 1050 return (DDI_SUCCESS); 1051 } 1052 1053 1054 /* 1055 * ************************************************************************** * 1056 * * 1057 * entry points (SCSI HBA) * 1058 * * 1059 * ************************************************************************** * 1060 */ 1061 /*ARGSUSED*/ 1062 static int 1063 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1064 scsi_hba_tran_t *tran, struct scsi_device *sd) 1065 { 1066 struct mrsas_instance *instance; 1067 uint16_t tgt = sd->sd_address.a_target; 1068 uint8_t lun = sd->sd_address.a_lun; 1069 1070 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d", 1071 tgt, lun)); 1072 1073 instance = ADDR2MR(&sd->sd_address); 1074 1075 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 1076 (void) ndi_merge_node(tgt_dip, mrsas_name_node); 1077 ddi_set_name_addr(tgt_dip, NULL); 1078 1079 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in " 1080 "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d", 1081 tgt, lun)); 1082 return (DDI_FAILURE); 1083 } 1084 1085 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", 1086 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); 1087 1088 if (tgt < MRDRV_MAX_LD && lun == 0) { 1089 if (instance->mr_ld_list[tgt].dip == NULL && 1090 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { 1091 instance->mr_ld_list[tgt].dip = tgt_dip; 1092 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; 1093 } 1094 } 1095 return (DDI_SUCCESS); 1096 } 1097 1098 /*ARGSUSED*/ 1099 static void 1100 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1101 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1102 { 1103 struct mrsas_instance *instance; 1104 int tgt = sd->sd_address.a_target; 1105 int lun = sd->sd_address.a_lun; 1106 1107 instance = ADDR2MR(&sd->sd_address); 
	con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* only logical drives (tgt < MRDRV_MAX_LD, LUN 0) are tracked */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			instance->mr_ld_list[tgt].dip = NULL;
		}
	}
}

/*
 * mrsas_find_child - walk the children of our dip and return the node
 * whose unit-address string matches "<tgt>,<lun>" (both hex), or NULL
 * if no such child exists.
 */
static dev_info_t *
mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
{
	dev_info_t *child = NULL;
	char addr[SCSI_MAXNAMELEN];
	char tmp[MAXNAMELEN];

	(void) sprintf(addr, "%x,%x", tgt, lun);
	for (child = ddi_get_child(instance->dip); child;
	    child = ddi_get_next_sibling(child)) {

		/* children without target/lun properties are skipped */
		if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (strcmp(addr, tmp) == 0) {
			break;
		}
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p",
	    (void *)child));
	return (child);
}

/*
 * mrsas_name_node - format the "<tgt>,<lun>" unit-address for a child
 * node from its "target" and "lun" properties into name (at most len
 * bytes).  Returns DDI_FAILURE if either property is absent.
 */
static int
mrsas_name_node(dev_info_t *dip, char *name, int len)
{
	int tgt, lun;

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "target", -1);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
	if (tgt == -1) {
		return (DDI_FAILURE);
	}
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "lun", -1);
	con_log(CL_ANN1,
	    (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
	if (lun == -1) {
		return (DDI_FAILURE);
	}
	(void) snprintf(name, len, "%x,%x", tgt, lun);
	return (DDI_SUCCESS);
}

/*
 * mrsas_tran_init_pkt - SCSA tran_init_pkt(9E) entry point.
 *
 * Allocates a scsi_pkt (when pkt == NULL) and/or binds DMA resources
 * for the transfer described by bp.  A packet allocated here is freed
 * again if the subsequent DMA bind fails; a caller-supplied packet is
 * left intact on failure.
 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		new_pkt = pkt;
	} else {
		/* reuse of an existing packet: only (re)bind DMA below */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/* free only a packet we allocated ourselves */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* handle already bound: move to the next DMA window */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * mrsas_tran_start - SCSA tran_start(9E) entry point.
 *
 * Builds an MFI command for the packet and hands it to the firmware.
 * FLAG_NOINTR packets are issued in polled mode and completed in-line;
 * all others complete through the interrupt path.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* no free MFI packet: ask SCSA to retry later */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* throttle when the firmware queue is already full */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* polled (FLAG_NOINTR) path: complete the packet in-line */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MRSAS_TRUE;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* map the MFI completion status to SCSA pkt fields */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		return_mfi_pkt(instance, cmd);
		(void) mrsas_common_check(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - SCSA tran_abort(9E) entry point.
 * NOTE(review): the firmware cannot abort an in-flight command, so this
 * always fails.  DDI_FAILURE is returned where a tran_abort 0/1 result
 * is conventional - both read as "not aborted"; confirm against SCSA.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - SCSA tran_reset(9E) entry point.
 * Reset is likewise unsupported by the hardware; always fails.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* reset command not supported by H/W */

	return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - SCSA tran_getcap(9E) entry point.
 * Reports the fixed capability set of the controller; -1 means the
 * capability is unknown/unsupported.
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int rval = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = mrsas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		/* geometry comes from the LD, not the HBA */
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_setcap - SCSA tran_setcap(9E) entry point.
 * Most capabilities are fixed in firmware; the handful that "succeed"
 * simply report 1 without changing device state.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_destroy_pkt - SCSA tran_destroy_pkt(9E) entry point.
 * Releases DMA resources bound to the packet (if any) and frees the
 * packet itself.
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - SCSA tran_dmafree(9E) entry point.
 * Releases DMA resources only; the packet remains allocated.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * mrsas_tran_sync_pkt - SCSA tran_sync_pkt(9E) entry point.
 * Syncs the packet's DMA buffer in the direction recorded at bind time
 * (CFLAG_DMASEND => device-bound, otherwise CPU-bound).
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;

	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ASSERT(instance);
	/* for fixed (shared) interrupts, verify this device raised it */
	if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
	    !instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_CLAIMED);
	}

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* nothing completed; claim the interrupt anyway */
		con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
		return (DDI_INTR_CLAIMED);
	}
	mutex_enter(&instance->completed_pool_mtx);

	/* drain the reply ring onto the completed-command list */
	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* ring holds max_fw_cmds + 1 entries; wrap around */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	/* publish the new consumer index back to the firmware */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                  libraries                                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 */
static struct mrsas_cmd *
get_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	/* pop the first free command off the pool, if any */
	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->cmd_pool_mtx);

	/* NULL when the pool is exhausted; callers must check */
	return (cmd);
}

/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}

/*
 * destroy_mfi_frame_pool - free the per-command frame DMA objects
 * allocated by create_mfi_frame_pool().  Safe to call on a partially
 * built pool: only objects marked DMA_OBJ_ALLOCATED are freed.
 */
static void
destroy_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	struct mrsas_cmd *cmd;

	/* return all frames to pool (max_cmd + 1 includes the flush slot) */
	for (i = 0; i < max_cmd+1; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);

		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool - allocate, for every command slot, one DMA
 * object holding the MFI frame, its SGL area and a trailing sense
 * buffer, and record the frame/sense virtual and physical addresses
 * in the command.
 *
 * NOTE(review): failure paths return DDI_FAILURE in one place and
 * ENOMEM in another; callers only test for non-zero, so both read as
 * failure, but the mix is worth unifying.  On failure the partially
 * built pool is left for destroy_mfi_frame_pool() to clean up.
 */
static int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz = sizeof (struct mrsas_sge64);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd+1) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		/* frame must be physically contiguous and 64-byte aligned */
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer occupies the last SENSE_LENGTH bytes */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: pci_pool_alloc failed"));

			return (ENOMEM);
		}

		/* stamp the command index into the frame's context field */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}

/*
 * free_additional_dma_buffer - undo alloc_additional_dma_buffer():
 * release the internal reply-queue/scratch buffer and the event-detail
 * buffer, if allocated.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer - allocate the shared internal DMA area
 * laid out as: producer index (4 bytes), consumer index (4 bytes),
 * reply queue (max_fw_cmds + 1 entries), then a scratch buffer used
 * for DCMD data transfers; also allocate the event-detail buffer for
 * AEN processing.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: could not alloc reply queue"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the buffer: producer @0, consumer @4, reply queue @8 */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi - tear down everything alloc_space_for_mfi() set
 * up: additional DMA buffers, the frame pool, every command structure
 * and the cmd_list array itself.  Idempotent: a second call is a no-op.
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds+1; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct mrsas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct mrsas_cmd *) * (max_cmd+1));

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi - allocate the command array (max_fw_cmds + 1
 * entries; the extra slot is reserved for flush_cache and kept out of
 * the free pool), populate the free pool, and build the frame pool and
 * additional DMA buffers.
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd;
	size_t sz;

	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/* reserve 1 more slot for flush_cache */
	sz = sizeof (struct mrsas_cmd *) * (max_cmd+1);

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	ASSERT(instance->cmd_list);

	for (i = 0; i < max_cmd+1; i++) {
		instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd),
		    KM_SLEEP);
		ASSERT(instance->cmd_list[i]);
	}

	INIT_LIST_HEAD(&instance->cmd_pool_list);

	/* add all the commands to command pool (instance->cmd_pool) */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;

		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	/* single slot for flush_cache won't be added in command pool */
	cmd = instance->cmd_list[max_cmd];
	cmd->index = i;

	/* create a frame pool and assign one frame to each cmd */
	if (create_mfi_frame_pool(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	/* create a frame pool and assign one frame to each cmd */
	if (alloc_additional_dma_buffer(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * get_ctrl_info - issue MR_DCMD_CTRL_GET_INFO in polled mode and copy
 * the fields the driver needs (max_request_size, ld_present_count,
 * product_name) out of the shared internal buffer into *ctrl_info.
 * Returns 0 on success, -1 on failure.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_ctrl_info *ci;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* controller data lands in the shared internal DMA buffer */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
		/* product_name is a fixed 80-byte field in the ctrl info */
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
		ret = -1;
	}

	return_mfi_pkt(instance, cmd);
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}

	return (ret);
}

/*
 * abort_aen_cmd - issue an MFI_CMD_OP_ABORT frame, in sync mode, for
 * the outstanding AEN command and clear instance->aen_cmd.
 * Returns 0 on success, -1 on failure.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	instance->aen_cmd->abort_aen = 1;

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_sync_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	/*
	 * NOTE(review): abort_aen was already set to 1 before issuing the
	 * abort above; this second assignment looks redundant - confirm
	 * whether it was meant to run only on the failure path.
	 */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	return_mfi_pkt(instance, cmd);
	(void) mrsas_common_check(instance, cmd);

	return (ret);
}

/*
 * init_mfi - bring the firmware to READY state, size the command pool
 * from the firmware status register, allocate all MFI resources and
 * issue the MFI_CMD_OP_INIT frame that hands the reply queue to the
 * firmware.  Returns DDI_SUCCESS or DDI_FAILURE (all allocated
 * resources are released on failure).
 */
static int
init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;
	struct mrsas_ctrl_info ctrl_info;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;

	/* we expect the FW state to be READY */
	if (mfi_state_transition_to_ready(instance)) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
		goto fail_ready_state;
	}

	/* get various operational parameters from status register */
	instance->max_num_sge =
	    (instance->func_ptr->read_fw_status_reg(instance) &
	    0xFF0000) >> 0x10;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds =
	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
	instance->max_fw_cmds = instance->max_fw_cmds - 1;

	instance->max_num_sge =
	    (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
	    MRSAS_MAX_SGE_CNT : instance->max_num_sge;

	/* create a pool of commands */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS)
		goto fail_alloc_fw_space;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes.
For 2135 * this frame - since we don't need any SGL - we use SGL's space as 2136 * queue info structure 2137 */ 2138 cmd = get_mfi_pkt(instance); 2139 /* Clear the frame buffer and assign back the context id */ 2140 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2141 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2142 cmd->index); 2143 2144 init_frame = (struct mrsas_init_frame *)cmd->frame; 2145 initq_info = (struct mrsas_init_queue_info *) 2146 ((unsigned long)init_frame + 64); 2147 2148 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 2149 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 2150 2151 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 2152 2153 ddi_put32(cmd->frame_dma_obj.acc_handle, 2154 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 2155 2156 ddi_put32(cmd->frame_dma_obj.acc_handle, 2157 &initq_info->producer_index_phys_addr_hi, 0); 2158 ddi_put32(cmd->frame_dma_obj.acc_handle, 2159 &initq_info->producer_index_phys_addr_lo, 2160 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 2161 2162 ddi_put32(cmd->frame_dma_obj.acc_handle, 2163 &initq_info->consumer_index_phys_addr_hi, 0); 2164 ddi_put32(cmd->frame_dma_obj.acc_handle, 2165 &initq_info->consumer_index_phys_addr_lo, 2166 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 2167 2168 ddi_put32(cmd->frame_dma_obj.acc_handle, 2169 &initq_info->reply_queue_start_phys_addr_hi, 0); 2170 ddi_put32(cmd->frame_dma_obj.acc_handle, 2171 &initq_info->reply_queue_start_phys_addr_lo, 2172 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 2173 2174 ddi_put8(cmd->frame_dma_obj.acc_handle, 2175 &init_frame->cmd, MFI_CMD_OP_INIT); 2176 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 2177 MFI_CMD_STATUS_POLL_MODE); 2178 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 2179 ddi_put32(cmd->frame_dma_obj.acc_handle, 2180 
&init_frame->queue_info_new_phys_addr_lo, 2181 cmd->frame_phys_addr + 64); 2182 ddi_put32(cmd->frame_dma_obj.acc_handle, 2183 &init_frame->queue_info_new_phys_addr_hi, 0); 2184 2185 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 2186 sizeof (struct mrsas_init_queue_info)); 2187 2188 cmd->frame_count = 1; 2189 2190 /* issue the init frame in polled mode */ 2191 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2192 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 2193 goto fail_fw_init; 2194 } 2195 2196 return_mfi_pkt(instance, cmd); 2197 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2198 goto fail_fw_init; 2199 } 2200 2201 /* gather misc FW related information */ 2202 if (!get_ctrl_info(instance, &ctrl_info)) { 2203 instance->max_sectors_per_req = ctrl_info.max_request_size; 2204 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d", 2205 ctrl_info.product_name, ctrl_info.ld_present_count)); 2206 } else { 2207 instance->max_sectors_per_req = instance->max_num_sge * 2208 PAGESIZE / 512; 2209 } 2210 2211 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2212 goto fail_fw_init; 2213 } 2214 2215 return (DDI_SUCCESS); 2216 2217 fail_fw_init: 2218 fail_alloc_fw_space: 2219 2220 free_space_for_mfi(instance); 2221 2222 fail_ready_state: 2223 ddi_regs_map_free(&instance->regmap_handle); 2224 2225 fail_mfi_reg_setup: 2226 return (DDI_FAILURE); 2227 } 2228 2229 /* 2230 * mfi_state_transition_to_ready : Move the FW to READY state 2231 * 2232 * @reg_set : MFI register set 2233 */ 2234 static int 2235 mfi_state_transition_to_ready(struct mrsas_instance *instance) 2236 { 2237 int i; 2238 uint8_t max_wait; 2239 uint32_t fw_ctrl; 2240 uint32_t fw_state; 2241 uint32_t cur_state; 2242 2243 fw_state = 2244 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK; 2245 con_log(CL_ANN1, (CE_NOTE, 2246 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2247 2248 while (fw_state != 
MFI_STATE_READY) { 2249 con_log(CL_ANN, (CE_NOTE, 2250 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2251 2252 switch (fw_state) { 2253 case MFI_STATE_FAULT: 2254 con_log(CL_ANN, (CE_NOTE, 2255 "mr_sas: FW in FAULT state!!")); 2256 2257 return (ENODEV); 2258 case MFI_STATE_WAIT_HANDSHAKE: 2259 /* set the CLR bit in IMR0 */ 2260 con_log(CL_ANN, (CE_NOTE, 2261 "mr_sas: FW waiting for HANDSHAKE")); 2262 /* 2263 * PCI_Hot Plug: MFI F/W requires 2264 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2265 * to be set 2266 */ 2267 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 2268 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 2269 MFI_INIT_HOTPLUG, instance); 2270 2271 max_wait = 2; 2272 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2273 break; 2274 case MFI_STATE_BOOT_MESSAGE_PENDING: 2275 /* set the CLR bit in IMR0 */ 2276 con_log(CL_ANN, (CE_NOTE, 2277 "mr_sas: FW state boot message pending")); 2278 /* 2279 * PCI_Hot Plug: MFI F/W requires 2280 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2281 * to be set 2282 */ 2283 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 2284 2285 max_wait = 10; 2286 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2287 break; 2288 case MFI_STATE_OPERATIONAL: 2289 /* bring it to READY state; assuming max wait 2 secs */ 2290 instance->func_ptr->disable_intr(instance); 2291 con_log(CL_ANN1, (CE_NOTE, 2292 "mr_sas: FW in OPERATIONAL state")); 2293 /* 2294 * PCI_Hot Plug: MFI F/W requires 2295 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT) 2296 * to be set 2297 */ 2298 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 2299 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 2300 2301 max_wait = 10; 2302 cur_state = MFI_STATE_OPERATIONAL; 2303 break; 2304 case MFI_STATE_UNDEFINED: 2305 /* this state should not last for more than 2 seconds */ 2306 con_log(CL_ANN, (CE_NOTE, "FW state undefined")); 2307 2308 max_wait = 2; 2309 cur_state = MFI_STATE_UNDEFINED; 2310 break; 2311 case MFI_STATE_BB_INIT: 2312 max_wait = 2; 2313 cur_state = MFI_STATE_BB_INIT; 
2314 break; 2315 case MFI_STATE_FW_INIT: 2316 max_wait = 2; 2317 cur_state = MFI_STATE_FW_INIT; 2318 break; 2319 case MFI_STATE_DEVICE_SCAN: 2320 max_wait = 10; 2321 cur_state = MFI_STATE_DEVICE_SCAN; 2322 break; 2323 default: 2324 con_log(CL_ANN, (CE_NOTE, 2325 "mr_sas: Unknown state 0x%x", fw_state)); 2326 return (ENODEV); 2327 } 2328 2329 /* the cur_state should not last for more than max_wait secs */ 2330 for (i = 0; i < (max_wait * MILLISEC); i++) { 2331 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 2332 fw_state = 2333 instance->func_ptr->read_fw_status_reg(instance) & 2334 MFI_STATE_MASK; 2335 2336 if (fw_state == cur_state) { 2337 delay(1 * drv_usectohz(MILLISEC)); 2338 } else { 2339 break; 2340 } 2341 } 2342 2343 /* return error if fw_state hasn't changed after max_wait */ 2344 if (fw_state == cur_state) { 2345 con_log(CL_ANN, (CE_NOTE, 2346 "FW state hasn't changed in %d secs", max_wait)); 2347 return (ENODEV); 2348 } 2349 }; 2350 2351 fw_ctrl = RD_IB_DOORBELL(instance); 2352 2353 con_log(CL_ANN1, (CE_NOTE, 2354 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2355 2356 /* 2357 * Write 0xF to the doorbell register to do the following. 2358 * - Abort all outstanding commands (bit 0). 2359 * - Transition from OPERATIONAL to READY state (bit 1). 2360 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2361 * - Set to release FW to continue running (i.e. BIOS handshake 2362 * (bit 3). 
2363 */ 2364 WR_IB_DOORBELL(0xF, instance); 2365 2366 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2367 return (ENODEV); 2368 } 2369 return (DDI_SUCCESS); 2370 } 2371 2372 /* 2373 * get_seq_num 2374 */ 2375 static int 2376 get_seq_num(struct mrsas_instance *instance, 2377 struct mrsas_evt_log_info *eli) 2378 { 2379 int ret = DDI_SUCCESS; 2380 2381 dma_obj_t dcmd_dma_obj; 2382 struct mrsas_cmd *cmd; 2383 struct mrsas_dcmd_frame *dcmd; 2384 struct mrsas_evt_log_info *eli_tmp; 2385 cmd = get_mfi_pkt(instance); 2386 2387 if (!cmd) { 2388 cmn_err(CE_WARN, "mr_sas: failed to get a cmd"); 2389 return (ENOMEM); 2390 } 2391 /* Clear the frame buffer and assign back the context id */ 2392 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 2393 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 2394 cmd->index); 2395 2396 dcmd = &cmd->frame->dcmd; 2397 2398 /* allocate the data transfer buffer */ 2399 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); 2400 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 2401 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2402 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2403 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2404 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2405 2406 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 2407 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 2408 con_log(CL_ANN, (CE_WARN, 2409 "get_seq_num: could not allocate data transfer buffer.")); 2410 return (DDI_FAILURE); 2411 } 2412 2413 (void) memset(dcmd_dma_obj.buffer, 0, 2414 sizeof (struct mrsas_evt_log_info)); 2415 2416 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2417 2418 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 2419 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); 2420 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 2421 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 2422 MFI_FRAME_DIR_READ); 2423 
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_log_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_log_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		cmn_err(CE_WARN, "get_seq_num: "
		    "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
		ret = DDI_FAILURE;
	} else {
		/* copy the result out of the DMA buffer via the acc handle */
		eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
		eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
		    &eli_tmp->newest_seq_num);
		ret = DDI_SUCCESS;
	}

	if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
		ret = DDI_FAILURE;

	return_mfi_pkt(instance, cmd);
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
	}
	return (ret);
}

/*
 * start_mfi_aen
 *
 * Register with the firmware for asynchronous event notification (AEN),
 * starting from the sequence number following the newest one the
 * firmware has logged.  Returns 0 on success, -1 on failure.
 */
static int
start_mfi_aen(struct mrsas_instance *instance)
{
	int ret = 0;

	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;

	/* get the latest sequence number from FW */
	(void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));

	if (get_seq_num(instance, &eli)) {
		cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
		return (-1);
	}

	/* register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
	class_locale.members.class = MR_EVT_CLASS_INFO;
	class_locale.word = LE_32(class_locale.word);
	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
	    class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
		return (-1);
	}

	return (ret);
}

/*
 * flush_cache
 *
 * Ask the firmware to flush its controller cache and the disk caches.
 * Issued in polled mode; failure is only logged (best effort).
 */
static void
flush_cache(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t max_cmd = instance->max_fw_cmds;

	/*
	 * NOTE(review): slot max_fw_cmds (one past the last FW-visible
	 * command) appears to be a command reserved for internal use --
	 * confirm against the command-list allocation code.
	 */
	cmd = instance->cmd_list[max_cmd];

	if (cmd == NULL)
		return;

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_NONE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_CACHE_FLUSH);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
	    MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "done"));
}

/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Logs the event to the sysevent queue, initiates (un)configuration of
 * logical drives affected by the event, and re-registers the AEN
 * command with the firmware for the next sequence number.
 */
static void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	ddi_acc_handle_t acc_handle;
	acc_handle = cmd->frame_dma_obj.acc_handle;

	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);

	/*
	 * NOTE(review): ENODATA here is compared against a firmware status
	 * byte; presumably the intended MFI status code happens to share
	 * this value.  Confirm against the MFI status definitions.
	 */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* configuration wiped: unconfigure every attached LD */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */
	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	/*
	 * NOTE(review): ENODATA is compared against a firmware status byte;
	 * presumably the intended MFI status code shares this value.
	 */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/* wake up issue_cmd_in_sync_mode(), which waits on this cv */
	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * mrsas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called"));

	ASSERT(instance);
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/* move the whole completed list onto a private list under the lock */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			/*
			 * NOTE(review): this early return leaves
			 * softint_running set and abandons the rest of
			 * process_list -- verify that is the intended
			 * behavior on a faulted DMA handle.
			 */
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* translate the MFI status into SCSA pkt status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* fabricate auto request sense data */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    acmd->cmd_scblen -
					    offsetof(struct scsi_arq_status,
					    sts_sensedata), DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));

			return_mfi_pkt(instance, cmd);

			(void) mrsas_common_check(instance, cmd);

			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {
					(*pkt->pkt_comp)(pkt);
				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mrsas_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 */
static int
mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
    uchar_t endian_flags)
{
	int i;
	size_t alen = 0;
	uint_t cookie_cnt;
	struct ddi_device_acc_attr tmp_endian_attr;

	tmp_endian_attr = endian_attr;
	tmp_endian_attr.devacc_attr_endian_flags = endian_flags;

	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
	if (i != DDI_SUCCESS) {

		switch (i) {
		case DDI_DMA_BADATTR :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- Bad attribute"));
			break;
		case DDI_DMA_NORESOURCES :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- No Resources"));
			break;
		default :
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle: "
			    "unknown status %d", i));
			break;
		}

		return (-1);
	}

	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
	    alen < obj->size) {

		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

		return (-1);
	}

	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

		ddi_dma_mem_free(&obj->acc_handle);
		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

		return (-1);
	}

	/*
	 * NOTE(review): the two FM-check failure paths below return -1
	 * without unbinding/freeing the just-allocated resources -- confirm
	 * whether callers are expected to clean up in that case.
	 */
	if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	/* success: callers expect the bound cookie count (1 when sgllen=1) */
	return (cookie_cnt);
}

/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mrsas_alloc_dma_obj()
 */
static int
mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
{

	if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	(void) ddi_dma_unbind_handle(obj.dma_handle);
	ddi_dma_mem_free(&obj.acc_handle);
	ddi_dma_free_handle(&obj.dma_handle);

	return (DDI_SUCCESS);
}

/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 */
static int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* transfer direction follows the buf(9S) flags */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* sleep vs. fail-fast allocation, per the caller's callback */
	cb = (callback == NULL_FUNC) ?
	    DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* bound only a window; set up windowed (partial) DMA state */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* whole buffer bound in a single window */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/* collect cookies up to the adapter's SGE limit */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}

/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd *acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-consumed window: nothing left to move */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI frame (LD read/write frame for
 * logical-drive I/O, pass-through frame otherwise) and return the
 * prepared command; *cmd_done is set when the request was completed
 * inline and no firmware command is needed.
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t flags = 0;
	uint32_t i;
	uint32_t context;
	uint32_t sge_bytes;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd *cmd;
	struct mrsas_sge64 *mfi_sgl;
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame *pthru;
	struct mrsas_io_frame *ldio;

	/* find out if this is logical or physical drive command.
*/ 3283 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3284 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3285 *cmd_done = 0; 3286 3287 /* get the command packet */ 3288 if (!(cmd = get_mfi_pkt(instance))) { 3289 return (NULL); 3290 } 3291 3292 acc_handle = cmd->frame_dma_obj.acc_handle; 3293 3294 /* Clear the frame buffer and assign back the context id */ 3295 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3296 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3297 3298 cmd->pkt = pkt; 3299 cmd->cmd = acmd; 3300 3301 /* lets get the command directions */ 3302 if (acmd->cmd_flags & CFLAG_DMASEND) { 3303 flags = MFI_FRAME_DIR_WRITE; 3304 3305 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3306 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3307 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3308 DDI_DMA_SYNC_FORDEV); 3309 } 3310 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3311 flags = MFI_FRAME_DIR_READ; 3312 3313 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3314 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3315 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3316 DDI_DMA_SYNC_FORCPU); 3317 } 3318 } else { 3319 flags = MFI_FRAME_DIR_NONE; 3320 } 3321 3322 flags |= MFI_FRAME_SGL64; 3323 3324 switch (pkt->pkt_cdbp[0]) { 3325 3326 /* 3327 * case SCMD_SYNCHRONIZE_CACHE: 3328 * flush_cache(instance); 3329 * return_mfi_pkt(instance, cmd); 3330 * *cmd_done = 1; 3331 * 3332 * return (NULL); 3333 */ 3334 3335 case SCMD_READ: 3336 case SCMD_WRITE: 3337 case SCMD_READ_G1: 3338 case SCMD_WRITE_G1: 3339 if (acmd->islogical) { 3340 ldio = (struct mrsas_io_frame *)cmd->frame; 3341 3342 /* 3343 * preare the Logical IO frame: 3344 * 2nd bit is zero for all read cmds 3345 */ 3346 ddi_put8(acc_handle, &ldio->cmd, 3347 (pkt->pkt_cdbp[0] & 0x02) ? 
MFI_CMD_OP_LD_WRITE 3348 : MFI_CMD_OP_LD_READ); 3349 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3350 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3351 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3352 ddi_put16(acc_handle, &ldio->timeout, 0); 3353 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3354 ddi_put16(acc_handle, &ldio->pad_0, 0); 3355 ddi_put16(acc_handle, &ldio->flags, flags); 3356 3357 /* Initialize sense Information */ 3358 bzero(cmd->sense, SENSE_LENGTH); 3359 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3360 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3361 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3362 cmd->sense_phys_addr); 3363 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3364 ddi_put8(acc_handle, &ldio->access_byte, 3365 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 3366 ddi_put8(acc_handle, &ldio->sge_count, 3367 acmd->cmd_cookiecnt); 3368 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 3369 3370 context = ddi_get32(acc_handle, &ldio->context); 3371 3372 if (acmd->cmd_cdblen == CDB_GROUP0) { 3373 ddi_put32(acc_handle, &ldio->lba_count, ( 3374 (uint16_t)(pkt->pkt_cdbp[4]))); 3375 3376 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3377 ((uint32_t)(pkt->pkt_cdbp[3])) | 3378 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3379 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3380 << 16))); 3381 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3382 ddi_put32(acc_handle, &ldio->lba_count, ( 3383 ((uint16_t)(pkt->pkt_cdbp[8])) | 3384 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 3385 3386 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3387 ((uint32_t)(pkt->pkt_cdbp[5])) | 3388 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3389 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3390 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3391 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3392 ddi_put32(acc_handle, &ldio->lba_count, ( 3393 ((uint16_t)(pkt->pkt_cdbp[9])) | 3394 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3395 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3396 
((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 3397 3398 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3399 ((uint32_t)(pkt->pkt_cdbp[5])) | 3400 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3401 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3402 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3403 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3404 ddi_put32(acc_handle, &ldio->lba_count, ( 3405 ((uint16_t)(pkt->pkt_cdbp[13])) | 3406 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3407 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3408 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 3409 3410 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3411 ((uint32_t)(pkt->pkt_cdbp[9])) | 3412 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3413 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3414 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 3415 3416 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3417 ((uint32_t)(pkt->pkt_cdbp[5])) | 3418 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3419 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3420 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3421 } 3422 3423 break; 3424 } 3425 /* fall through For all non-rd/wr cmds */ 3426 default: 3427 3428 switch (pkt->pkt_cdbp[0]) { 3429 case SCMD_MODE_SENSE: 3430 case SCMD_MODE_SENSE_G1: { 3431 union scsi_cdb *cdbp; 3432 uint16_t page_code; 3433 3434 cdbp = (void *)pkt->pkt_cdbp; 3435 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 3436 switch (page_code) { 3437 case 0x3: 3438 case 0x4: 3439 (void) mrsas_mode_sense_build(pkt); 3440 return_mfi_pkt(instance, cmd); 3441 *cmd_done = 1; 3442 return (NULL); 3443 } 3444 break; 3445 } 3446 default: 3447 break; 3448 } 3449 3450 pthru = (struct mrsas_pthru_frame *)cmd->frame; 3451 3452 /* prepare the DCDB frame */ 3453 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
3454 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 3455 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 3456 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 3457 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 3458 ddi_put8(acc_handle, &pthru->lun, 0); 3459 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 3460 ddi_put16(acc_handle, &pthru->timeout, 0); 3461 ddi_put16(acc_handle, &pthru->flags, flags); 3462 ddi_put32(acc_handle, &pthru->data_xfer_len, 3463 acmd->cmd_dmacount); 3464 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 3465 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 3466 3467 bzero(cmd->sense, SENSE_LENGTH); 3468 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 3469 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3470 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 3471 cmd->sense_phys_addr); 3472 3473 context = ddi_get32(acc_handle, &pthru->context); 3474 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 3475 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 3476 3477 break; 3478 } 3479 #ifdef lint 3480 context = context; 3481 #endif 3482 /* prepare the scatter-gather list for the firmware */ 3483 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3484 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 3485 acmd->cmd_dmacookies[i].dmac_laddress); 3486 ddi_put32(acc_handle, &mfi_sgl->length, 3487 acmd->cmd_dmacookies[i].dmac_size); 3488 } 3489 3490 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 3491 3492 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 3493 ((sge_bytes % MRMFI_FRAME_SIZE) ? 
1 : 0) + 1; 3494 3495 if (cmd->frame_count >= 8) { 3496 cmd->frame_count = 8; 3497 } 3498 3499 return (cmd); 3500 } 3501 3502 /* 3503 * issue_mfi_pthru 3504 */ 3505 static int 3506 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3507 struct mrsas_cmd *cmd, int mode) 3508 { 3509 void *ubuf; 3510 uint32_t kphys_addr = 0; 3511 uint32_t xferlen = 0; 3512 uint_t model; 3513 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3514 dma_obj_t pthru_dma_obj; 3515 struct mrsas_pthru_frame *kpthru; 3516 struct mrsas_pthru_frame *pthru; 3517 int i; 3518 pthru = &cmd->frame->pthru; 3519 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 3520 3521 model = ddi_model_convert_from(mode & FMODELS); 3522 if (model == DDI_MODEL_ILP32) { 3523 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3524 3525 xferlen = kpthru->sgl.sge32[0].length; 3526 3527 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3528 } else { 3529 #ifdef _ILP32 3530 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3531 xferlen = kpthru->sgl.sge32[0].length; 3532 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3533 #else 3534 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3535 xferlen = kpthru->sgl.sge64[0].length; 3536 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3537 #endif 3538 } 3539 3540 if (xferlen) { 3541 /* means IOCTL requires DMA */ 3542 /* allocate the data transfer buffer */ 3543 pthru_dma_obj.size = xferlen; 3544 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 3545 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3546 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3547 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3548 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3549 3550 /* allocate kernel buffer for DMA */ 3551 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 3552 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3553 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3554 "could not allocate 
data transfer buffer.")); 3555 return (DDI_FAILURE); 3556 } 3557 3558 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3559 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3560 for (i = 0; i < xferlen; i++) { 3561 if (ddi_copyin((uint8_t *)ubuf+i, 3562 (uint8_t *)pthru_dma_obj.buffer+i, 3563 1, mode)) { 3564 con_log(CL_ANN, (CE_WARN, 3565 "issue_mfi_pthru : " 3566 "copy from user space failed")); 3567 return (DDI_FAILURE); 3568 } 3569 } 3570 } 3571 3572 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3573 } 3574 3575 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 3576 ddi_put8(acc_handle, &pthru->sense_len, kpthru->sense_len); 3577 ddi_put8(acc_handle, &pthru->cmd_status, 0); 3578 ddi_put8(acc_handle, &pthru->scsi_status, 0); 3579 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 3580 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 3581 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 3582 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 3583 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 3584 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 3585 3586 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3587 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3588 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 3589 3590 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 3591 pthru->cdb_len, DDI_DEV_AUTOINCR); 3592 3593 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 3594 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 3595 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 3596 3597 cmd->sync_cmd = MRSAS_TRUE; 3598 cmd->frame_count = 1; 3599 3600 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3601 con_log(CL_ANN, (CE_WARN, 3602 "issue_mfi_pthru: fw_ioctl failed")); 3603 } else { 3604 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 3605 for (i = 0; i < xferlen; i++) { 
3606 if (ddi_copyout( 3607 (uint8_t *)pthru_dma_obj.buffer+i, 3608 (uint8_t *)ubuf+i, 1, mode)) { 3609 con_log(CL_ANN, (CE_WARN, 3610 "issue_mfi_pthru : " 3611 "copy to user space failed")); 3612 return (DDI_FAILURE); 3613 } 3614 } 3615 } 3616 } 3617 3618 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 3619 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 3620 3621 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3622 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 3623 3624 if (xferlen) { 3625 /* free kernel buffer */ 3626 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3627 return (DDI_FAILURE); 3628 } 3629 3630 return (DDI_SUCCESS); 3631 } 3632 3633 /* 3634 * issue_mfi_dcmd 3635 */ 3636 static int 3637 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3638 struct mrsas_cmd *cmd, int mode) 3639 { 3640 void *ubuf; 3641 uint32_t kphys_addr = 0; 3642 uint32_t xferlen = 0; 3643 uint32_t model; 3644 dma_obj_t dcmd_dma_obj; 3645 struct mrsas_dcmd_frame *kdcmd; 3646 struct mrsas_dcmd_frame *dcmd; 3647 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3648 int i; 3649 dcmd = &cmd->frame->dcmd; 3650 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 3651 3652 model = ddi_model_convert_from(mode & FMODELS); 3653 if (model == DDI_MODEL_ILP32) { 3654 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3655 3656 xferlen = kdcmd->sgl.sge32[0].length; 3657 3658 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3659 } else { 3660 #ifdef _ILP32 3661 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3662 xferlen = kdcmd->sgl.sge32[0].length; 3663 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3664 #else 3665 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3666 xferlen = kdcmd->sgl.sge64[0].length; 3667 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 3668 #endif 3669 } 3670 if (xferlen) { 3671 /* means IOCTL 
requires DMA */ 3672 /* allocate the data transfer buffer */ 3673 dcmd_dma_obj.size = xferlen; 3674 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 3675 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3676 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3677 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3678 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3679 3680 /* allocate kernel buffer for DMA */ 3681 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 3682 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3683 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3684 "could not allocate data transfer buffer.")); 3685 return (DDI_FAILURE); 3686 } 3687 3688 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3689 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3690 for (i = 0; i < xferlen; i++) { 3691 if (ddi_copyin((uint8_t *)ubuf + i, 3692 (uint8_t *)dcmd_dma_obj.buffer + i, 3693 1, mode)) { 3694 con_log(CL_ANN, (CE_WARN, 3695 "issue_mfi_dcmd : " 3696 "copy from user space failed")); 3697 return (DDI_FAILURE); 3698 } 3699 } 3700 } 3701 3702 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3703 } 3704 3705 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 3706 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 3707 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 3708 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 3709 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 3710 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 3711 3712 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 3713 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 3714 3715 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 3716 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 3717 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr); 3718 3719 cmd->sync_cmd = MRSAS_TRUE; 3720 cmd->frame_count = 1; 3721 3722 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3723 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: 
fw_ioctl failed")); 3724 } else { 3725 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3726 for (i = 0; i < xferlen; i++) { 3727 if (ddi_copyout( 3728 (uint8_t *)dcmd_dma_obj.buffer + i, 3729 (uint8_t *)ubuf + i, 3730 1, mode)) { 3731 con_log(CL_ANN, (CE_WARN, 3732 "issue_mfi_dcmd : " 3733 "copy to user space failed")); 3734 return (DDI_FAILURE); 3735 } 3736 } 3737 } 3738 } 3739 3740 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 3741 3742 if (xferlen) { 3743 /* free kernel buffer */ 3744 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3745 return (DDI_FAILURE); 3746 } 3747 3748 return (DDI_SUCCESS); 3749 } 3750 3751 /* 3752 * issue_mfi_smp 3753 */ 3754 static int 3755 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3756 struct mrsas_cmd *cmd, int mode) 3757 { 3758 void *request_ubuf; 3759 void *response_ubuf; 3760 uint32_t request_xferlen = 0; 3761 uint32_t response_xferlen = 0; 3762 uint_t model; 3763 dma_obj_t request_dma_obj; 3764 dma_obj_t response_dma_obj; 3765 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3766 struct mrsas_smp_frame *ksmp; 3767 struct mrsas_smp_frame *smp; 3768 struct mrsas_sge32 *sge32; 3769 #ifndef _ILP32 3770 struct mrsas_sge64 *sge64; 3771 #endif 3772 int i; 3773 uint64_t tmp_sas_addr; 3774 3775 smp = &cmd->frame->smp; 3776 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; 3777 3778 model = ddi_model_convert_from(mode & FMODELS); 3779 if (model == DDI_MODEL_ILP32) { 3780 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3781 3782 sge32 = &ksmp->sgl[0].sge32[0]; 3783 response_xferlen = sge32[0].length; 3784 request_xferlen = sge32[1].length; 3785 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3786 "response_xferlen = %x, request_xferlen = %x", 3787 response_xferlen, request_xferlen)); 3788 3789 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3790 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3791 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 
3792 "response_ubuf = %p, request_ubuf = %p", 3793 response_ubuf, request_ubuf)); 3794 } else { 3795 #ifdef _ILP32 3796 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3797 3798 sge32 = &ksmp->sgl[0].sge32[0]; 3799 response_xferlen = sge32[0].length; 3800 request_xferlen = sge32[1].length; 3801 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3802 "response_xferlen = %x, request_xferlen = %x", 3803 response_xferlen, request_xferlen)); 3804 3805 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3806 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3807 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3808 "response_ubuf = %p, request_ubuf = %p", 3809 response_ubuf, request_ubuf)); 3810 #else 3811 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 3812 3813 sge64 = &ksmp->sgl[0].sge64[0]; 3814 response_xferlen = sge64[0].length; 3815 request_xferlen = sge64[1].length; 3816 3817 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 3818 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 3819 #endif 3820 } 3821 if (request_xferlen) { 3822 /* means IOCTL requires DMA */ 3823 /* allocate the data transfer buffer */ 3824 request_dma_obj.size = request_xferlen; 3825 request_dma_obj.dma_attr = mrsas_generic_dma_attr; 3826 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3827 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3828 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 3829 request_dma_obj.dma_attr.dma_attr_align = 1; 3830 3831 /* allocate kernel buffer for DMA */ 3832 if (mrsas_alloc_dma_obj(instance, &request_dma_obj, 3833 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3834 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3835 "could not allocate data transfer buffer.")); 3836 return (DDI_FAILURE); 3837 } 3838 3839 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3840 for (i = 0; i < request_xferlen; i++) { 3841 if (ddi_copyin((uint8_t *)request_ubuf + i, 3842 (uint8_t *)request_dma_obj.buffer + i, 3843 1, mode)) { 3844 
con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3845 "copy from user space failed")); 3846 return (DDI_FAILURE); 3847 } 3848 } 3849 } 3850 3851 if (response_xferlen) { 3852 /* means IOCTL requires DMA */ 3853 /* allocate the data transfer buffer */ 3854 response_dma_obj.size = response_xferlen; 3855 response_dma_obj.dma_attr = mrsas_generic_dma_attr; 3856 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3857 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3858 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 3859 response_dma_obj.dma_attr.dma_attr_align = 1; 3860 3861 /* allocate kernel buffer for DMA */ 3862 if (mrsas_alloc_dma_obj(instance, &response_dma_obj, 3863 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3864 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3865 "could not allocate data transfer buffer.")); 3866 return (DDI_FAILURE); 3867 } 3868 3869 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3870 for (i = 0; i < response_xferlen; i++) { 3871 if (ddi_copyin((uint8_t *)response_ubuf + i, 3872 (uint8_t *)response_dma_obj.buffer + i, 3873 1, mode)) { 3874 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3875 "copy from user space failed")); 3876 return (DDI_FAILURE); 3877 } 3878 } 3879 } 3880 3881 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd); 3882 ddi_put8(acc_handle, &smp->cmd_status, 0); 3883 ddi_put8(acc_handle, &smp->connection_status, 0); 3884 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count); 3885 /* smp->context = ksmp->context; */ 3886 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout); 3887 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len); 3888 3889 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr, 3890 sizeof (uint64_t)); 3891 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr); 3892 3893 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64); 3894 3895 model = ddi_model_convert_from(mode & FMODELS); 3896 if (model == DDI_MODEL_ILP32) { 3897 con_log(CL_ANN1, (CE_NOTE, 3898 "issue_mfi_smp: 
DDI_MODEL_ILP32")); 3899 3900 sge32 = &smp->sgl[0].sge32[0]; 3901 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3902 ddi_put32(acc_handle, &sge32[0].phys_addr, 3903 response_dma_obj.dma_cookie[0].dmac_address); 3904 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3905 ddi_put32(acc_handle, &sge32[1].phys_addr, 3906 request_dma_obj.dma_cookie[0].dmac_address); 3907 } else { 3908 #ifdef _ILP32 3909 con_log(CL_ANN1, (CE_NOTE, 3910 "issue_mfi_smp: DDI_MODEL_ILP32")); 3911 sge32 = &smp->sgl[0].sge32[0]; 3912 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3913 ddi_put32(acc_handle, &sge32[0].phys_addr, 3914 response_dma_obj.dma_cookie[0].dmac_address); 3915 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3916 ddi_put32(acc_handle, &sge32[1].phys_addr, 3917 request_dma_obj.dma_cookie[0].dmac_address); 3918 #else 3919 con_log(CL_ANN1, (CE_NOTE, 3920 "issue_mfi_smp: DDI_MODEL_LP64")); 3921 sge64 = &smp->sgl[0].sge64[0]; 3922 ddi_put32(acc_handle, &sge64[0].length, response_xferlen); 3923 ddi_put64(acc_handle, &sge64[0].phys_addr, 3924 response_dma_obj.dma_cookie[0].dmac_address); 3925 ddi_put32(acc_handle, &sge64[1].length, request_xferlen); 3926 ddi_put64(acc_handle, &sge64[1].phys_addr, 3927 request_dma_obj.dma_cookie[0].dmac_address); 3928 #endif 3929 } 3930 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : " 3931 "smp->response_xferlen = %d, smp->request_xferlen = %d " 3932 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), 3933 ddi_get32(acc_handle, &sge32[1].length), 3934 ddi_get32(acc_handle, &smp->data_xfer_len))); 3935 3936 cmd->sync_cmd = MRSAS_TRUE; 3937 cmd->frame_count = 1; 3938 3939 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3940 con_log(CL_ANN, (CE_WARN, 3941 "issue_mfi_smp: fw_ioctl failed")); 3942 } else { 3943 con_log(CL_ANN1, (CE_NOTE, 3944 "issue_mfi_smp: copy to user space")); 3945 3946 if (request_xferlen) { 3947 for (i = 0; i < request_xferlen; i++) { 3948 if (ddi_copyout( 
3949 (uint8_t *)request_dma_obj.buffer + 3950 i, (uint8_t *)request_ubuf + i, 3951 1, mode)) { 3952 con_log(CL_ANN, (CE_WARN, 3953 "issue_mfi_smp : copy to user space" 3954 " failed")); 3955 return (DDI_FAILURE); 3956 } 3957 } 3958 } 3959 3960 if (response_xferlen) { 3961 for (i = 0; i < response_xferlen; i++) { 3962 if (ddi_copyout( 3963 (uint8_t *)response_dma_obj.buffer 3964 + i, (uint8_t *)response_ubuf 3965 + i, 1, mode)) { 3966 con_log(CL_ANN, (CE_WARN, 3967 "issue_mfi_smp : copy to " 3968 "user space failed")); 3969 return (DDI_FAILURE); 3970 } 3971 } 3972 } 3973 } 3974 3975 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); 3976 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 3977 ddi_get8(acc_handle, &smp->cmd_status))); 3978 3979 3980 if (request_xferlen) { 3981 /* free kernel buffer */ 3982 if (mrsas_free_dma_obj(instance, request_dma_obj) != 3983 DDI_SUCCESS) 3984 return (DDI_FAILURE); 3985 } 3986 3987 if (response_xferlen) { 3988 /* free kernel buffer */ 3989 if (mrsas_free_dma_obj(instance, response_dma_obj) != 3990 DDI_SUCCESS) 3991 return (DDI_FAILURE); 3992 } 3993 3994 return (DDI_SUCCESS); 3995 } 3996 3997 /* 3998 * issue_mfi_stp 3999 */ 4000 static int 4001 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4002 struct mrsas_cmd *cmd, int mode) 4003 { 4004 void *fis_ubuf; 4005 void *data_ubuf; 4006 uint32_t fis_xferlen = 0; 4007 uint32_t data_xferlen = 0; 4008 uint_t model; 4009 dma_obj_t fis_dma_obj; 4010 dma_obj_t data_dma_obj; 4011 struct mrsas_stp_frame *kstp; 4012 struct mrsas_stp_frame *stp; 4013 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4014 int i; 4015 4016 stp = &cmd->frame->stp; 4017 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0]; 4018 4019 model = ddi_model_convert_from(mode & FMODELS); 4020 if (model == DDI_MODEL_ILP32) { 4021 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4022 4023 fis_xferlen = kstp->sgl.sge32[0].length; 4024 data_xferlen = 
kstp->sgl.sge32[1].length; 4025 4026 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4027 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4028 } 4029 else 4030 { 4031 #ifdef _ILP32 4032 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4033 4034 fis_xferlen = kstp->sgl.sge32[0].length; 4035 data_xferlen = kstp->sgl.sge32[1].length; 4036 4037 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4038 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4039 #else 4040 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); 4041 4042 fis_xferlen = kstp->sgl.sge64[0].length; 4043 data_xferlen = kstp->sgl.sge64[1].length; 4044 4045 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 4046 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 4047 #endif 4048 } 4049 4050 4051 if (fis_xferlen) { 4052 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " 4053 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 4054 4055 /* means IOCTL requires DMA */ 4056 /* allocate the data transfer buffer */ 4057 fis_dma_obj.size = fis_xferlen; 4058 fis_dma_obj.dma_attr = mrsas_generic_dma_attr; 4059 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4060 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4061 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 4062 fis_dma_obj.dma_attr.dma_attr_align = 1; 4063 4064 /* allocate kernel buffer for DMA */ 4065 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj, 4066 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4067 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : " 4068 "could not allocate data transfer buffer.")); 4069 return (DDI_FAILURE); 4070 } 4071 4072 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4073 for (i = 0; i < fis_xferlen; i++) { 4074 if (ddi_copyin((uint8_t *)fis_ubuf + i, 4075 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) { 4076 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4077 "copy from user space failed")); 4078 return (DDI_FAILURE); 4079 } 4080 } 4081 } 4082 4083 if 
(data_xferlen) { 4084 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " 4085 "data_xferlen = %x", data_ubuf, data_xferlen)); 4086 4087 /* means IOCTL requires DMA */ 4088 /* allocate the data transfer buffer */ 4089 data_dma_obj.size = data_xferlen; 4090 data_dma_obj.dma_attr = mrsas_generic_dma_attr; 4091 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4092 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4093 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 4094 data_dma_obj.dma_attr.dma_attr_align = 1; 4095 4096 /* allocate kernel buffer for DMA */ 4097 if (mrsas_alloc_dma_obj(instance, &data_dma_obj, 4098 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4099 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4100 "could not allocate data transfer buffer.")); 4101 return (DDI_FAILURE); 4102 } 4103 4104 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4105 for (i = 0; i < data_xferlen; i++) { 4106 if (ddi_copyin((uint8_t *)data_ubuf + i, 4107 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) { 4108 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4109 "copy from user space failed")); 4110 return (DDI_FAILURE); 4111 } 4112 } 4113 } 4114 4115 ddi_put8(acc_handle, &stp->cmd, kstp->cmd); 4116 ddi_put8(acc_handle, &stp->cmd_status, 0); 4117 ddi_put8(acc_handle, &stp->connection_status, 0); 4118 ddi_put8(acc_handle, &stp->target_id, kstp->target_id); 4119 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count); 4120 4121 ddi_put16(acc_handle, &stp->timeout, kstp->timeout); 4122 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len); 4123 4124 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10, 4125 DDI_DEV_AUTOINCR); 4126 4127 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64); 4128 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags); 4129 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen); 4130 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr, 4131 fis_dma_obj.dma_cookie[0].dmac_address); 4132 
ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen); 4133 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr, 4134 data_dma_obj.dma_cookie[0].dmac_address); 4135 4136 cmd->sync_cmd = MRSAS_TRUE; 4137 cmd->frame_count = 1; 4138 4139 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4140 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); 4141 } else { 4142 4143 if (fis_xferlen) { 4144 for (i = 0; i < fis_xferlen; i++) { 4145 if (ddi_copyout( 4146 (uint8_t *)fis_dma_obj.buffer + i, 4147 (uint8_t *)fis_ubuf + i, 1, mode)) { 4148 con_log(CL_ANN, (CE_WARN, 4149 "issue_mfi_stp : copy to " 4150 "user space failed")); 4151 return (DDI_FAILURE); 4152 } 4153 } 4154 } 4155 } 4156 if (data_xferlen) { 4157 for (i = 0; i < data_xferlen; i++) { 4158 if (ddi_copyout( 4159 (uint8_t *)data_dma_obj.buffer + i, 4160 (uint8_t *)data_ubuf + i, 1, mode)) { 4161 con_log(CL_ANN, (CE_WARN, 4162 "issue_mfi_stp : copy to" 4163 " user space failed")); 4164 return (DDI_FAILURE); 4165 } 4166 } 4167 } 4168 4169 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); 4170 4171 if (fis_xferlen) { 4172 /* free kernel buffer */ 4173 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 4174 return (DDI_FAILURE); 4175 } 4176 4177 if (data_xferlen) { 4178 /* free kernel buffer */ 4179 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 4180 return (DDI_FAILURE); 4181 } 4182 4183 return (DDI_SUCCESS); 4184 } 4185 4186 /* 4187 * fill_up_drv_ver 4188 */ 4189 static void 4190 fill_up_drv_ver(struct mrsas_drv_ver *dv) 4191 { 4192 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); 4193 4194 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4195 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4196 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas")); 4197 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); 4198 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, 4199 strlen(MRSAS_RELDATE)); 4200 } 4201 4202 /* 
4203 * handle_drv_ioctl 4204 */ 4205 static int 4206 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4207 int mode) 4208 { 4209 int i; 4210 int rval = DDI_SUCCESS; 4211 int *props = NULL; 4212 void *ubuf; 4213 4214 uint8_t *pci_conf_buf; 4215 uint32_t xferlen; 4216 uint32_t num_props; 4217 uint_t model; 4218 struct mrsas_dcmd_frame *kdcmd; 4219 struct mrsas_drv_ver dv; 4220 struct mrsas_pci_information pi; 4221 4222 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4223 4224 model = ddi_model_convert_from(mode & FMODELS); 4225 if (model == DDI_MODEL_ILP32) { 4226 con_log(CL_ANN1, (CE_NOTE, 4227 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4228 4229 xferlen = kdcmd->sgl.sge32[0].length; 4230 4231 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4232 } else { 4233 #ifdef _ILP32 4234 con_log(CL_ANN1, (CE_NOTE, 4235 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4236 xferlen = kdcmd->sgl.sge32[0].length; 4237 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4238 #else 4239 con_log(CL_ANN1, (CE_NOTE, 4240 "handle_drv_ioctl: DDI_MODEL_LP64")); 4241 xferlen = kdcmd->sgl.sge64[0].length; 4242 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4243 #endif 4244 } 4245 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4246 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4247 4248 switch (kdcmd->opcode) { 4249 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: 4250 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4251 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); 4252 4253 fill_up_drv_ver(&dv); 4254 4255 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4256 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4257 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : " 4258 "copy to user space failed")); 4259 kdcmd->cmd_status = 1; 4260 rval = 1; 4261 } else { 4262 kdcmd->cmd_status = 0; 4263 } 4264 break; 4265 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION: 4266 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4267 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON")); 4268 4269 if 
/*
 * handle_mfi_ioctl
 *
 * Dispatch a user-supplied MFI frame (in the ioctl buffer) to the
 * matching issue_mfi_* helper, keyed on the frame header's command
 * opcode.  A driver command packet is borrowed from the free pool for
 * the duration of the request and always returned, whether or not the
 * helper succeeds.
 *
 * Returns the helper's status (DDI_SUCCESS/DDI_FAILURE), downgraded to
 * DDI_FAILURE if the post-command FMA check flags an access error.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int	rval = DDI_SUCCESS;

	struct mrsas_header	*hdr;
	struct mrsas_cmd	*cmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	/* the first frame of the ioctl buffer carries the MFI header */
	hdr = (struct mrsas_header *)&ioctl->frame[0];

	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		/* both logical- and physical-device SCSI use pass-through */
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* always give the packet back, then run the FMA access check */
	return_mfi_pkt(instance, cmd);
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;
	return (rval);
}
4341 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 4342 case MFI_CMD_OP_DCMD: 4343 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode); 4344 break; 4345 case MFI_CMD_OP_SMP: 4346 rval = issue_mfi_smp(instance, ioctl, cmd, mode); 4347 break; 4348 case MFI_CMD_OP_STP: 4349 rval = issue_mfi_stp(instance, ioctl, cmd, mode); 4350 break; 4351 case MFI_CMD_OP_LD_SCSI: 4352 case MFI_CMD_OP_PD_SCSI: 4353 rval = issue_mfi_pthru(instance, ioctl, cmd, mode); 4354 break; 4355 default: 4356 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: " 4357 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd)); 4358 rval = DDI_FAILURE; 4359 break; 4360 } 4361 4362 4363 return_mfi_pkt(instance, cmd); 4364 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) 4365 rval = DDI_FAILURE; 4366 return (rval); 4367 } 4368 4369 /* 4370 * AEN 4371 */ 4372 static int 4373 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen) 4374 { 4375 int rval = 0; 4376 4377 rval = register_mfi_aen(instance, instance->aen_seq_num, 4378 aen->class_locale_word); 4379 4380 aen->cmd_status = (uint8_t)rval; 4381 4382 return (rval); 4383 } 4384 4385 static int 4386 register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num, 4387 uint32_t class_locale_word) 4388 { 4389 int ret_val; 4390 4391 struct mrsas_cmd *cmd, *aen_cmd; 4392 struct mrsas_dcmd_frame *dcmd; 4393 union mrsas_evt_class_locale curr_aen; 4394 union mrsas_evt_class_locale prev_aen; 4395 4396 /* 4397 * If there an AEN pending already (aen_cmd), check if the 4398 * class_locale of that pending AEN is inclusive of the new 4399 * AEN request we currently have. If it is, then we don't have 4400 * to do anything. In other words, whichever events the current 4401 * AEN request is subscribing to, have already been subscribed 4402 * to. 
4403 * 4404 * If the old_cmd is _not_ inclusive, then we have to abort 4405 * that command, form a class_locale that is superset of both 4406 * old and current and re-issue to the FW 4407 */ 4408 4409 curr_aen.word = LE_32(class_locale_word); 4410 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4411 aen_cmd = instance->aen_cmd; 4412 if (aen_cmd) { 4413 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle, 4414 &aen_cmd->frame->dcmd.mbox.w[1]); 4415 prev_aen.word = LE_32(prev_aen.word); 4416 prev_aen.members.locale = LE_16(prev_aen.members.locale); 4417 /* 4418 * A class whose enum value is smaller is inclusive of all 4419 * higher values. If a PROGRESS (= -1) was previously 4420 * registered, then a new registration requests for higher 4421 * classes need not be sent to FW. They are automatically 4422 * included. 4423 * 4424 * Locale numbers don't have such hierarchy. They are bitmap 4425 * values 4426 */ 4427 if ((prev_aen.members.class <= curr_aen.members.class) && 4428 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4429 curr_aen.members.locale)) { 4430 /* 4431 * Previously issued event registration includes 4432 * current request. Nothing to do. 
4433 */ 4434 4435 return (0); 4436 } else { 4437 curr_aen.members.locale |= prev_aen.members.locale; 4438 4439 if (prev_aen.members.class < curr_aen.members.class) 4440 curr_aen.members.class = prev_aen.members.class; 4441 4442 ret_val = abort_aen_cmd(instance, aen_cmd); 4443 4444 if (ret_val) { 4445 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 4446 "failed to abort prevous AEN command")); 4447 4448 return (ret_val); 4449 } 4450 } 4451 } else { 4452 curr_aen.word = class_locale_word; 4453 } 4454 4455 cmd = get_mfi_pkt(instance); 4456 4457 if (!cmd) 4458 return (ENOMEM); 4459 /* Clear the frame buffer and assign back the context id */ 4460 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4461 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4462 cmd->index); 4463 4464 dcmd = &cmd->frame->dcmd; 4465 4466 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */ 4467 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 4468 4469 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4470 sizeof (struct mrsas_evt_detail)); 4471 4472 /* Prepare DCMD for aen registration */ 4473 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 4474 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 4475 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 4476 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 4477 MFI_FRAME_DIR_READ); 4478 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 4479 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 4480 sizeof (struct mrsas_evt_detail)); 4481 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 4482 MR_DCMD_CTRL_EVENT_WAIT); 4483 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num); 4484 curr_aen.members.locale = LE_16(curr_aen.members.locale); 4485 curr_aen.word = LE_32(curr_aen.word); 4486 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1], 4487 curr_aen.word); 4488 ddi_put32(cmd->frame_dma_obj.acc_handle, 
&dcmd->sgl.sge32[0].phys_addr, 4489 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 4490 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 4491 sizeof (struct mrsas_evt_detail)); 4492 4493 instance->aen_seq_num = seq_num; 4494 4495 4496 /* 4497 * Store reference to the cmd used to register for AEN. When an 4498 * application wants us to register for AEN, we have to abort this 4499 * cmd and re-register with a new EVENT LOCALE supplied by that app 4500 */ 4501 instance->aen_cmd = cmd; 4502 4503 cmd->frame_count = 1; 4504 4505 /* Issue the aen registration frame */ 4506 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4507 instance->func_ptr->issue_cmd(cmd, instance); 4508 4509 return (0); 4510 } 4511 4512 static void 4513 display_scsi_inquiry(caddr_t scsi_inq) 4514 { 4515 #define MAX_SCSI_DEVICE_CODE 14 4516 int i; 4517 char inquiry_buf[256] = {0}; 4518 int len; 4519 const char *const scsi_device_types[] = { 4520 "Direct-Access ", 4521 "Sequential-Access", 4522 "Printer ", 4523 "Processor ", 4524 "WORM ", 4525 "CD-ROM ", 4526 "Scanner ", 4527 "Optical Device ", 4528 "Medium Changer ", 4529 "Communications ", 4530 "Unknown ", 4531 "Unknown ", 4532 "Unknown ", 4533 "Enclosure ", 4534 }; 4535 4536 len = 0; 4537 4538 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4539 for (i = 8; i < 16; i++) { 4540 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4541 scsi_inq[i]); 4542 } 4543 4544 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4545 4546 for (i = 16; i < 32; i++) { 4547 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4548 scsi_inq[i]); 4549 } 4550 4551 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4552 4553 for (i = 32; i < 36; i++) { 4554 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4555 scsi_inq[i]); 4556 } 4557 4558 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4559 4560 4561 i = scsi_inq[0] & 0x1f; 4562 4563 4564 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4565 
i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : 4566 "Unknown "); 4567 4568 4569 len += snprintf(inquiry_buf + len, 265 - len, 4570 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4571 4572 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4573 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4574 } else { 4575 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4576 } 4577 4578 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4579 } 4580 4581 static int 4582 read_fw_status_reg_ppc(struct mrsas_instance *instance) 4583 { 4584 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4585 } 4586 4587 static void 4588 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance) 4589 { 4590 atomic_add_16(&instance->fw_outstanding, 1); 4591 4592 /* Issue the command to the FW */ 4593 WR_IB_QPORT((cmd->frame_phys_addr) | 4594 (((cmd->frame_count - 1) << 1) | 1), instance); 4595 } 4596 4597 /* 4598 * issue_cmd_in_sync_mode 4599 */ 4600 static int 4601 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance, 4602 struct mrsas_cmd *cmd) 4603 { 4604 int i; 4605 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC); 4606 4607 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called")); 4608 4609 cmd->cmd_status = ENODATA; 4610 4611 WR_IB_QPORT((cmd->frame_phys_addr) | 4612 (((cmd->frame_count - 1) << 1) | 1), instance); 4613 4614 mutex_enter(&instance->int_cmd_mtx); 4615 4616 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 4617 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 4618 } 4619 4620 mutex_exit(&instance->int_cmd_mtx); 4621 4622 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done")); 4623 4624 if (i < (msecs -1)) { 4625 return (DDI_SUCCESS); 4626 } else { 4627 return (DDI_FAILURE); 4628 } 4629 } 4630 4631 /* 4632 * issue_cmd_in_poll_mode 4633 */ 4634 static int 4635 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance, 4636 struct mrsas_cmd *cmd) 4637 { 4638 int i; 4639 uint16_t flags; 4640 uint32_t msecs 
= MFI_POLL_TIMEOUT_SECS * MILLISEC; 4641 struct mrsas_header *frame_hdr; 4642 4643 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called")); 4644 4645 frame_hdr = (struct mrsas_header *)cmd->frame; 4646 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, 4647 MFI_CMD_STATUS_POLL_MODE); 4648 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); 4649 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 4650 4651 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); 4652 4653 /* issue the frame using inbound queue port */ 4654 WR_IB_QPORT((cmd->frame_phys_addr) | 4655 (((cmd->frame_count - 1) << 1) | 1), instance); 4656 4657 /* wait for cmd_status to change from 0xFF */ 4658 for (i = 0; i < msecs && ( 4659 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4660 == MFI_CMD_STATUS_POLL_MODE); i++) { 4661 drv_usecwait(MILLISEC); /* wait for 1000 usecs */ 4662 } 4663 4664 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 4665 == MFI_CMD_STATUS_POLL_MODE) { 4666 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 4667 "cmd polling timed out")); 4668 return (DDI_FAILURE); 4669 } 4670 4671 return (DDI_SUCCESS); 4672 } 4673 4674 static void 4675 enable_intr_ppc(struct mrsas_instance *instance) 4676 { 4677 uint32_t mask; 4678 4679 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called")); 4680 4681 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */ 4682 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance); 4683 4684 /* WR_OB_INTR_MASK(~0x80000000, instance); */ 4685 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance); 4686 4687 /* dummy read to force PCI flush */ 4688 mask = RD_OB_INTR_MASK(instance); 4689 4690 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: " 4691 "outbound_intr_mask = 0x%x", mask)); 4692 } 4693 4694 static void 4695 disable_intr_ppc(struct mrsas_instance *instance) 4696 { 4697 uint32_t mask; 4698 4699 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called")); 4700 4701 
con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : " 4702 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4703 4704 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */ 4705 WR_OB_INTR_MASK(OB_INTR_MASK, instance); 4706 4707 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : " 4708 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 4709 4710 /* dummy read to force PCI flush */ 4711 mask = RD_OB_INTR_MASK(instance); 4712 #ifdef lint 4713 mask = mask; 4714 #endif 4715 } 4716 4717 static int 4718 intr_ack_ppc(struct mrsas_instance *instance) 4719 { 4720 uint32_t status; 4721 int ret = DDI_INTR_CLAIMED; 4722 4723 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called")); 4724 4725 /* check if it is our interrupt */ 4726 status = RD_OB_INTR_STATUS(instance); 4727 4728 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status)); 4729 4730 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) { 4731 ret = DDI_INTR_UNCLAIMED; 4732 } 4733 4734 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4735 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4736 ret = DDI_INTR_UNCLAIMED; 4737 } 4738 4739 if (ret == DDI_INTR_UNCLAIMED) { 4740 return (ret); 4741 } 4742 /* clear the interrupt by writing back the same value */ 4743 WR_OB_DOORBELL_CLEAR(status, instance); 4744 4745 /* dummy READ */ 4746 status = RD_OB_INTR_STATUS(instance); 4747 4748 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared")); 4749 4750 return (ret); 4751 } 4752 4753 static int 4754 mrsas_common_check(struct mrsas_instance *instance, 4755 struct mrsas_cmd *cmd) 4756 { 4757 int ret = DDI_SUCCESS; 4758 4759 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 4760 DDI_SUCCESS) { 4761 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4762 if (cmd->pkt != NULL) { 4763 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4764 cmd->pkt->pkt_statistics = 0; 4765 } 4766 ret = DDI_FAILURE; 4767 } 4768 if 
(mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 4769 != DDI_SUCCESS) { 4770 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4771 if (cmd->pkt != NULL) { 4772 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4773 cmd->pkt->pkt_statistics = 0; 4774 } 4775 ret = DDI_FAILURE; 4776 } 4777 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 4778 DDI_SUCCESS) { 4779 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4780 if (cmd->pkt != NULL) { 4781 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4782 cmd->pkt->pkt_statistics = 0; 4783 } 4784 ret = DDI_FAILURE; 4785 } 4786 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4787 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4788 4789 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 4790 4791 if (cmd->pkt != NULL) { 4792 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4793 cmd->pkt->pkt_statistics = 0; 4794 } 4795 ret = DDI_FAILURE; 4796 } 4797 4798 return (ret); 4799 } 4800 4801 /*ARGSUSED*/ 4802 static int 4803 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4804 { 4805 /* 4806 * as the driver can always deal with an error in any dma or 4807 * access handle, we can just return the fme_status value. 4808 */ 4809 pci_ereport_post(dip, err, NULL); 4810 return (err->fme_status); 4811 } 4812 4813 static void 4814 mrsas_fm_init(struct mrsas_instance *instance) 4815 { 4816 /* Need to change iblock to priority for new MSI intr */ 4817 ddi_iblock_cookie_t fm_ibc; 4818 4819 /* Only register with IO Fault Services if we have some capability */ 4820 if (instance->fm_capabilities) { 4821 /* Adjust access and dma attributes for FMA */ 4822 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 4823 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 4824 4825 /* 4826 * Register capabilities with IO Fault Services. 4827 * fm_capabilities will be updated to indicate 4828 * capabilities actually supported (not requested.) 
4829 */ 4830 4831 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc); 4832 4833 /* 4834 * Initialize pci ereport capabilities if ereport 4835 * capable (should always be.) 4836 */ 4837 4838 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4839 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4840 pci_ereport_setup(instance->dip); 4841 } 4842 4843 /* 4844 * Register error callback if error callback capable. 4845 */ 4846 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4847 ddi_fm_handler_register(instance->dip, 4848 mrsas_fm_error_cb, (void*) instance); 4849 } 4850 } else { 4851 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4852 mrsas_generic_dma_attr.dma_attr_flags = 0; 4853 } 4854 } 4855 4856 static void 4857 mrsas_fm_fini(struct mrsas_instance *instance) 4858 { 4859 /* Only unregister FMA capabilities if registered */ 4860 if (instance->fm_capabilities) { 4861 /* 4862 * Un-register error callback if error callback capable. 4863 */ 4864 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4865 ddi_fm_handler_unregister(instance->dip); 4866 } 4867 4868 /* 4869 * Release any resources allocated by pci_ereport_setup() 4870 */ 4871 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 4872 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 4873 pci_ereport_teardown(instance->dip); 4874 } 4875 4876 /* Unregister from IO Fault Services */ 4877 ddi_fm_fini(instance->dip); 4878 4879 /* Adjust access and dma attributes for FMA */ 4880 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4881 mrsas_generic_dma_attr.dma_attr_flags = 0; 4882 } 4883 } 4884 4885 int 4886 mrsas_check_acc_handle(ddi_acc_handle_t handle) 4887 { 4888 ddi_fm_error_t de; 4889 4890 if (handle == NULL) { 4891 return (DDI_FAILURE); 4892 } 4893 4894 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 4895 4896 return (de.fme_status); 4897 } 4898 4899 int 4900 mrsas_check_dma_handle(ddi_dma_handle_t handle) 4901 { 4902 ddi_fm_error_t de; 4903 4904 if (handle == NULL) { 4905 return (DDI_FAILURE); 
4906 } 4907 4908 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 4909 4910 return (de.fme_status); 4911 } 4912 4913 void 4914 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 4915 { 4916 uint64_t ena; 4917 char buf[FM_MAX_CLASS]; 4918 4919 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 4920 ena = fm_ena_generate(0, FM_ENA_FMT1); 4921 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 4922 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 4923 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 4924 } 4925 } 4926 4927 static int 4928 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 4929 { 4930 4931 dev_info_t *dip = instance->dip; 4932 int avail, actual, count; 4933 int i, flag, ret; 4934 4935 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", 4936 intr_type)); 4937 4938 /* Get number of interrupts */ 4939 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 4940 if ((ret != DDI_SUCCESS) || (count == 0)) { 4941 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 4942 "ret %d count %d", ret, count)); 4943 4944 return (DDI_FAILURE); 4945 } 4946 4947 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); 4948 4949 /* Get number of available interrupts */ 4950 ret = ddi_intr_get_navail(dip, intr_type, &avail); 4951 if ((ret != DDI_SUCCESS) || (avail == 0)) { 4952 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 4953 "ret %d avail %d", ret, avail)); 4954 4955 return (DDI_FAILURE); 4956 } 4957 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); 4958 4959 /* Only one interrupt routine. So limit the count to 1 */ 4960 if (count > 1) { 4961 count = 1; 4962 } 4963 4964 /* 4965 * Allocate an array of interrupt handlers. Currently we support 4966 * only one interrupt. The framework can be extended later. 
4967 */ 4968 instance->intr_size = count * sizeof (ddi_intr_handle_t); 4969 instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); 4970 ASSERT(instance->intr_htable); 4971 4972 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == 4973 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 4974 4975 /* Allocate interrupt */ 4976 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 4977 count, &actual, flag); 4978 4979 if ((ret != DDI_SUCCESS) || (actual == 0)) { 4980 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 4981 "avail = %d", avail)); 4982 kmem_free(instance->intr_htable, instance->intr_size); 4983 return (DDI_FAILURE); 4984 } 4985 if (actual < count) { 4986 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 4987 "Requested = %d Received = %d", count, actual)); 4988 } 4989 instance->intr_cnt = actual; 4990 4991 /* 4992 * Get the priority of the interrupt allocated. 4993 */ 4994 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 4995 &instance->intr_pri)) != DDI_SUCCESS) { 4996 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 4997 "get priority call failed")); 4998 4999 for (i = 0; i < actual; i++) { 5000 (void) ddi_intr_free(instance->intr_htable[i]); 5001 } 5002 kmem_free(instance->intr_htable, instance->intr_size); 5003 return (DDI_FAILURE); 5004 } 5005 5006 /* 5007 * Test for high level mutex. we don't support them. 
5008 */ 5009 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 5010 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5011 "High level interrupts not supported.")); 5012 5013 for (i = 0; i < actual; i++) { 5014 (void) ddi_intr_free(instance->intr_htable[i]); 5015 } 5016 kmem_free(instance->intr_htable, instance->intr_size); 5017 return (DDI_FAILURE); 5018 } 5019 5020 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 5021 instance->intr_pri)); 5022 5023 /* Call ddi_intr_add_handler() */ 5024 for (i = 0; i < actual; i++) { 5025 ret = ddi_intr_add_handler(instance->intr_htable[i], 5026 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 5027 (caddr_t)(uintptr_t)i); 5028 5029 if (ret != DDI_SUCCESS) { 5030 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 5031 "failed %d", ret)); 5032 5033 for (i = 0; i < actual; i++) { 5034 (void) ddi_intr_free(instance->intr_htable[i]); 5035 } 5036 kmem_free(instance->intr_htable, instance->intr_size); 5037 return (DDI_FAILURE); 5038 } 5039 5040 } 5041 5042 con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); 5043 5044 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 5045 &instance->intr_cap)) != DDI_SUCCESS) { 5046 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 5047 ret)); 5048 5049 /* Free already allocated intr */ 5050 for (i = 0; i < actual; i++) { 5051 (void) ddi_intr_remove_handler( 5052 instance->intr_htable[i]); 5053 (void) ddi_intr_free(instance->intr_htable[i]); 5054 } 5055 kmem_free(instance->intr_htable, instance->intr_size); 5056 return (DDI_FAILURE); 5057 } 5058 5059 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5060 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 5061 5062 (void) ddi_intr_block_enable(instance->intr_htable, 5063 instance->intr_cnt); 5064 } else { 5065 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 5066 5067 for (i = 0; i < instance->intr_cnt; i++) { 5068 (void) ddi_intr_enable(instance->intr_htable[i]); 5069 con_log(CL_ANN, (CE_NOTE, "ddi 
intr enable returns " 5070 "%d", i)); 5071 } 5072 } 5073 5074 return (DDI_SUCCESS); 5075 5076 } 5077 5078 5079 static void 5080 mrsas_rem_intrs(struct mrsas_instance *instance) 5081 { 5082 int i; 5083 5084 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 5085 5086 /* Disable all interrupts first */ 5087 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5088 (void) ddi_intr_block_disable(instance->intr_htable, 5089 instance->intr_cnt); 5090 } else { 5091 for (i = 0; i < instance->intr_cnt; i++) { 5092 (void) ddi_intr_disable(instance->intr_htable[i]); 5093 } 5094 } 5095 5096 /* Remove all the handlers */ 5097 5098 for (i = 0; i < instance->intr_cnt; i++) { 5099 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 5100 (void) ddi_intr_free(instance->intr_htable[i]); 5101 } 5102 5103 kmem_free(instance->intr_htable, instance->intr_size); 5104 } 5105 5106 static int 5107 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 5108 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 5109 { 5110 struct mrsas_instance *instance; 5111 int config; 5112 int rval; 5113 5114 char *ptr = NULL; 5115 int tgt, lun; 5116 5117 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 5118 5119 if ((instance = ddi_get_soft_state(mrsas_state, 5120 ddi_get_instance(parent))) == NULL) { 5121 return (NDI_FAILURE); 5122 } 5123 5124 /* Hold nexus during bus_config */ 5125 ndi_devi_enter(parent, &config); 5126 switch (op) { 5127 case BUS_CONFIG_ONE: { 5128 5129 /* parse wwid/target name out of name given */ 5130 if ((ptr = strchr((char *)arg, '@')) == NULL) { 5131 rval = NDI_FAILURE; 5132 break; 5133 } 5134 ptr++; 5135 5136 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 5137 rval = NDI_FAILURE; 5138 break; 5139 } 5140 5141 if (lun == 0) { 5142 rval = mrsas_config_ld(instance, tgt, lun, childp); 5143 } else { 5144 rval = NDI_FAILURE; 5145 } 5146 5147 break; 5148 } 5149 case BUS_CONFIG_DRIVER: 5150 case BUS_CONFIG_ALL: { 5151 5152 rval = 
mrsas_config_all_devices(instance); 5153 5154 rval = NDI_SUCCESS; 5155 break; 5156 } 5157 } 5158 5159 if (rval == NDI_SUCCESS) { 5160 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 5161 5162 } 5163 ndi_devi_exit(parent, config); 5164 5165 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 5166 rval)); 5167 return (rval); 5168 } 5169 5170 static int 5171 mrsas_config_all_devices(struct mrsas_instance *instance) 5172 { 5173 int rval, tgt; 5174 5175 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 5176 (void) mrsas_config_ld(instance, tgt, 0, NULL); 5177 5178 } 5179 5180 rval = NDI_SUCCESS; 5181 return (rval); 5182 } 5183 5184 static int 5185 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 5186 { 5187 char devbuf[SCSI_MAXNAMELEN]; 5188 char *addr; 5189 char *p, *tp, *lp; 5190 long num; 5191 5192 /* Parse dev name and address */ 5193 (void) strcpy(devbuf, devnm); 5194 addr = ""; 5195 for (p = devbuf; *p != '\0'; p++) { 5196 if (*p == '@') { 5197 addr = p + 1; 5198 *p = '\0'; 5199 } else if (*p == ':') { 5200 *p = '\0'; 5201 break; 5202 } 5203 } 5204 5205 /* Parse target and lun */ 5206 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 5207 if (*p == ',') { 5208 lp = p + 1; 5209 *p = '\0'; 5210 break; 5211 } 5212 } 5213 if (tgt && tp) { 5214 if (ddi_strtol(tp, NULL, 0x10, &num)) { 5215 return (DDI_FAILURE); /* Can declare this as constant */ 5216 } 5217 *tgt = (int)num; 5218 } 5219 if (lun && lp) { 5220 if (ddi_strtol(lp, NULL, 0x10, &num)) { 5221 return (DDI_FAILURE); 5222 } 5223 *lun = (int)num; 5224 } 5225 return (DDI_SUCCESS); /* Success case */ 5226 } 5227 5228 static int 5229 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 5230 uint8_t lun, dev_info_t **ldip) 5231 { 5232 struct scsi_device *sd; 5233 dev_info_t *child; 5234 int rval; 5235 5236 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 5237 tgt, lun)); 5238 5239 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 5240 if (ldip) { 5241 *ldip = 
child; 5242 } 5243 con_log(CL_ANN1, (CE_NOTE, 5244 "mrsas_config_ld: Child = %p found t = %d l = %d", 5245 (void *)child, tgt, lun)); 5246 return (NDI_SUCCESS); 5247 } 5248 5249 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 5250 sd->sd_address.a_hba_tran = instance->tran; 5251 sd->sd_address.a_target = (uint16_t)tgt; 5252 sd->sd_address.a_lun = (uint8_t)lun; 5253 5254 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 5255 rval = mrsas_config_scsi_device(instance, sd, ldip); 5256 else 5257 rval = NDI_FAILURE; 5258 5259 /* sd_unprobe is blank now. Free buffer manually */ 5260 if (sd->sd_inq) { 5261 kmem_free(sd->sd_inq, SUN_INQSIZE); 5262 sd->sd_inq = (struct scsi_inquiry *)NULL; 5263 } 5264 5265 kmem_free(sd, sizeof (struct scsi_device)); 5266 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 5267 rval)); 5268 return (rval); 5269 } 5270 5271 static int 5272 mrsas_config_scsi_device(struct mrsas_instance *instance, 5273 struct scsi_device *sd, dev_info_t **dipp) 5274 { 5275 char *nodename = NULL; 5276 char **compatible = NULL; 5277 int ncompatible = 0; 5278 char *childname; 5279 dev_info_t *ldip = NULL; 5280 int tgt = sd->sd_address.a_target; 5281 int lun = sd->sd_address.a_lun; 5282 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 5283 int rval; 5284 5285 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 5286 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 5287 NULL, &nodename, &compatible, &ncompatible); 5288 5289 if (nodename == NULL) { 5290 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver " 5291 "for t%dL%d", tgt, lun)); 5292 rval = NDI_FAILURE; 5293 goto finish; 5294 } 5295 5296 childname = (dtype == DTYPE_DIRECT) ? 
"sd" : nodename; 5297 con_log(CL_ANN1, (CE_WARN, 5298 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 5299 5300 /* Create a dev node */ 5301 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 5302 con_log(CL_ANN1, (CE_WARN, 5303 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 5304 if (rval == NDI_SUCCESS) { 5305 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 5306 DDI_PROP_SUCCESS) { 5307 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5308 "property for t%dl%d target", tgt, lun)); 5309 rval = NDI_FAILURE; 5310 goto finish; 5311 } 5312 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 5313 DDI_PROP_SUCCESS) { 5314 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5315 "property for t%dl%d lun", tgt, lun)); 5316 rval = NDI_FAILURE; 5317 goto finish; 5318 } 5319 5320 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 5321 "compatible", compatible, ncompatible) != 5322 DDI_PROP_SUCCESS) { 5323 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5324 "property for t%dl%d compatible", tgt, lun)); 5325 rval = NDI_FAILURE; 5326 goto finish; 5327 } 5328 5329 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 5330 if (rval != NDI_SUCCESS) { 5331 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 5332 "t%dl%d", tgt, lun)); 5333 ndi_prop_remove_all(ldip); 5334 (void) ndi_devi_free(ldip); 5335 } else { 5336 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 5337 "0 t%dl%d", tgt, lun)); 5338 } 5339 5340 } 5341 finish: 5342 if (dipp) { 5343 *dipp = ldip; 5344 } 5345 5346 con_log(CL_DLEVEL1, (CE_WARN, 5347 "mr_sas: config_scsi_device rval = %d t%dL%d", 5348 rval, tgt, lun)); 5349 scsi_hba_nodename_compatible_free(nodename, compatible); 5350 return (rval); 5351 } 5352 5353 /*ARGSUSED*/ 5354 static int 5355 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event, 5356 uint64_t wwn) 5357 { 5358 struct mrsas_eventinfo *mrevt = NULL; 5359 5360 con_log(CL_ANN1, 
(CE_NOTE, 5361 "mrsas_service_evt called for t%dl%d event = %d", 5362 tgt, lun, event)); 5363 5364 if ((instance->taskq == NULL) || (mrevt = 5365 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 5366 return (ENOMEM); 5367 } 5368 5369 mrevt->instance = instance; 5370 mrevt->tgt = tgt; 5371 mrevt->lun = lun; 5372 mrevt->event = event; 5373 5374 if ((ddi_taskq_dispatch(instance->taskq, 5375 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 5376 DDI_SUCCESS) { 5377 con_log(CL_ANN1, (CE_NOTE, 5378 "mr_sas: Event task failed for t%dl%d event = %d", 5379 tgt, lun, event)); 5380 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5381 return (DDI_FAILURE); 5382 } 5383 return (DDI_SUCCESS); 5384 } 5385 5386 static void 5387 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 5388 { 5389 struct mrsas_instance *instance = mrevt->instance; 5390 dev_info_t *dip, *pdip; 5391 int circ1 = 0; 5392 char *devname; 5393 5394 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 5395 " tgt %d lun %d event %d", 5396 mrevt->tgt, mrevt->lun, mrevt->event)); 5397 5398 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 5399 dip = instance->mr_ld_list[mrevt->tgt].dip; 5400 } else { 5401 return; 5402 } 5403 5404 ndi_devi_enter(instance->dip, &circ1); 5405 switch (mrevt->event) { 5406 case MRSAS_EVT_CONFIG_TGT: 5407 if (dip == NULL) { 5408 5409 if (mrevt->lun == 0) { 5410 (void) mrsas_config_ld(instance, mrevt->tgt, 5411 0, NULL); 5412 } 5413 con_log(CL_ANN1, (CE_NOTE, 5414 "mr_sas: EVT_CONFIG_TGT called:" 5415 " for tgt %d lun %d event %d", 5416 mrevt->tgt, mrevt->lun, mrevt->event)); 5417 5418 } else { 5419 con_log(CL_ANN1, (CE_NOTE, 5420 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 5421 " for tgt %d lun %d event %d", 5422 mrevt->tgt, mrevt->lun, mrevt->event)); 5423 } 5424 break; 5425 case MRSAS_EVT_UNCONFIG_TGT: 5426 if (dip) { 5427 if (i_ddi_devi_attached(dip)) { 5428 5429 pdip = ddi_get_parent(dip); 5430 5431 devname = kmem_zalloc(MAXNAMELEN + 1, 
KM_SLEEP); 5432 (void) ddi_deviname(dip, devname); 5433 5434 (void) devfs_clean(pdip, devname + 1, 5435 DV_CLEAN_FORCE); 5436 kmem_free(devname, MAXNAMELEN + 1); 5437 } 5438 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 5439 con_log(CL_ANN1, (CE_NOTE, 5440 "mr_sas: EVT_UNCONFIG_TGT called:" 5441 " for tgt %d lun %d event %d", 5442 mrevt->tgt, mrevt->lun, mrevt->event)); 5443 } else { 5444 con_log(CL_ANN1, (CE_NOTE, 5445 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 5446 " for tgt %d lun %d event %d", 5447 mrevt->tgt, mrevt->lun, mrevt->event)); 5448 } 5449 break; 5450 } 5451 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5452 ndi_devi_exit(instance->dip, circ1); 5453 } 5454 5455 static int 5456 mrsas_mode_sense_build(struct scsi_pkt *pkt) 5457 { 5458 union scsi_cdb *cdbp; 5459 uint16_t page_code; 5460 struct scsa_cmd *acmd; 5461 struct buf *bp; 5462 struct mode_header *modehdrp; 5463 5464 cdbp = (void *)pkt->pkt_cdbp; 5465 page_code = cdbp->cdb_un.sg.scsi[0]; 5466 acmd = PKT2CMD(pkt); 5467 bp = acmd->cmd_buf; 5468 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 5469 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 5470 /* ADD pkt statistics as Command failed. 
*/ 5471 return (NULL); 5472 } 5473 5474 bp_mapin(bp); 5475 bzero(bp->b_un.b_addr, bp->b_bcount); 5476 5477 switch (page_code) { 5478 case 0x3: { 5479 struct mode_format *page3p = NULL; 5480 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5481 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5482 5483 page3p = (void *)((caddr_t)modehdrp + 5484 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5485 page3p->mode_page.code = 0x3; 5486 page3p->mode_page.length = 5487 (uchar_t)(sizeof (struct mode_format)); 5488 page3p->data_bytes_sect = 512; 5489 page3p->sect_track = 63; 5490 break; 5491 } 5492 case 0x4: { 5493 struct mode_geometry *page4p = NULL; 5494 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5495 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5496 5497 page4p = (void *)((caddr_t)modehdrp + 5498 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5499 page4p->mode_page.code = 0x4; 5500 page4p->mode_page.length = 5501 (uchar_t)(sizeof (struct mode_geometry)); 5502 page4p->heads = 255; 5503 page4p->rpm = 10000; 5504 break; 5505 } 5506 default: 5507 break; 5508 } 5509 return (NULL); 5510 } 5511