/*
 * mr_sas.c: source for mr_sas driver
 *
 * MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2009, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *	Arun Chandrashekhar
 *	Manju R
 *	Rajesh Prabhakaran
 *	Seokmann Ju
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the author nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
46 */ 47 48 #include <sys/types.h> 49 #include <sys/param.h> 50 #include <sys/file.h> 51 #include <sys/errno.h> 52 #include <sys/open.h> 53 #include <sys/cred.h> 54 #include <sys/modctl.h> 55 #include <sys/conf.h> 56 #include <sys/devops.h> 57 #include <sys/cmn_err.h> 58 #include <sys/kmem.h> 59 #include <sys/stat.h> 60 #include <sys/mkdev.h> 61 #include <sys/pci.h> 62 #include <sys/scsi/scsi.h> 63 #include <sys/ddi.h> 64 #include <sys/sunddi.h> 65 #include <sys/atomic.h> 66 #include <sys/signal.h> 67 #include <sys/byteorder.h> 68 #include <sys/fs/dv_node.h> /* devfs_clean */ 69 70 #include "mr_sas.h" 71 72 /* 73 * FMA header files 74 */ 75 #include <sys/ddifm.h> 76 #include <sys/fm/protocol.h> 77 #include <sys/fm/util.h> 78 #include <sys/fm/io/ddi.h> 79 80 /* 81 * Local static data 82 */ 83 static void *mrsas_state = NULL; 84 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE; 85 static volatile int debug_level_g = CL_NONE; 86 static volatile int msi_enable = 1; 87 88 #pragma weak scsi_hba_open 89 #pragma weak scsi_hba_close 90 #pragma weak scsi_hba_ioctl 91 92 static ddi_dma_attr_t mrsas_generic_dma_attr = { 93 DMA_ATTR_V0, /* dma_attr_version */ 94 0, /* low DMA address range */ 95 0xFFFFFFFFU, /* high DMA address range */ 96 0xFFFFFFFFU, /* DMA counter register */ 97 8, /* DMA address alignment */ 98 0x07, /* DMA burstsizes */ 99 1, /* min DMA size */ 100 0xFFFFFFFFU, /* max DMA size */ 101 0xFFFFFFFFU, /* segment boundary */ 102 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 103 512, /* granularity of device */ 104 0 /* bus specific DMA flags */ 105 }; 106 107 int32_t mrsas_max_cap_maxxfer = 0x1000000; 108 109 /* 110 * cb_ops contains base level routines 111 */ 112 static struct cb_ops mrsas_cb_ops = { 113 mrsas_open, /* open */ 114 mrsas_close, /* close */ 115 nodev, /* strategy */ 116 nodev, /* print */ 117 nodev, /* dump */ 118 nodev, /* read */ 119 nodev, /* write */ 120 mrsas_ioctl, /* ioctl */ 121 nodev, /* devmap */ 122 nodev, /* mmap */ 123 nodev, /* 
segmap */ 124 nochpoll, /* poll */ 125 nodev, /* cb_prop_op */ 126 0, /* streamtab */ 127 D_NEW | D_HOTPLUG, /* cb_flag */ 128 CB_REV, /* cb_rev */ 129 nodev, /* cb_aread */ 130 nodev /* cb_awrite */ 131 }; 132 133 /* 134 * dev_ops contains configuration routines 135 */ 136 static struct dev_ops mrsas_ops = { 137 DEVO_REV, /* rev, */ 138 0, /* refcnt */ 139 mrsas_getinfo, /* getinfo */ 140 nulldev, /* identify */ 141 nulldev, /* probe */ 142 mrsas_attach, /* attach */ 143 mrsas_detach, /* detach */ 144 mrsas_reset, /* reset */ 145 &mrsas_cb_ops, /* char/block ops */ 146 NULL, /* bus ops */ 147 NULL, /* power */ 148 ddi_quiesce_not_supported, /* quiesce */ 149 }; 150 151 char _depends_on[] = "misc/scsi"; 152 153 static struct modldrv modldrv = { 154 &mod_driverops, /* module type - driver */ 155 MRSAS_VERSION, 156 &mrsas_ops, /* driver ops */ 157 }; 158 159 static struct modlinkage modlinkage = { 160 MODREV_1, /* ml_rev - must be MODREV_1 */ 161 &modldrv, /* ml_linkage */ 162 NULL /* end of driver linkage */ 163 }; 164 165 static struct ddi_device_acc_attr endian_attr = { 166 DDI_DEVICE_ATTR_V0, 167 DDI_STRUCTURE_LE_ACC, 168 DDI_STRICTORDER_ACC 169 }; 170 171 172 /* 173 * ************************************************************************** * 174 * * 175 * common entry points - for loadable kernel modules * 176 * * 177 * ************************************************************************** * 178 */ 179 180 int 181 _init(void) 182 { 183 int ret; 184 185 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 186 187 ret = ddi_soft_state_init(&mrsas_state, 188 sizeof (struct mrsas_instance), 0); 189 190 if (ret != DDI_SUCCESS) { 191 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init state")); 192 return (ret); 193 } 194 195 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 196 con_log(CL_ANN, (CE_WARN, "mr_sas: could not init scsi hba")); 197 ddi_soft_state_fini(&mrsas_state); 198 return (ret); 199 } 200 201 ret = mod_install(&modlinkage); 202 
203 if (ret != DDI_SUCCESS) { 204 con_log(CL_ANN, (CE_WARN, "mr_sas: mod_install failed")); 205 scsi_hba_fini(&modlinkage); 206 ddi_soft_state_fini(&mrsas_state); 207 } 208 209 return (ret); 210 } 211 212 int 213 _info(struct modinfo *modinfop) 214 { 215 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 216 217 return (mod_info(&modlinkage, modinfop)); 218 } 219 220 int 221 _fini(void) 222 { 223 int ret; 224 225 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 226 227 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) 228 return (ret); 229 230 scsi_hba_fini(&modlinkage); 231 232 ddi_soft_state_fini(&mrsas_state); 233 234 return (ret); 235 } 236 237 238 /* 239 * ************************************************************************** * 240 * * 241 * common entry points - for autoconfiguration * 242 * * 243 * ************************************************************************** * 244 */ 245 246 static int 247 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 248 { 249 int instance_no; 250 int nregs; 251 uint8_t added_isr_f = 0; 252 uint8_t added_soft_isr_f = 0; 253 uint8_t create_devctl_node_f = 0; 254 uint8_t create_scsi_node_f = 0; 255 uint8_t create_ioc_node_f = 0; 256 uint8_t tran_alloc_f = 0; 257 uint8_t irq; 258 uint16_t vendor_id; 259 uint16_t device_id; 260 uint16_t subsysvid; 261 uint16_t subsysid; 262 uint16_t command; 263 off_t reglength = 0; 264 int intr_types = 0; 265 char *data; 266 267 scsi_hba_tran_t *tran; 268 ddi_dma_attr_t tran_dma_attr; 269 struct mrsas_instance *instance; 270 271 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 272 273 /* CONSTCOND */ 274 ASSERT(NO_COMPETING_THREADS); 275 276 instance_no = ddi_get_instance(dip); 277 278 /* 279 * check to see whether this device is in a DMA-capable slot. 
280 */ 281 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 282 con_log(CL_ANN, (CE_WARN, 283 "mr_sas%d: Device in slave-only slot, unused", 284 instance_no)); 285 return (DDI_FAILURE); 286 } 287 288 switch (cmd) { 289 case DDI_ATTACH: 290 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: DDI_ATTACH")); 291 /* allocate the soft state for the instance */ 292 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 293 != DDI_SUCCESS) { 294 con_log(CL_ANN, (CE_WARN, 295 "mr_sas%d: Failed to allocate soft state", 296 instance_no)); 297 298 return (DDI_FAILURE); 299 } 300 301 instance = (struct mrsas_instance *)ddi_get_soft_state 302 (mrsas_state, instance_no); 303 304 if (instance == NULL) { 305 con_log(CL_ANN, (CE_WARN, 306 "mr_sas%d: Bad soft state", instance_no)); 307 308 ddi_soft_state_free(mrsas_state, instance_no); 309 310 return (DDI_FAILURE); 311 } 312 313 bzero((caddr_t)instance, 314 sizeof (struct mrsas_instance)); 315 316 instance->func_ptr = kmem_zalloc( 317 sizeof (struct mrsas_func_ptr), KM_SLEEP); 318 ASSERT(instance->func_ptr); 319 320 /* Setup the PCI configuration space handles */ 321 if (pci_config_setup(dip, &instance->pci_handle) != 322 DDI_SUCCESS) { 323 con_log(CL_ANN, (CE_WARN, 324 "mr_sas%d: pci config setup failed ", 325 instance_no)); 326 327 kmem_free(instance->func_ptr, 328 sizeof (struct mrsas_func_ptr)); 329 ddi_soft_state_free(mrsas_state, instance_no); 330 331 return (DDI_FAILURE); 332 } 333 334 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 335 con_log(CL_ANN, (CE_WARN, 336 "mr_sas: failed to get registers.")); 337 338 pci_config_teardown(&instance->pci_handle); 339 kmem_free(instance->func_ptr, 340 sizeof (struct mrsas_func_ptr)); 341 ddi_soft_state_free(mrsas_state, instance_no); 342 343 return (DDI_FAILURE); 344 } 345 346 vendor_id = pci_config_get16(instance->pci_handle, 347 PCI_CONF_VENID); 348 device_id = pci_config_get16(instance->pci_handle, 349 PCI_CONF_DEVID); 350 351 subsysvid = pci_config_get16(instance->pci_handle, 352 PCI_CONF_SUBVENID); 353 
subsysid = pci_config_get16(instance->pci_handle, 354 PCI_CONF_SUBSYSID); 355 356 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 357 (pci_config_get16(instance->pci_handle, 358 PCI_CONF_COMM) | PCI_COMM_ME)); 359 irq = pci_config_get8(instance->pci_handle, 360 PCI_CONF_ILINE); 361 362 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 363 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s", 364 instance_no, vendor_id, device_id, subsysvid, 365 subsysid, irq, MRSAS_VERSION)); 366 367 /* enable bus-mastering */ 368 command = pci_config_get16(instance->pci_handle, 369 PCI_CONF_COMM); 370 371 if (!(command & PCI_COMM_ME)) { 372 command |= PCI_COMM_ME; 373 374 pci_config_put16(instance->pci_handle, 375 PCI_CONF_COMM, command); 376 377 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 378 "enable bus-mastering", instance_no)); 379 } else { 380 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 381 "bus-mastering already set", instance_no)); 382 } 383 384 /* initialize function pointers */ 385 if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) || 386 (device_id == PCI_DEVICE_ID_LSI_2108V)) { 387 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 388 "2108V/DE detected", instance_no)); 389 instance->func_ptr->read_fw_status_reg = 390 read_fw_status_reg_ppc; 391 instance->func_ptr->issue_cmd = issue_cmd_ppc; 392 instance->func_ptr->issue_cmd_in_sync_mode = 393 issue_cmd_in_sync_mode_ppc; 394 instance->func_ptr->issue_cmd_in_poll_mode = 395 issue_cmd_in_poll_mode_ppc; 396 instance->func_ptr->enable_intr = 397 enable_intr_ppc; 398 instance->func_ptr->disable_intr = 399 disable_intr_ppc; 400 instance->func_ptr->intr_ack = intr_ack_ppc; 401 } else { 402 con_log(CL_ANN, (CE_WARN, 403 "mr_sas: Invalid device detected")); 404 405 pci_config_teardown(&instance->pci_handle); 406 kmem_free(instance->func_ptr, 407 sizeof (struct mrsas_func_ptr)); 408 ddi_soft_state_free(mrsas_state, instance_no); 409 410 return (DDI_FAILURE); 411 } 412 413 instance->baseaddress = pci_config_get32( 414 instance->pci_handle, PCI_CONF_BASE0); 415 
instance->baseaddress &= 0x0fffc; 416 417 instance->dip = dip; 418 instance->vendor_id = vendor_id; 419 instance->device_id = device_id; 420 instance->subsysvid = subsysvid; 421 instance->subsysid = subsysid; 422 instance->instance = instance_no; 423 424 /* Initialize FMA */ 425 instance->fm_capabilities = ddi_prop_get_int( 426 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 427 "fm-capable", DDI_FM_EREPORT_CAPABLE | 428 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 429 | DDI_FM_ERRCB_CAPABLE); 430 431 mrsas_fm_init(instance); 432 433 /* Initialize Interrupts */ 434 if ((ddi_dev_regsize(instance->dip, 435 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 436 reglength < MINIMUM_MFI_MEM_SZ) { 437 return (DDI_FAILURE); 438 } 439 if (reglength > DEFAULT_MFI_MEM_SZ) { 440 reglength = DEFAULT_MFI_MEM_SZ; 441 con_log(CL_DLEVEL1, (CE_NOTE, 442 "mr_sas: register length to map is " 443 "0x%lx bytes", reglength)); 444 } 445 if (ddi_regs_map_setup(instance->dip, 446 REGISTER_SET_IO_2108, &instance->regmap, 0, 447 reglength, &endian_attr, &instance->regmap_handle) 448 != DDI_SUCCESS) { 449 con_log(CL_ANN, (CE_NOTE, 450 "mr_sas: couldn't map control registers")); 451 goto fail_attach; 452 } 453 454 /* 455 * Disable Interrupt Now. 
456 * Setup Software interrupt 457 */ 458 instance->func_ptr->disable_intr(instance); 459 460 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 461 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 462 if (strncmp(data, "no", 3) == 0) { 463 msi_enable = 0; 464 con_log(CL_ANN1, (CE_WARN, 465 "msi_enable = %d disabled", 466 msi_enable)); 467 } 468 ddi_prop_free(data); 469 } 470 471 con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d", 472 msi_enable)); 473 474 /* Check for all supported interrupt types */ 475 if (ddi_intr_get_supported_types( 476 dip, &intr_types) != DDI_SUCCESS) { 477 con_log(CL_ANN, (CE_WARN, 478 "ddi_intr_get_supported_types() failed")); 479 goto fail_attach; 480 } 481 482 con_log(CL_DLEVEL1, (CE_NOTE, 483 "ddi_intr_get_supported_types() ret: 0x%x", 484 intr_types)); 485 486 /* Initialize and Setup Interrupt handler */ 487 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 488 if (mrsas_add_intrs(instance, 489 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) { 490 con_log(CL_ANN, (CE_WARN, 491 "MSIX interrupt query failed")); 492 goto fail_attach; 493 } 494 instance->intr_type = DDI_INTR_TYPE_MSIX; 495 } else if (msi_enable && (intr_types & 496 DDI_INTR_TYPE_MSI)) { 497 if (mrsas_add_intrs(instance, 498 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 499 con_log(CL_ANN, (CE_WARN, 500 "MSI interrupt query failed")); 501 goto fail_attach; 502 } 503 instance->intr_type = DDI_INTR_TYPE_MSI; 504 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 505 msi_enable = 0; 506 if (mrsas_add_intrs(instance, 507 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 508 con_log(CL_ANN, (CE_WARN, 509 "FIXED interrupt query failed")); 510 goto fail_attach; 511 } 512 instance->intr_type = DDI_INTR_TYPE_FIXED; 513 } else { 514 con_log(CL_ANN, (CE_WARN, "Device cannot " 515 "suppport either FIXED or MSI/X " 516 "interrupts")); 517 goto fail_attach; 518 } 519 520 added_isr_f = 1; 521 522 /* setup the mfi based low level driver */ 523 if (init_mfi(instance) != DDI_SUCCESS) { 524 con_log(CL_ANN, (CE_WARN, "mr_sas: " 
525 "could not initialize the low level driver")); 526 527 goto fail_attach; 528 } 529 530 /* Initialize all Mutex */ 531 INIT_LIST_HEAD(&instance->completed_pool_list); 532 mutex_init(&instance->completed_pool_mtx, 533 "completed_pool_mtx", MUTEX_DRIVER, 534 DDI_INTR_PRI(instance->intr_pri)); 535 536 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 537 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 538 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 539 540 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx", 541 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 542 543 /* Register our soft-isr for highlevel interrupts. */ 544 instance->isr_level = instance->intr_pri; 545 if (instance->isr_level == HIGH_LEVEL_INTR) { 546 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 547 &instance->soft_intr_id, NULL, NULL, 548 mrsas_softintr, (caddr_t)instance) != 549 DDI_SUCCESS) { 550 con_log(CL_ANN, (CE_WARN, 551 " Software ISR did not register")); 552 553 goto fail_attach; 554 } 555 556 added_soft_isr_f = 1; 557 } 558 559 /* Allocate a transport structure */ 560 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 561 562 if (tran == NULL) { 563 con_log(CL_ANN, (CE_WARN, 564 "scsi_hba_tran_alloc failed")); 565 goto fail_attach; 566 } 567 568 tran_alloc_f = 1; 569 570 instance->tran = tran; 571 572 tran->tran_hba_private = instance; 573 tran->tran_tgt_init = mrsas_tran_tgt_init; 574 tran->tran_tgt_probe = scsi_hba_probe; 575 tran->tran_tgt_free = mrsas_tran_tgt_free; 576 tran->tran_init_pkt = mrsas_tran_init_pkt; 577 tran->tran_start = mrsas_tran_start; 578 tran->tran_abort = mrsas_tran_abort; 579 tran->tran_reset = mrsas_tran_reset; 580 tran->tran_getcap = mrsas_tran_getcap; 581 tran->tran_setcap = mrsas_tran_setcap; 582 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 583 tran->tran_dmafree = mrsas_tran_dmafree; 584 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 585 tran->tran_bus_config = mrsas_tran_bus_config; 586 587 if (mrsas_relaxed_ordering) 588 
mrsas_generic_dma_attr.dma_attr_flags |= 589 DDI_DMA_RELAXED_ORDERING; 590 591 592 tran_dma_attr = mrsas_generic_dma_attr; 593 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 594 595 /* Attach this instance of the hba */ 596 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 597 != DDI_SUCCESS) { 598 con_log(CL_ANN, (CE_WARN, 599 "scsi_hba_attach failed")); 600 601 goto fail_attach; 602 } 603 604 /* create devctl node for cfgadm command */ 605 if (ddi_create_minor_node(dip, "devctl", 606 S_IFCHR, INST2DEVCTL(instance_no), 607 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 608 con_log(CL_ANN, (CE_WARN, 609 "mr_sas: failed to create devctl node.")); 610 611 goto fail_attach; 612 } 613 614 create_devctl_node_f = 1; 615 616 /* create scsi node for cfgadm command */ 617 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 618 INST2SCSI(instance_no), 619 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 620 DDI_FAILURE) { 621 con_log(CL_ANN, (CE_WARN, 622 "mr_sas: failed to create scsi node.")); 623 624 goto fail_attach; 625 } 626 627 create_scsi_node_f = 1; 628 629 (void) sprintf(instance->iocnode, "%d:lsirdctl", 630 instance_no); 631 632 /* 633 * Create a node for applications 634 * for issuing ioctl to the driver. 
635 */ 636 if (ddi_create_minor_node(dip, instance->iocnode, 637 S_IFCHR, INST2LSIRDCTL(instance_no), 638 DDI_PSEUDO, 0) == DDI_FAILURE) { 639 con_log(CL_ANN, (CE_WARN, 640 "mr_sas: failed to create ioctl node.")); 641 642 goto fail_attach; 643 } 644 645 create_ioc_node_f = 1; 646 647 /* Create a taskq to handle dr events */ 648 if ((instance->taskq = ddi_taskq_create(dip, 649 "mrsas_dr_taskq", 1, 650 TASKQ_DEFAULTPRI, 0)) == NULL) { 651 con_log(CL_ANN, (CE_WARN, 652 "mr_sas: failed to create taskq ")); 653 instance->taskq = NULL; 654 goto fail_attach; 655 } 656 657 /* enable interrupt */ 658 instance->func_ptr->enable_intr(instance); 659 660 /* initiate AEN */ 661 if (start_mfi_aen(instance)) { 662 con_log(CL_ANN, (CE_WARN, 663 "mr_sas: failed to initiate AEN.")); 664 goto fail_initiate_aen; 665 } 666 667 con_log(CL_DLEVEL1, (CE_NOTE, 668 "AEN started for instance %d.", instance_no)); 669 670 /* Finally! We are on the air. */ 671 ddi_report_dev(dip); 672 673 if (mrsas_check_acc_handle(instance->regmap_handle) != 674 DDI_SUCCESS) { 675 goto fail_attach; 676 } 677 if (mrsas_check_acc_handle(instance->pci_handle) != 678 DDI_SUCCESS) { 679 goto fail_attach; 680 } 681 instance->mr_ld_list = 682 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 683 KM_SLEEP); 684 break; 685 case DDI_PM_RESUME: 686 con_log(CL_ANN, (CE_NOTE, 687 "mr_sas: DDI_PM_RESUME")); 688 break; 689 case DDI_RESUME: 690 con_log(CL_ANN, (CE_NOTE, 691 "mr_sas: DDI_RESUME")); 692 break; 693 default: 694 con_log(CL_ANN, (CE_WARN, 695 "mr_sas: invalid attach cmd=%x", cmd)); 696 return (DDI_FAILURE); 697 } 698 699 return (DDI_SUCCESS); 700 701 fail_initiate_aen: 702 fail_attach: 703 if (create_devctl_node_f) { 704 ddi_remove_minor_node(dip, "devctl"); 705 } 706 707 if (create_scsi_node_f) { 708 ddi_remove_minor_node(dip, "scsi"); 709 } 710 711 if (create_ioc_node_f) { 712 ddi_remove_minor_node(dip, instance->iocnode); 713 } 714 715 if (tran_alloc_f) { 716 scsi_hba_tran_free(tran); 717 } 718 719 720 if 
(added_soft_isr_f) { 721 ddi_remove_softintr(instance->soft_intr_id); 722 } 723 724 if (added_isr_f) { 725 mrsas_rem_intrs(instance); 726 } 727 728 if (instance && instance->taskq) { 729 ddi_taskq_destroy(instance->taskq); 730 } 731 732 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 733 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 734 735 mrsas_fm_fini(instance); 736 737 pci_config_teardown(&instance->pci_handle); 738 739 ddi_soft_state_free(mrsas_state, instance_no); 740 741 con_log(CL_ANN, (CE_NOTE, 742 "mr_sas: return failure from mrsas_attach")); 743 744 return (DDI_FAILURE); 745 } 746 747 /*ARGSUSED*/ 748 static int 749 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 750 { 751 int rval; 752 int mrsas_minor = getminor((dev_t)arg); 753 754 struct mrsas_instance *instance; 755 756 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 757 758 switch (cmd) { 759 case DDI_INFO_DEVT2DEVINFO: 760 instance = (struct mrsas_instance *) 761 ddi_get_soft_state(mrsas_state, 762 MINOR2INST(mrsas_minor)); 763 764 if (instance == NULL) { 765 *resultp = NULL; 766 rval = DDI_FAILURE; 767 } else { 768 *resultp = instance->dip; 769 rval = DDI_SUCCESS; 770 } 771 break; 772 case DDI_INFO_DEVT2INSTANCE: 773 *resultp = (void *)(intptr_t) 774 (MINOR2INST(getminor((dev_t)arg))); 775 rval = DDI_SUCCESS; 776 break; 777 default: 778 *resultp = NULL; 779 rval = DDI_FAILURE; 780 } 781 782 return (rval); 783 } 784 785 static int 786 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 787 { 788 int instance_no; 789 790 struct mrsas_instance *instance; 791 792 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 793 794 /* CONSTCOND */ 795 ASSERT(NO_COMPETING_THREADS); 796 797 instance_no = ddi_get_instance(dip); 798 799 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 800 instance_no); 801 802 if (!instance) { 803 con_log(CL_ANN, (CE_WARN, 804 "mr_sas:%d could not get instance in detach", 805 
instance_no)); 806 807 return (DDI_FAILURE); 808 } 809 810 con_log(CL_ANN, (CE_NOTE, 811 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x", 812 instance_no, instance->vendor_id, instance->device_id, 813 instance->subsysvid, instance->subsysid)); 814 815 switch (cmd) { 816 case DDI_DETACH: 817 con_log(CL_ANN, (CE_NOTE, 818 "mrsas_detach: DDI_DETACH")); 819 820 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 821 con_log(CL_ANN, (CE_WARN, 822 "mr_sas:%d failed to detach", 823 instance_no)); 824 825 return (DDI_FAILURE); 826 } 827 828 scsi_hba_tran_free(instance->tran); 829 830 flush_cache(instance); 831 832 if (abort_aen_cmd(instance, instance->aen_cmd)) { 833 con_log(CL_ANN, (CE_WARN, "mrsas_detach: " 834 "failed to abort prevous AEN command")); 835 836 return (DDI_FAILURE); 837 } 838 839 instance->func_ptr->disable_intr(instance); 840 841 if (instance->isr_level == HIGH_LEVEL_INTR) { 842 ddi_remove_softintr(instance->soft_intr_id); 843 } 844 845 mrsas_rem_intrs(instance); 846 847 if (instance->taskq) { 848 ddi_taskq_destroy(instance->taskq); 849 } 850 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 851 * sizeof (struct mrsas_ld)); 852 free_space_for_mfi(instance); 853 854 mrsas_fm_fini(instance); 855 856 pci_config_teardown(&instance->pci_handle); 857 858 kmem_free(instance->func_ptr, 859 sizeof (struct mrsas_func_ptr)); 860 861 ddi_soft_state_free(mrsas_state, instance_no); 862 break; 863 case DDI_PM_SUSPEND: 864 con_log(CL_ANN, (CE_NOTE, 865 "mrsas_detach: DDI_PM_SUSPEND")); 866 867 break; 868 case DDI_SUSPEND: 869 con_log(CL_ANN, (CE_NOTE, 870 "mrsas_detach: DDI_SUSPEND")); 871 872 break; 873 default: 874 con_log(CL_ANN, (CE_WARN, 875 "invalid detach command:0x%x", cmd)); 876 return (DDI_FAILURE); 877 } 878 879 return (DDI_SUCCESS); 880 } 881 882 /* 883 * ************************************************************************** * 884 * * 885 * common entry points - for character driver types * 886 * * 887 * 
************************************************************************** * 888 */ 889 static int 890 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 891 { 892 int rval = 0; 893 894 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 895 896 /* Check root permissions */ 897 if (drv_priv(credp) != 0) { 898 con_log(CL_ANN, (CE_WARN, 899 "mr_sas: Non-root ioctl access denied!")); 900 return (EPERM); 901 } 902 903 /* Verify we are being opened as a character device */ 904 if (otyp != OTYP_CHR) { 905 con_log(CL_ANN, (CE_WARN, 906 "mr_sas: ioctl node must be a char node")); 907 return (EINVAL); 908 } 909 910 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 911 == NULL) { 912 return (ENXIO); 913 } 914 915 if (scsi_hba_open) { 916 rval = scsi_hba_open(dev, openflags, otyp, credp); 917 } 918 919 return (rval); 920 } 921 922 static int 923 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 924 { 925 int rval = 0; 926 927 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 928 929 /* no need for locks! 
*/ 930 931 if (scsi_hba_close) { 932 rval = scsi_hba_close(dev, openflags, otyp, credp); 933 } 934 935 return (rval); 936 } 937 938 static int 939 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 940 int *rvalp) 941 { 942 int rval = 0; 943 944 struct mrsas_instance *instance; 945 struct mrsas_ioctl *ioctl; 946 struct mrsas_aen aen; 947 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 948 949 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 950 951 if (instance == NULL) { 952 /* invalid minor number */ 953 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 954 return (ENXIO); 955 } 956 957 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 958 KM_SLEEP); 959 ASSERT(ioctl); 960 961 switch ((uint_t)cmd) { 962 case MRSAS_IOCTL_FIRMWARE: 963 if (ddi_copyin((void *)arg, ioctl, 964 sizeof (struct mrsas_ioctl), mode)) { 965 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 966 "ERROR IOCTL copyin")); 967 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 968 return (EFAULT); 969 } 970 971 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 972 rval = handle_drv_ioctl(instance, ioctl, mode); 973 } else { 974 rval = handle_mfi_ioctl(instance, ioctl, mode); 975 } 976 977 if (ddi_copyout((void *)ioctl, (void *)arg, 978 (sizeof (struct mrsas_ioctl) - 1), mode)) { 979 con_log(CL_ANN, (CE_WARN, 980 "mrsas_ioctl: copy_to_user failed")); 981 rval = 1; 982 } 983 984 break; 985 case MRSAS_IOCTL_AEN: 986 if (ddi_copyin((void *) arg, &aen, 987 sizeof (struct mrsas_aen), mode)) { 988 con_log(CL_ANN, (CE_WARN, 989 "mrsas_ioctl: ERROR AEN copyin")); 990 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 991 return (EFAULT); 992 } 993 994 rval = handle_mfi_aen(instance, &aen); 995 996 if (ddi_copyout((void *) &aen, (void *)arg, 997 sizeof (struct mrsas_aen), mode)) { 998 con_log(CL_ANN, (CE_WARN, 999 "mrsas_ioctl: copy_to_user failed")); 1000 rval = 1; 1001 } 1002 1003 break; 1004 default: 1005 rval = 
scsi_hba_ioctl(dev, cmd, arg, 1006 mode, credp, rvalp); 1007 1008 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: " 1009 "scsi_hba_ioctl called, ret = %x.", rval)); 1010 } 1011 1012 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1013 return (rval); 1014 } 1015 1016 /* 1017 * ************************************************************************** * 1018 * * 1019 * common entry points - for block driver types * 1020 * * 1021 * ************************************************************************** * 1022 */ 1023 /*ARGSUSED*/ 1024 static int 1025 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1026 { 1027 int instance_no; 1028 1029 struct mrsas_instance *instance; 1030 1031 instance_no = ddi_get_instance(dip); 1032 instance = (struct mrsas_instance *)ddi_get_soft_state 1033 (mrsas_state, instance_no); 1034 1035 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1036 1037 if (!instance) { 1038 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter " 1039 "in reset", instance_no)); 1040 return (DDI_FAILURE); 1041 } 1042 1043 instance->func_ptr->disable_intr(instance); 1044 1045 con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d", 1046 instance_no)); 1047 1048 flush_cache(instance); 1049 1050 return (DDI_SUCCESS); 1051 } 1052 1053 1054 /* 1055 * ************************************************************************** * 1056 * * 1057 * entry points (SCSI HBA) * 1058 * * 1059 * ************************************************************************** * 1060 */ 1061 /*ARGSUSED*/ 1062 static int 1063 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1064 scsi_hba_tran_t *tran, struct scsi_device *sd) 1065 { 1066 struct mrsas_instance *instance; 1067 uint16_t tgt = sd->sd_address.a_target; 1068 uint8_t lun = sd->sd_address.a_lun; 1069 1070 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init target %d lun %d", 1071 tgt, lun)); 1072 1073 instance = ADDR2MR(&sd->sd_address); 1074 1075 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 1076 (void) 
ndi_merge_node(tgt_dip, mrsas_name_node); 1077 ddi_set_name_addr(tgt_dip, NULL); 1078 1079 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init in " 1080 "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d", 1081 tgt, lun)); 1082 return (DDI_FAILURE); 1083 } 1084 1085 con_log(CL_ANN1, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", 1086 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); 1087 1088 if (tgt < MRDRV_MAX_LD && lun == 0) { 1089 if (instance->mr_ld_list[tgt].dip == NULL && 1090 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { 1091 instance->mr_ld_list[tgt].dip = tgt_dip; 1092 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; 1093 } 1094 } 1095 return (DDI_SUCCESS); 1096 } 1097 1098 /*ARGSUSED*/ 1099 static void 1100 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1101 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1102 { 1103 struct mrsas_instance *instance; 1104 int tgt = sd->sd_address.a_target; 1105 int lun = sd->sd_address.a_lun; 1106 1107 instance = ADDR2MR(&sd->sd_address); 1108 1109 con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); 1110 1111 if (tgt < MRDRV_MAX_LD && lun == 0) { 1112 if (instance->mr_ld_list[tgt].dip == tgt_dip) { 1113 instance->mr_ld_list[tgt].dip = NULL; 1114 } 1115 } 1116 } 1117 1118 static dev_info_t * 1119 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun) 1120 { 1121 dev_info_t *child = NULL; 1122 char addr[SCSI_MAXNAMELEN]; 1123 char tmp[MAXNAMELEN]; 1124 1125 (void) sprintf(addr, "%x,%x", tgt, lun); 1126 for (child = ddi_get_child(instance->dip); child; 1127 child = ddi_get_next_sibling(child)) { 1128 1129 if (mrsas_name_node(child, tmp, MAXNAMELEN) != 1130 DDI_SUCCESS) { 1131 continue; 1132 } 1133 1134 if (strcmp(addr, tmp) == 0) { 1135 break; 1136 } 1137 } 1138 con_log(CL_ANN1, (CE_NOTE, "mrsas_find_child: return child = %p", 1139 (void *)child)); 1140 return (child); 1141 } 1142 1143 static int 1144 mrsas_name_node(dev_info_t *dip, char *name, int len) 1145 { 
1146 int tgt, lun; 1147 1148 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1149 DDI_PROP_DONTPASS, "target", -1); 1150 con_log(CL_ANN1, (CE_NOTE, 1151 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt)); 1152 if (tgt == -1) { 1153 return (DDI_FAILURE); 1154 } 1155 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1156 "lun", -1); 1157 con_log(CL_ANN1, 1158 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun)); 1159 if (lun == -1) { 1160 return (DDI_FAILURE); 1161 } 1162 (void) snprintf(name, len, "%x,%x", tgt, lun); 1163 return (DDI_SUCCESS); 1164 } 1165 1166 static struct scsi_pkt * 1167 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, 1168 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1169 int flags, int (*callback)(), caddr_t arg) 1170 { 1171 struct scsa_cmd *acmd; 1172 struct mrsas_instance *instance; 1173 struct scsi_pkt *new_pkt; 1174 1175 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1176 1177 instance = ADDR2MR(ap); 1178 1179 /* step #1 : pkt allocation */ 1180 if (pkt == NULL) { 1181 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, 1182 tgtlen, sizeof (struct scsa_cmd), callback, arg); 1183 if (pkt == NULL) { 1184 return (NULL); 1185 } 1186 1187 acmd = PKT2CMD(pkt); 1188 1189 /* 1190 * Initialize the new pkt - we redundantly initialize 1191 * all the fields for illustrative purposes. 
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		new_pkt = pkt;
	} else {
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/* only free a pkt we allocated ourselves */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * mrsas_tran_start - SCSA command transport entry point.
 *
 * Builds a firmware command for the packet and issues it either via
 * interrupt-driven I/O (normal path) or polled I/O (FLAG_NOINTR), in
 * which case completion status is translated here before the callback.
 *
 * Returns TRAN_ACCEPT on success or TRAN_BUSY when no command can be
 * built or the firmware queue is saturated.
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* throttle: don't exceed the firmware's command depth */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* polled (FLAG_NOINTR) path: wait for completion here */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MRSAS_TRUE;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* translate firmware completion status into pkt fields */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * mrsas_tran_abort - SCSA abort entry point.
 * Always fails: command abort is not supported by this hardware.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * mrsas_tran_reset - SCSA reset entry point.
 * Always fails: target/bus reset is not supported by this hardware.
 */
/*ARGSUSED*/
static int
mrsas_tran_reset(struct scsi_address *ap, int level)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* reset command not supported by H/W */

	return (DDI_FAILURE);

}

/*
 * mrsas_tran_getcap - SCSA capability query entry point.
 *
 * Returns the value of the named capability, or -1 for unknown or
 * unsupported capabilities.
 */
/*ARGSUSED*/
static int
mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int rval = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = mrsas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return
	    (rval);
}

/*
 * mrsas_tran_setcap - SCSA capability set entry point.
 *
 * Returns 1 if the capability is accepted (most are fixed and the
 * request is simply acknowledged), -1 for unknown capabilities.
 */
/*ARGSUSED*/
static int
mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * mrsas_tran_destroy_pkt - SCSA packet teardown entry point.
 *
 * Releases any DMA resources still bound to the command, then frees
 * the packet itself.
 */
static void
mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * mrsas_tran_dmafree - SCSA DMA-resource release entry point.
 *
 * Unbinds and frees the command's DMA handle without freeing the pkt.
 */
/*ARGSUSED*/
static void
mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * mrsas_tran_sync_pkt - SCSA DMA sync entry point.
 *
 * Synchronizes the data buffer for device or CPU view depending on the
 * transfer direction recorded in cmd_flags.
 */
/*ARGSUSED*/
static void
mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}

/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;

	struct mrsas_cmd *cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ASSERT(instance);
	/* for fixed interrupts, verify this device actually asserted it */
	if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
	    !instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_CLAIMED);
	}

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* nothing new on the reply queue */
		con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
		return (DDI_INTR_CLAIMED);
	}
	mutex_enter(&instance->completed_pool_mtx);

	/*
	 * Drain the reply queue: each entry holds a command context
	 * (index into cmd_list); move each completed command onto the
	 * completed pool for the soft interrupt to process.
	 */
	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* reply queue has max_fw_cmds + 1 entries; wrap around */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	/* publish the new consumer index back to the firmware */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * NOTE(review): softint_running is read without a lock here;
	 * looks like a benign check to avoid re-triggering, but confirm
	 * there is no missed-wakeup window.
	 */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                  libraries                                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 */
static struct mrsas_cmd *
get_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->cmd_pool_mtx);

	/* NULL when the pool is exhausted */
	return (cmd);
}

/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}

/*
 * destroy_mfi_frame_pool - free the per-command DMA frame objects
 * allocated by create_mfi_frame_pool (including the extra flush_cache
 * slot, hence max_cmd + 1).
 */
static void
destroy_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	struct mrsas_cmd *cmd;

	/* return all frames to pool */
	for (i = 0; i < max_cmd+1; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);

		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool - allocate one DMA frame object per command.
 *
 * Each object holds the MFI frame, the SGL area (max_num_sge 64-bit
 * SGEs) and a trailing SENSE_LENGTH sense buffer.  The command's index
 * is stamped into the frame context so completions can be matched.
 *
 * NOTE(review): failure paths return DDI_FAILURE in one branch and
 * ENOMEM in another; callers only test for non-zero, but the mixed
 * conventions are worth unifying — confirm before changing.
 */
static int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz = sizeof (struct mrsas_sge64);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd+1) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		/* frame must be physically contiguous: exactly one cookie */
		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer occupies the tail of the frame object */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: pci_pool_alloc failed"));

			return (ENOMEM);
		}

		/* stamp the command index as the frame's context id */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}

/*
 * free_additional_dma_buffer
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer - allocate the shared internal DMA area
 * (producer index, consumer index, reply queue, scratch buffer) and the
 * event-detail buffer used by AEN processing.
 *
 * Internal buffer layout (offsets into mfi_internal_dma_obj.buffer):
 *   +0  producer index (uint32_t)
 *   +4  consumer index (uint32_t)
 *   +8  reply queue (max_fw_cmds + 1 entries)
 *   +8 + reply_q_sz  general-purpose internal buffer
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas: could not alloc reply queue"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi - undo alloc_space_for_mfi: release the additional
 * DMA buffers, the frame pool, each command structure and the cmd_list
 * array itself.  Safe to call twice (no-op once cmd_list is NULL).
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds+1; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct mrsas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct mrsas_cmd *) * (max_cmd+1));

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi - allocate the command array (max_fw_cmds plus a
 * reserved flush_cache slot), populate the free command pool, then
 * create the frame pool and the additional DMA buffers.
 */
static int
alloc_space_for_mfi(struct mrsas_instance *instance)
{
	int i;
	uint32_t max_cmd;
	size_t sz;

	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/* reserve 1 more slot for flush_cache */
	sz = sizeof (struct mrsas_cmd *) * (max_cmd+1);

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	ASSERT(instance->cmd_list);

	for (i = 0; i < max_cmd+1; i++) {
		instance->cmd_list[i] = kmem_zalloc(sizeof (struct mrsas_cmd),
		    KM_SLEEP);
		ASSERT(instance->cmd_list[i]);
	}

	INIT_LIST_HEAD(&instance->cmd_pool_list);

	/* add all the commands to command pool (instance->cmd_pool) */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;

		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	/* single slot for flush_cache won't be added in command pool */
	cmd = instance->cmd_list[max_cmd];
	cmd->index = i;

	/* create a frame pool and assign one frame to each cmd */
	if (create_mfi_frame_pool(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	/* create a frame pool and assign one frame to each cmd */
	if (alloc_additional_dma_buffer(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * get_ctrl_info - issue MR_DCMD_CTRL_GET_INFO in polled mode and copy
 * selected fields (max_request_size, ld_present_count, product_name)
 * out of the DMA buffer into the caller's ctrl_info.
 *
 * Returns 0 on success, -1 on firmware or FMA-check failure,
 * DDI_FAILURE when no command could be obtained.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_ctrl_info *ci;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* firmware writes the reply into the shared internal buffer */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* build the DCMD frame (little-endian device access) */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
		ret = -1;
	}

	return_mfi_pkt(instance, cmd);
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}

	return (ret);
}

/*
 * abort_aen_cmd - issue an MFI ABORT frame (synchronous mode) to cancel
 * the outstanding asynchronous-event-notification command.
 *
 * Returns 0 on success, -1 on failure, DDI_FAILURE when no command
 * could be obtained.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		return (DDI_FAILURE);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	/* flag the AEN command so its completion path knows it was aborted */
	instance->aen_cmd->abort_aen = 1;

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_sync_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	/*
	 * NOTE(review): abort_aen was already set to 1 before issuing the
	 * abort above; this second store looks redundant — confirm intent.
	 */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	(void) mrsas_common_check(instance, cmd);

	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * init_mfi - bring the firmware to READY state, size the command pool
 * from the firmware status register, allocate all MFI resources and
 * issue the MFI INIT frame describing the reply queue.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after unwinding (frees the MFI
 * space and unmaps registers on the failure paths).
 */
static int
init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;
	struct mrsas_ctrl_info ctrl_info;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;

	/* we expect the FW state to be READY */
	if (mfi_state_transition_to_ready(instance)) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
		goto fail_ready_state;
	}

	/* get various operational parameters from status register */
	instance->max_num_sge =
	    (instance->func_ptr->read_fw_status_reg(instance) &
	    0xFF0000) >> 0x10;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds =
	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
	instance->max_fw_cmds = instance->max_fw_cmds - 1;

	instance->max_num_sge =
	    (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
	    MRSAS_MAX_SGE_CNT : instance->max_num_sge;

	/* create a pool of commands */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS)
		goto fail_alloc_fw_space;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	/*
	 * NOTE(review): get_mfi_pkt() can return NULL when the pool is
	 * empty; the result is not checked here — confirm whether the
	 * freshly created pool makes that impossible at this point.
	 */
	cmd = get_mfi_pkt(instance);
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* producer/consumer/reply queue live in mfi_internal_dma_obj */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* queue info was placed in the frame's SGL area (offset 64) */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		return_mfi_pkt(instance, cmd);
		goto fail_fw_init;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_pkt(instance, cmd);
		goto fail_fw_init;
	}

	return_mfi_pkt(instance, cmd);

	/* gather misc FW related information */
	if (!get_ctrl_info(instance, &ctrl_info)) {
		instance->max_sectors_per_req = ctrl_info.max_request_size;
		con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
		    ctrl_info.product_name, ctrl_info.ld_present_count));
	} else {
		/* fall back to an estimate derived from the SGE count */
		instance->max_sectors_per_req = instance->max_num_sge *
		    PAGESIZE / 512;
	}

	return (DDI_SUCCESS);

fail_fw_init:
fail_alloc_fw_space:

	free_space_for_mfi(instance);

fail_ready_state:
	ddi_regs_map_free(&instance->regmap_handle);

fail_mfi_reg_setup:
	return (DDI_FAILURE);
}

/*
 * mfi_state_transition_to_ready : Move the FW to READY state
 *
 * @reg_set : MFI register set
 */
static int
mfi_state_transition_to_ready(struct mrsas_instance *instance)
{
	int i;
	uint8_t max_wait;
	uint32_t fw_ctrl;
	uint32_t fw_state;
	uint32_t cur_state;

	fw_state =
	    instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));

	/* step the firmware through its states until it reports READY */
	while (fw_state != MFI_STATE_READY) {
		con_log(CL_ANN,
		    (CE_NOTE,
		    "mfi_state_transition_to_ready:FW state%x", fw_state));

		/*
		 * For each intermediate state, pick the doorbell action
		 * (if any) and the maximum number of seconds to wait for
		 * the state to change.
		 */
		switch (fw_state) {
		case MFI_STATE_FAULT:
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: FW in FAULT state!!"));

			return (ENODEV);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: FW waiting for HANDSHAKE"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
			    MFI_INIT_HOTPLUG, instance);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: FW state boot message pending"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

			max_wait = 10;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* bring it to READY state; assuming max wait 2 secs */
			instance->func_ptr->disable_intr(instance);
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: FW in OPERATIONAL state"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
			 * to be set
			 */
			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* this state should not last for more than 2 seconds */
			con_log(CL_ANN, (CE_NOTE, "FW state undefined"));

			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_DEVICE_SCAN:
			max_wait = 10;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		default:
			con_log(CL_ANN, (CE_NOTE,
			    "mr_sas: Unknown state 0x%x", fw_state));
			return (ENODEV);
		}

		/* the cur_state should not last for more than max_wait secs */
		for (i = 0; i < (max_wait * MILLISEC); i++) {
			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
			fw_state =
			    instance->func_ptr->read_fw_status_reg(instance) &
			    MFI_STATE_MASK;

			if (fw_state == cur_state) {
				/* poll once per millisecond */
				delay(1 * drv_usectohz(MILLISEC));
			} else {
				break;
			}
		}

		/* return error if fw_state hasn't changed after max_wait */
		if (fw_state == cur_state) {
			con_log(CL_ANN, (CE_NOTE,
			    "FW state hasn't changed in %d secs", max_wait));
			return (ENODEV);
		}
	};

	fw_ctrl = RD_IB_DOORBELL(instance);

	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));

	/*
	 * Write 0xF to the doorbell register to do the following.
	 * - Abort all outstanding commands (bit 0).
	 * - Transition from OPERATIONAL to READY state (bit 1).
	 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
	 * - Set to release FW to continue running (i.e. BIOS handshake
	 *   (bit 3).
	 */
	WR_IB_DOORBELL(0xF, instance);

	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		return (ENODEV);
	}
	return (DDI_SUCCESS);
}

/*
 * get_seq_num - fetch the current event-log sequence numbers from the
 * firmware via a polled DCMD into a temporary DMA buffer.
 */
static int
get_seq_num(struct mrsas_instance *instance,
    struct mrsas_evt_log_info *eli)
{
	int ret = DDI_SUCCESS;

	dma_obj_t dcmd_dma_obj;
	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_evt_log_info *eli_tmp;
	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
		return (ENOMEM);
	}
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* allocate the data transfer buffer */
	dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	/*
	 * NOTE(review): on this failure path the cmd obtained above is
	 * not returned to the pool — looks like a command leak; confirm.
	 */
	if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "get_seq_num: could not allocate data transfer buffer."));
		return (DDI_FAILURE);
	}

	(void) memset(dcmd_dma_obj.buffer, 0,
	    sizeof (struct mrsas_evt_log_info));

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 2424 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 2425 sizeof (struct mrsas_evt_log_info)); 2426 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 2427 MR_DCMD_CTRL_EVENT_GET_INFO); 2428 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 2429 sizeof (struct mrsas_evt_log_info)); 2430 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 2431 dcmd_dma_obj.dma_cookie[0].dmac_address); 2432 2433 cmd->sync_cmd = MRSAS_TRUE; 2434 cmd->frame_count = 1; 2435 2436 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2437 cmn_err(CE_WARN, "get_seq_num: " 2438 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); 2439 ret = DDI_FAILURE; 2440 } else { 2441 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer; 2442 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle, 2443 &eli_tmp->newest_seq_num); 2444 ret = DDI_SUCCESS; 2445 } 2446 2447 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 2448 ret = DDI_FAILURE; 2449 2450 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 2451 ret = DDI_FAILURE; 2452 } 2453 2454 return_mfi_pkt(instance, cmd); 2455 2456 return (ret); 2457 } 2458 2459 /* 2460 * start_mfi_aen 2461 */ 2462 static int 2463 start_mfi_aen(struct mrsas_instance *instance) 2464 { 2465 int ret = 0; 2466 2467 struct mrsas_evt_log_info eli; 2468 union mrsas_evt_class_locale class_locale; 2469 2470 /* get the latest sequence number from FW */ 2471 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info)); 2472 2473 if (get_seq_num(instance, &eli)) { 2474 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num"); 2475 return (-1); 2476 } 2477 2478 /* register AEN with FW for latest sequence number plus 1 */ 2479 class_locale.members.reserved = 0; 2480 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL); 2481 class_locale.members.class = MR_EVT_CLASS_INFO; 2482 class_locale.word = LE_32(class_locale.word); 2483 
	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
	    class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
		return (-1);
	}

	/* ret is 0 here; success */
	return (ret);
}

/*
 * flush_cache
 *
 * Ask the controller (via MR_DCMD_CTRL_CACHE_FLUSH, issued in polled mode)
 * to flush both its own cache and the disks' caches.  Uses the reserved
 * command slot at cmd_list[max_fw_cmds] so it cannot fail for lack of a
 * packet.  Best-effort: a failure is only logged.
 */
static void
flush_cache(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_dcmd_frame		*dcmd;
	uint32_t	max_cmd = instance->max_fw_cmds;

	/* the extra slot past the FW command pool is reserved for this */
	cmd = instance->cmd_list[max_cmd];

	if (cmd == NULL)
		return;

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_NONE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_CACHE_FLUSH);
	/* flush both controller cache and disk cache */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
	    MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "done"));
}

/*
 * service_mfi_aen -	Completes an AEN command
 * @instance:		Adapter soft state
 * @cmd:		The completed MR_DCMD_CTRL_EVENT_WAIT command
 *
 * Handles one firmware asynchronous event: logs it to the sysevent queue,
 * dispatches (un)configure work for logical-drive create/delete/config-clear
 * events via mrsas_service_evt(), then re-arms the same command for the
 * next sequence number and re-issues it to the firmware.
 */
static void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	ddi_acc_handle_t acc_handle;

	acc_handle = cmd->frame_dma_obj.acc_handle;

	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);

	/* firmware reports ENODATA when there was no event; not an error */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* whole config gone: unconfigure every attached LD target */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */
	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* re-arm the same DCMD frame for the next event */
	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	/* latch the firmware status for the waiter to inspect */
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	/* ENODATA from FW means "nothing to report", treat as success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/* wake every thread blocked in issue_cmd_in_sync_mode() */
	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * mrsas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Splices the completed-command list out under completed_pool_mtx, then
 * completes each command according to its MFI opcode: regular SCSI/LD I/O
 * gets its scsi_pkt status filled in and its completion callback invoked;
 * synchronous internal commands wake their waiter; AEN DCMDs are handed
 * to service_mfi_aen().
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt		*pkt;
	struct scsa_cmd		*acmd;
	struct mrsas_cmd	*cmd;
	struct mlist_head	*pos, *next;
	mlist_t			process_list;
	struct mrsas_header	*hdr;
	struct scsi_arq_status	*arqstat;

	con_log(CL_ANN1, (CE_CONT, "mrsas_softintr called"));

	ASSERT(instance);
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/* take the whole completed list private so the mutex can drop */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			/*
			 * NOTE(review): this early return leaves
			 * softint_running set and abandons the remaining
			 * commands on process_list — confirm whether the
			 * FM "service lost" path makes that acceptable.
			 */
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* optimistic defaults; overridden per cmd_status */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry	*inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				/* background CC/rebuild: I/O still succeeded */
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* hand sense data back via ARQ */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    acmd->cmd_scblen -
					    offsetof(struct scsi_arq_status,
					    sts_sensedata), DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				/* synthesize ILLEGAL REQUEST sense locally */
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));

			(void) mrsas_common_check(instance, cmd);

			return_mfi_pkt(instance, cmd);

			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {
					(*pkt->pkt_comp)(pkt);
				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mrsas_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 */
/*
 * Allocates a DMA handle, DMA-able memory of obj->size bytes with the
 * requested endianness, and binds it.  On success returns the cookie
 * count (callers expect exactly 1); on any failure returns -1 with all
 * partially-acquired resources released.
 */
static int
mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
    uchar_t endian_flags)
{
	int	i;
	size_t	alen = 0;
	uint_t	cookie_cnt;
	struct ddi_device_acc_attr tmp_endian_attr;

	tmp_endian_attr = endian_attr;
	tmp_endian_attr.devacc_attr_endian_flags = endian_flags;

	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
	if (i != DDI_SUCCESS) {

		switch (i) {
			case DDI_DMA_BADATTR :
				con_log(CL_ANN, (CE_WARN,
				"Failed ddi_dma_alloc_handle- Bad attribute"));
				break;
			case DDI_DMA_NORESOURCES :
				con_log(CL_ANN, (CE_WARN,
				"Failed ddi_dma_alloc_handle- No Resources"));
				break;
			default :
				con_log(CL_ANN, (CE_WARN,
				    "Failed ddi_dma_alloc_handle: "
				    "unknown status %d", i));
				break;
		}

		return (-1);
	}

	/* alen < obj->size guards against a short allocation */
	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
	    alen < obj->size) {

		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

		return (-1);
	}

	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

		ddi_dma_mem_free(&obj->acc_handle);
		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

		return (-1);
	}

	/* FMA validation of the freshly created handles */
	if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	return (cookie_cnt);
}

/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mrsas_alloc_dma_obj()
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if an FMA check on either handle
 * fails (in which case the handles are NOT torn down).
 */
static int
mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
{

	if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	/* unbind, then free in reverse order of allocation */
	(void) ddi_dma_unbind_handle(obj.dma_handle);
	ddi_dma_mem_free(&obj.acc_handle);
	ddi_dma_free_handle(&obj.dma_handle);

	return (DDI_SUCCESS);
}

/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 *		int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Binds bp to a DMA handle (honoring PKT_CONSISTENT / PKT_DMA_PARTIAL),
 * walks the cookies into acmd->cmd_dmacookies (capped at max_num_sge),
 * and sets pkt_resid to whatever portion of bp could not be mapped.
 * Returns DDI_SUCCESS or DDI_FAILURE (with bioerror() set on bp).
 */
static int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		i = 0;
		acmd->cmd_dmacount = 0;
		/* collect cookies up to the SGE limit of the HW frame */
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}

/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
3216 */ 3217 if (acmd->cmd_cookie == acmd->cmd_ncookies) { 3218 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) { 3219 return (DDI_SUCCESS); 3220 } 3221 3222 /* at last window, cannot move */ 3223 if (++acmd->cmd_curwin >= acmd->cmd_nwin) { 3224 return (DDI_FAILURE); 3225 } 3226 3227 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3228 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3229 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3230 DDI_FAILURE) { 3231 return (DDI_FAILURE); 3232 } 3233 3234 acmd->cmd_cookie = 0; 3235 } else { 3236 /* still more cookies in this window - get the next one */ 3237 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3238 &acmd->cmd_dmacookies[0]); 3239 } 3240 3241 /* get remaining cookies in this window, up to our maximum */ 3242 for (;;) { 3243 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size; 3244 acmd->cmd_cookie++; 3245 3246 if (i == instance->max_num_sge || 3247 acmd->cmd_cookie == acmd->cmd_ncookies) { 3248 break; 3249 } 3250 3251 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3252 &acmd->cmd_dmacookies[i]); 3253 } 3254 3255 acmd->cmd_cookiecnt = i; 3256 3257 if (bp->b_bcount >= acmd->cmd_dmacount) { 3258 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3259 } else { 3260 pkt->pkt_resid = 0; 3261 } 3262 3263 return (DDI_SUCCESS); 3264 } 3265 3266 /* 3267 * build_cmd 3268 */ 3269 static struct mrsas_cmd * 3270 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap, 3271 struct scsi_pkt *pkt, uchar_t *cmd_done) 3272 { 3273 uint16_t flags = 0; 3274 uint32_t i; 3275 uint32_t context; 3276 uint32_t sge_bytes; 3277 ddi_acc_handle_t acc_handle; 3278 struct mrsas_cmd *cmd; 3279 struct mrsas_sge64 *mfi_sgl; 3280 struct scsa_cmd *acmd = PKT2CMD(pkt); 3281 struct mrsas_pthru_frame *pthru; 3282 struct mrsas_io_frame *ldio; 3283 3284 /* find out if this is logical or physical drive command. 
*/ 3285 acmd->islogical = MRDRV_IS_LOGICAL(ap); 3286 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3287 *cmd_done = 0; 3288 3289 /* get the command packet */ 3290 if (!(cmd = get_mfi_pkt(instance))) { 3291 return (NULL); 3292 } 3293 3294 acc_handle = cmd->frame_dma_obj.acc_handle; 3295 3296 /* Clear the frame buffer and assign back the context id */ 3297 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3298 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 3299 3300 cmd->pkt = pkt; 3301 cmd->cmd = acmd; 3302 3303 /* lets get the command directions */ 3304 if (acmd->cmd_flags & CFLAG_DMASEND) { 3305 flags = MFI_FRAME_DIR_WRITE; 3306 3307 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3308 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3309 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3310 DDI_DMA_SYNC_FORDEV); 3311 } 3312 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3313 flags = MFI_FRAME_DIR_READ; 3314 3315 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3316 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3317 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3318 DDI_DMA_SYNC_FORCPU); 3319 } 3320 } else { 3321 flags = MFI_FRAME_DIR_NONE; 3322 } 3323 3324 flags |= MFI_FRAME_SGL64; 3325 3326 switch (pkt->pkt_cdbp[0]) { 3327 3328 /* 3329 * case SCMD_SYNCHRONIZE_CACHE: 3330 * flush_cache(instance); 3331 * return_mfi_pkt(instance, cmd); 3332 * *cmd_done = 1; 3333 * 3334 * return (NULL); 3335 */ 3336 3337 case SCMD_READ: 3338 case SCMD_WRITE: 3339 case SCMD_READ_G1: 3340 case SCMD_WRITE_G1: 3341 if (acmd->islogical) { 3342 ldio = (struct mrsas_io_frame *)cmd->frame; 3343 3344 /* 3345 * preare the Logical IO frame: 3346 * 2nd bit is zero for all read cmds 3347 */ 3348 ddi_put8(acc_handle, &ldio->cmd, 3349 (pkt->pkt_cdbp[0] & 0x02) ? 
MFI_CMD_OP_LD_WRITE 3350 : MFI_CMD_OP_LD_READ); 3351 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 3352 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 3353 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 3354 ddi_put16(acc_handle, &ldio->timeout, 0); 3355 ddi_put8(acc_handle, &ldio->reserved_0, 0); 3356 ddi_put16(acc_handle, &ldio->pad_0, 0); 3357 ddi_put16(acc_handle, &ldio->flags, flags); 3358 3359 /* Initialize sense Information */ 3360 bzero(cmd->sense, SENSE_LENGTH); 3361 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 3362 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 3363 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 3364 cmd->sense_phys_addr); 3365 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 3366 ddi_put8(acc_handle, &ldio->access_byte, 3367 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 3368 ddi_put8(acc_handle, &ldio->sge_count, 3369 acmd->cmd_cookiecnt); 3370 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 3371 3372 context = ddi_get32(acc_handle, &ldio->context); 3373 3374 if (acmd->cmd_cdblen == CDB_GROUP0) { 3375 ddi_put32(acc_handle, &ldio->lba_count, ( 3376 (uint16_t)(pkt->pkt_cdbp[4]))); 3377 3378 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3379 ((uint32_t)(pkt->pkt_cdbp[3])) | 3380 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3381 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3382 << 16))); 3383 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3384 ddi_put32(acc_handle, &ldio->lba_count, ( 3385 ((uint16_t)(pkt->pkt_cdbp[8])) | 3386 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 3387 3388 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3389 ((uint32_t)(pkt->pkt_cdbp[5])) | 3390 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3391 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3392 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3393 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3394 ddi_put32(acc_handle, &ldio->lba_count, ( 3395 ((uint16_t)(pkt->pkt_cdbp[9])) | 3396 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3397 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3398 
((uint16_t)(pkt->pkt_cdbp[6]) << 24))); 3399 3400 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3401 ((uint32_t)(pkt->pkt_cdbp[5])) | 3402 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3403 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3404 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3405 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3406 ddi_put32(acc_handle, &ldio->lba_count, ( 3407 ((uint16_t)(pkt->pkt_cdbp[13])) | 3408 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3409 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3410 ((uint16_t)(pkt->pkt_cdbp[10]) << 24))); 3411 3412 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3413 ((uint32_t)(pkt->pkt_cdbp[9])) | 3414 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3415 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3416 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 3417 3418 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 3419 ((uint32_t)(pkt->pkt_cdbp[5])) | 3420 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3421 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3422 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 3423 } 3424 3425 break; 3426 } 3427 /* fall through For all non-rd/wr cmds */ 3428 default: 3429 3430 switch (pkt->pkt_cdbp[0]) { 3431 case SCMD_MODE_SENSE: 3432 case SCMD_MODE_SENSE_G1: { 3433 union scsi_cdb *cdbp; 3434 uint16_t page_code; 3435 3436 cdbp = (void *)pkt->pkt_cdbp; 3437 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 3438 switch (page_code) { 3439 case 0x3: 3440 case 0x4: 3441 (void) mrsas_mode_sense_build(pkt); 3442 return_mfi_pkt(instance, cmd); 3443 *cmd_done = 1; 3444 return (NULL); 3445 } 3446 break; 3447 } 3448 default: 3449 break; 3450 } 3451 3452 pthru = (struct mrsas_pthru_frame *)cmd->frame; 3453 3454 /* prepare the DCDB frame */ 3455 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 
3456 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 3457 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 3458 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 3459 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 3460 ddi_put8(acc_handle, &pthru->lun, 0); 3461 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 3462 ddi_put16(acc_handle, &pthru->timeout, 0); 3463 ddi_put16(acc_handle, &pthru->flags, flags); 3464 ddi_put32(acc_handle, &pthru->data_xfer_len, 3465 acmd->cmd_dmacount); 3466 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 3467 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 3468 3469 bzero(cmd->sense, SENSE_LENGTH); 3470 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 3471 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3472 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 3473 cmd->sense_phys_addr); 3474 3475 context = ddi_get32(acc_handle, &pthru->context); 3476 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 3477 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 3478 3479 break; 3480 } 3481 #ifdef lint 3482 context = context; 3483 #endif 3484 /* prepare the scatter-gather list for the firmware */ 3485 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3486 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 3487 acmd->cmd_dmacookies[i].dmac_laddress); 3488 ddi_put32(acc_handle, &mfi_sgl->length, 3489 acmd->cmd_dmacookies[i].dmac_size); 3490 } 3491 3492 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 3493 3494 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 3495 ((sge_bytes % MRMFI_FRAME_SIZE) ? 
1 : 0) + 1; 3496 3497 if (cmd->frame_count >= 8) { 3498 cmd->frame_count = 8; 3499 } 3500 3501 return (cmd); 3502 } 3503 3504 /* 3505 * issue_mfi_pthru 3506 */ 3507 static int 3508 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3509 struct mrsas_cmd *cmd, int mode) 3510 { 3511 void *ubuf; 3512 uint32_t kphys_addr = 0; 3513 uint32_t xferlen = 0; 3514 uint_t model; 3515 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3516 dma_obj_t pthru_dma_obj; 3517 struct mrsas_pthru_frame *kpthru; 3518 struct mrsas_pthru_frame *pthru; 3519 int i; 3520 pthru = &cmd->frame->pthru; 3521 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 3522 3523 model = ddi_model_convert_from(mode & FMODELS); 3524 if (model == DDI_MODEL_ILP32) { 3525 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3526 3527 xferlen = kpthru->sgl.sge32[0].length; 3528 3529 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3530 } else { 3531 #ifdef _ILP32 3532 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3533 xferlen = kpthru->sgl.sge32[0].length; 3534 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3535 #else 3536 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3537 xferlen = kpthru->sgl.sge64[0].length; 3538 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3539 #endif 3540 } 3541 3542 if (xferlen) { 3543 /* means IOCTL requires DMA */ 3544 /* allocate the data transfer buffer */ 3545 pthru_dma_obj.size = xferlen; 3546 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 3547 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3548 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3549 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3550 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3551 3552 /* allocate kernel buffer for DMA */ 3553 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 3554 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3555 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3556 "could not allocate 
data transfer buffer.")); 3557 return (DDI_FAILURE); 3558 } 3559 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 3560 3561 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3562 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3563 for (i = 0; i < xferlen; i++) { 3564 if (ddi_copyin((uint8_t *)ubuf+i, 3565 (uint8_t *)pthru_dma_obj.buffer+i, 3566 1, mode)) { 3567 con_log(CL_ANN, (CE_WARN, 3568 "issue_mfi_pthru : " 3569 "copy from user space failed")); 3570 return (DDI_FAILURE); 3571 } 3572 } 3573 } 3574 3575 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3576 } 3577 3578 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 3579 ddi_put8(acc_handle, &pthru->sense_len, 0); 3580 ddi_put8(acc_handle, &pthru->cmd_status, 0); 3581 ddi_put8(acc_handle, &pthru->scsi_status, 0); 3582 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 3583 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 3584 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 3585 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 3586 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 3587 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 3588 3589 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 3590 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3591 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); 3592 3593 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 3594 pthru->cdb_len, DDI_DEV_AUTOINCR); 3595 3596 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 3597 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 3598 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 3599 3600 cmd->sync_cmd = MRSAS_TRUE; 3601 cmd->frame_count = 1; 3602 3603 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3604 con_log(CL_ANN, (CE_WARN, 3605 "issue_mfi_pthru: fw_ioctl failed")); 3606 } else { 3607 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 
3608 for (i = 0; i < xferlen; i++) { 3609 if (ddi_copyout( 3610 (uint8_t *)pthru_dma_obj.buffer+i, 3611 (uint8_t *)ubuf+i, 1, mode)) { 3612 con_log(CL_ANN, (CE_WARN, 3613 "issue_mfi_pthru : " 3614 "copy to user space failed")); 3615 return (DDI_FAILURE); 3616 } 3617 } 3618 } 3619 } 3620 3621 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 3622 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 3623 3624 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3625 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 3626 3627 if (xferlen) { 3628 /* free kernel buffer */ 3629 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3630 return (DDI_FAILURE); 3631 } 3632 3633 return (DDI_SUCCESS); 3634 } 3635 3636 /* 3637 * issue_mfi_dcmd 3638 */ 3639 static int 3640 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3641 struct mrsas_cmd *cmd, int mode) 3642 { 3643 void *ubuf; 3644 uint32_t kphys_addr = 0; 3645 uint32_t xferlen = 0; 3646 uint32_t model; 3647 dma_obj_t dcmd_dma_obj; 3648 struct mrsas_dcmd_frame *kdcmd; 3649 struct mrsas_dcmd_frame *dcmd; 3650 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3651 int i; 3652 dcmd = &cmd->frame->dcmd; 3653 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 3654 3655 model = ddi_model_convert_from(mode & FMODELS); 3656 if (model == DDI_MODEL_ILP32) { 3657 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3658 3659 xferlen = kdcmd->sgl.sge32[0].length; 3660 3661 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3662 } else { 3663 #ifdef _ILP32 3664 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3665 xferlen = kdcmd->sgl.sge32[0].length; 3666 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3667 #else 3668 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3669 xferlen = kdcmd->sgl.sge64[0].length; 3670 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 3671 #endif 3672 } 3673 if 
(xferlen) { 3674 /* means IOCTL requires DMA */ 3675 /* allocate the data transfer buffer */ 3676 dcmd_dma_obj.size = xferlen; 3677 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 3678 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3679 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3680 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3681 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3682 3683 /* allocate kernel buffer for DMA */ 3684 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 3685 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3686 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3687 "could not allocate data transfer buffer.")); 3688 return (DDI_FAILURE); 3689 } 3690 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 3691 3692 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3693 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3694 for (i = 0; i < xferlen; i++) { 3695 if (ddi_copyin((uint8_t *)ubuf + i, 3696 (uint8_t *)dcmd_dma_obj.buffer + i, 3697 1, mode)) { 3698 con_log(CL_ANN, (CE_WARN, 3699 "issue_mfi_dcmd : " 3700 "copy from user space failed")); 3701 return (DDI_FAILURE); 3702 } 3703 } 3704 } 3705 3706 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3707 } 3708 3709 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 3710 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 3711 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 3712 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 3713 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 3714 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 3715 3716 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 3717 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 3718 3719 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 3720 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 3721 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr); 3722 3723 cmd->sync_cmd = MRSAS_TRUE; 3724 cmd->frame_count = 1; 3725 3726 if 
(instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3727 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); 3728 } else { 3729 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3730 for (i = 0; i < xferlen; i++) { 3731 if (ddi_copyout( 3732 (uint8_t *)dcmd_dma_obj.buffer + i, 3733 (uint8_t *)ubuf + i, 3734 1, mode)) { 3735 con_log(CL_ANN, (CE_WARN, 3736 "issue_mfi_dcmd : " 3737 "copy to user space failed")); 3738 return (DDI_FAILURE); 3739 } 3740 } 3741 } 3742 } 3743 3744 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 3745 3746 if (xferlen) { 3747 /* free kernel buffer */ 3748 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3749 return (DDI_FAILURE); 3750 } 3751 3752 return (DDI_SUCCESS); 3753 } 3754 3755 /* 3756 * issue_mfi_smp 3757 */ 3758 static int 3759 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 3760 struct mrsas_cmd *cmd, int mode) 3761 { 3762 void *request_ubuf; 3763 void *response_ubuf; 3764 uint32_t request_xferlen = 0; 3765 uint32_t response_xferlen = 0; 3766 uint_t model; 3767 dma_obj_t request_dma_obj; 3768 dma_obj_t response_dma_obj; 3769 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 3770 struct mrsas_smp_frame *ksmp; 3771 struct mrsas_smp_frame *smp; 3772 struct mrsas_sge32 *sge32; 3773 #ifndef _ILP32 3774 struct mrsas_sge64 *sge64; 3775 #endif 3776 int i; 3777 uint64_t tmp_sas_addr; 3778 3779 smp = &cmd->frame->smp; 3780 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; 3781 3782 model = ddi_model_convert_from(mode & FMODELS); 3783 if (model == DDI_MODEL_ILP32) { 3784 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3785 3786 sge32 = &ksmp->sgl[0].sge32[0]; 3787 response_xferlen = sge32[0].length; 3788 request_xferlen = sge32[1].length; 3789 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3790 "response_xferlen = %x, request_xferlen = %x", 3791 response_xferlen, request_xferlen)); 3792 3793 response_ubuf = (void 
*)(ulong_t)sge32[0].phys_addr; 3794 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3795 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3796 "response_ubuf = %p, request_ubuf = %p", 3797 response_ubuf, request_ubuf)); 3798 } else { 3799 #ifdef _ILP32 3800 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3801 3802 sge32 = &ksmp->sgl[0].sge32[0]; 3803 response_xferlen = sge32[0].length; 3804 request_xferlen = sge32[1].length; 3805 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3806 "response_xferlen = %x, request_xferlen = %x", 3807 response_xferlen, request_xferlen)); 3808 3809 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3810 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3811 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3812 "response_ubuf = %p, request_ubuf = %p", 3813 response_ubuf, request_ubuf)); 3814 #else 3815 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 3816 3817 sge64 = &ksmp->sgl[0].sge64[0]; 3818 response_xferlen = sge64[0].length; 3819 request_xferlen = sge64[1].length; 3820 3821 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 3822 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 3823 #endif 3824 } 3825 if (request_xferlen) { 3826 /* means IOCTL requires DMA */ 3827 /* allocate the data transfer buffer */ 3828 request_dma_obj.size = request_xferlen; 3829 request_dma_obj.dma_attr = mrsas_generic_dma_attr; 3830 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3831 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3832 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 3833 request_dma_obj.dma_attr.dma_attr_align = 1; 3834 3835 /* allocate kernel buffer for DMA */ 3836 if (mrsas_alloc_dma_obj(instance, &request_dma_obj, 3837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3838 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3839 "could not allocate data transfer buffer.")); 3840 return (DDI_FAILURE); 3841 } 3842 (void) memset(request_dma_obj.buffer, 0, request_xferlen); 3843 3844 /* If IOCTL requires DMA 
WRITE, do ddi_copyin IOCTL data copy */ 3845 for (i = 0; i < request_xferlen; i++) { 3846 if (ddi_copyin((uint8_t *)request_ubuf + i, 3847 (uint8_t *)request_dma_obj.buffer + i, 3848 1, mode)) { 3849 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3850 "copy from user space failed")); 3851 return (DDI_FAILURE); 3852 } 3853 } 3854 } 3855 3856 if (response_xferlen) { 3857 /* means IOCTL requires DMA */ 3858 /* allocate the data transfer buffer */ 3859 response_dma_obj.size = response_xferlen; 3860 response_dma_obj.dma_attr = mrsas_generic_dma_attr; 3861 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3862 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3863 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 3864 response_dma_obj.dma_attr.dma_attr_align = 1; 3865 3866 /* allocate kernel buffer for DMA */ 3867 if (mrsas_alloc_dma_obj(instance, &response_dma_obj, 3868 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3869 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3870 "could not allocate data transfer buffer.")); 3871 return (DDI_FAILURE); 3872 } 3873 (void) memset(response_dma_obj.buffer, 0, response_xferlen); 3874 3875 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3876 for (i = 0; i < response_xferlen; i++) { 3877 if (ddi_copyin((uint8_t *)response_ubuf + i, 3878 (uint8_t *)response_dma_obj.buffer + i, 3879 1, mode)) { 3880 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3881 "copy from user space failed")); 3882 return (DDI_FAILURE); 3883 } 3884 } 3885 } 3886 3887 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd); 3888 ddi_put8(acc_handle, &smp->cmd_status, 0); 3889 ddi_put8(acc_handle, &smp->connection_status, 0); 3890 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count); 3891 /* smp->context = ksmp->context; */ 3892 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout); 3893 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len); 3894 3895 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr, 3896 sizeof (uint64_t)); 3897 ddi_put64(acc_handle, 
&smp->sas_addr, tmp_sas_addr); 3898 3899 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64); 3900 3901 model = ddi_model_convert_from(mode & FMODELS); 3902 if (model == DDI_MODEL_ILP32) { 3903 con_log(CL_ANN1, (CE_NOTE, 3904 "issue_mfi_smp: DDI_MODEL_ILP32")); 3905 3906 sge32 = &smp->sgl[0].sge32[0]; 3907 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3908 ddi_put32(acc_handle, &sge32[0].phys_addr, 3909 response_dma_obj.dma_cookie[0].dmac_address); 3910 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3911 ddi_put32(acc_handle, &sge32[1].phys_addr, 3912 request_dma_obj.dma_cookie[0].dmac_address); 3913 } else { 3914 #ifdef _ILP32 3915 con_log(CL_ANN1, (CE_NOTE, 3916 "issue_mfi_smp: DDI_MODEL_ILP32")); 3917 sge32 = &smp->sgl[0].sge32[0]; 3918 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 3919 ddi_put32(acc_handle, &sge32[0].phys_addr, 3920 response_dma_obj.dma_cookie[0].dmac_address); 3921 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 3922 ddi_put32(acc_handle, &sge32[1].phys_addr, 3923 request_dma_obj.dma_cookie[0].dmac_address); 3924 #else 3925 con_log(CL_ANN1, (CE_NOTE, 3926 "issue_mfi_smp: DDI_MODEL_LP64")); 3927 sge64 = &smp->sgl[0].sge64[0]; 3928 ddi_put32(acc_handle, &sge64[0].length, response_xferlen); 3929 ddi_put64(acc_handle, &sge64[0].phys_addr, 3930 response_dma_obj.dma_cookie[0].dmac_address); 3931 ddi_put32(acc_handle, &sge64[1].length, request_xferlen); 3932 ddi_put64(acc_handle, &sge64[1].phys_addr, 3933 request_dma_obj.dma_cookie[0].dmac_address); 3934 #endif 3935 } 3936 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : " 3937 "smp->response_xferlen = %d, smp->request_xferlen = %d " 3938 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), 3939 ddi_get32(acc_handle, &sge32[1].length), 3940 ddi_get32(acc_handle, &smp->data_xfer_len))); 3941 3942 cmd->sync_cmd = MRSAS_TRUE; 3943 cmd->frame_count = 1; 3944 3945 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3946 
con_log(CL_ANN, (CE_WARN, 3947 "issue_mfi_smp: fw_ioctl failed")); 3948 } else { 3949 con_log(CL_ANN1, (CE_NOTE, 3950 "issue_mfi_smp: copy to user space")); 3951 3952 if (request_xferlen) { 3953 for (i = 0; i < request_xferlen; i++) { 3954 if (ddi_copyout( 3955 (uint8_t *)request_dma_obj.buffer + 3956 i, (uint8_t *)request_ubuf + i, 3957 1, mode)) { 3958 con_log(CL_ANN, (CE_WARN, 3959 "issue_mfi_smp : copy to user space" 3960 " failed")); 3961 return (DDI_FAILURE); 3962 } 3963 } 3964 } 3965 3966 if (response_xferlen) { 3967 for (i = 0; i < response_xferlen; i++) { 3968 if (ddi_copyout( 3969 (uint8_t *)response_dma_obj.buffer 3970 + i, (uint8_t *)response_ubuf 3971 + i, 1, mode)) { 3972 con_log(CL_ANN, (CE_WARN, 3973 "issue_mfi_smp : copy to " 3974 "user space failed")); 3975 return (DDI_FAILURE); 3976 } 3977 } 3978 } 3979 } 3980 3981 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); 3982 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 3983 ddi_get8(acc_handle, &smp->cmd_status))); 3984 3985 3986 if (request_xferlen) { 3987 /* free kernel buffer */ 3988 if (mrsas_free_dma_obj(instance, request_dma_obj) != 3989 DDI_SUCCESS) 3990 return (DDI_FAILURE); 3991 } 3992 3993 if (response_xferlen) { 3994 /* free kernel buffer */ 3995 if (mrsas_free_dma_obj(instance, response_dma_obj) != 3996 DDI_SUCCESS) 3997 return (DDI_FAILURE); 3998 } 3999 4000 return (DDI_SUCCESS); 4001 } 4002 4003 /* 4004 * issue_mfi_stp 4005 */ 4006 static int 4007 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4008 struct mrsas_cmd *cmd, int mode) 4009 { 4010 void *fis_ubuf; 4011 void *data_ubuf; 4012 uint32_t fis_xferlen = 0; 4013 uint32_t data_xferlen = 0; 4014 uint_t model; 4015 dma_obj_t fis_dma_obj; 4016 dma_obj_t data_dma_obj; 4017 struct mrsas_stp_frame *kstp; 4018 struct mrsas_stp_frame *stp; 4019 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 4020 int i; 4021 4022 stp = &cmd->frame->stp; 4023 kstp = (struct mrsas_stp_frame 
*)&ioctl->frame[0];

	/* Pick the 32- or 64-bit SGL layout matching the caller's model. */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		/* SGE 0 = FIS buffer, SGE 1 = data payload buffer */
		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
	}
	else
	{
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));

		fis_xferlen = kstp->sgl.sge64[0].length;
		data_xferlen = kstp->sgl.sge64[1].length;

		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
#endif
	}


	if (fis_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		fis_dma_obj.size = fis_xferlen;
		fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
		fis_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(fis_dma_obj.buffer, 0, fis_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < fis_xferlen; i++) {
			if (ddi_copyin((uint8_t *)fis_ubuf + i,
			    (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
				/*
				 * NOTE(review): fis_dma_obj is not freed on
				 * this failure return -- leak; same below for
				 * data_dma_obj.  Needs a cleanup pass.
				 */
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	if (data_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
		    "data_xferlen = %x", data_ubuf, data_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		data_dma_obj.size = data_xferlen;
		data_dma_obj.dma_attr = mrsas_generic_dma_attr;
		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
		data_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}
		(void) memset(data_dma_obj.buffer, 0, data_xferlen);

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		for (i = 0; i < data_xferlen; i++) {
			if (ddi_copyin((uint8_t *)data_ubuf + i,
			    (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy from user space failed"));
				return (DDI_FAILURE);
			}
		}
	}

	/* Build the hardware STP frame from the user frame. */
	ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
	ddi_put8(acc_handle, &stp->cmd_status, 0);
	ddi_put8(acc_handle, &stp->connection_status, 0);
	ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
	ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);

	ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
	ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);

	ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis,
(uint8_t *)stp->fis, 10, 4133 DDI_DEV_AUTOINCR); 4134 4135 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64); 4136 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags); 4137 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen); 4138 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr, 4139 fis_dma_obj.dma_cookie[0].dmac_address); 4140 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen); 4141 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr, 4142 data_dma_obj.dma_cookie[0].dmac_address); 4143 4144 cmd->sync_cmd = MRSAS_TRUE; 4145 cmd->frame_count = 1; 4146 4147 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4148 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); 4149 } else { 4150 4151 if (fis_xferlen) { 4152 for (i = 0; i < fis_xferlen; i++) { 4153 if (ddi_copyout( 4154 (uint8_t *)fis_dma_obj.buffer + i, 4155 (uint8_t *)fis_ubuf + i, 1, mode)) { 4156 con_log(CL_ANN, (CE_WARN, 4157 "issue_mfi_stp : copy to " 4158 "user space failed")); 4159 return (DDI_FAILURE); 4160 } 4161 } 4162 } 4163 } 4164 if (data_xferlen) { 4165 for (i = 0; i < data_xferlen; i++) { 4166 if (ddi_copyout( 4167 (uint8_t *)data_dma_obj.buffer + i, 4168 (uint8_t *)data_ubuf + i, 1, mode)) { 4169 con_log(CL_ANN, (CE_WARN, 4170 "issue_mfi_stp : copy to" 4171 " user space failed")); 4172 return (DDI_FAILURE); 4173 } 4174 } 4175 } 4176 4177 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); 4178 4179 if (fis_xferlen) { 4180 /* free kernel buffer */ 4181 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 4182 return (DDI_FAILURE); 4183 } 4184 4185 if (data_xferlen) { 4186 /* free kernel buffer */ 4187 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 4188 return (DDI_FAILURE); 4189 } 4190 4191 return (DDI_SUCCESS); 4192 } 4193 4194 /* 4195 * fill_up_drv_ver 4196 */ 4197 static void 4198 fill_up_drv_ver(struct mrsas_drv_ver *dv) 4199 { 4200 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); 4201 4202 
(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4203 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4204 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas")); 4205 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); 4206 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, 4207 strlen(MRSAS_RELDATE)); 4208 } 4209 4210 /* 4211 * handle_drv_ioctl 4212 */ 4213 static int 4214 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4215 int mode) 4216 { 4217 int i; 4218 int rval = DDI_SUCCESS; 4219 int *props = NULL; 4220 void *ubuf; 4221 4222 uint8_t *pci_conf_buf; 4223 uint32_t xferlen; 4224 uint32_t num_props; 4225 uint_t model; 4226 struct mrsas_dcmd_frame *kdcmd; 4227 struct mrsas_drv_ver dv; 4228 struct mrsas_pci_information pi; 4229 4230 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 4231 4232 model = ddi_model_convert_from(mode & FMODELS); 4233 if (model == DDI_MODEL_ILP32) { 4234 con_log(CL_ANN1, (CE_NOTE, 4235 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4236 4237 xferlen = kdcmd->sgl.sge32[0].length; 4238 4239 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4240 } else { 4241 #ifdef _ILP32 4242 con_log(CL_ANN1, (CE_NOTE, 4243 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4244 xferlen = kdcmd->sgl.sge32[0].length; 4245 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4246 #else 4247 con_log(CL_ANN1, (CE_NOTE, 4248 "handle_drv_ioctl: DDI_MODEL_LP64")); 4249 xferlen = kdcmd->sgl.sge64[0].length; 4250 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4251 #endif 4252 } 4253 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4254 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4255 4256 switch (kdcmd->opcode) { 4257 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: 4258 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4259 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); 4260 4261 fill_up_drv_ver(&dv); 4262 4263 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4264 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4265 
"MRSAS_DRIVER_IOCTL_DRIVER_VERSION : " 4266 "copy to user space failed")); 4267 kdcmd->cmd_status = 1; 4268 rval = 1; 4269 } else { 4270 kdcmd->cmd_status = 0; 4271 } 4272 break; 4273 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION: 4274 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4275 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON")); 4276 4277 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 4278 0, "reg", &props, &num_props)) { 4279 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4280 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4281 "ddi_prop_look_int_array failed")); 4282 rval = DDI_FAILURE; 4283 } else { 4284 4285 pi.busNumber = (props[0] >> 16) & 0xFF; 4286 pi.deviceNumber = (props[0] >> 11) & 0x1f; 4287 pi.functionNumber = (props[0] >> 8) & 0x7; 4288 ddi_prop_free((void *)props); 4289 } 4290 4291 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 4292 4293 for (i = 0; i < (sizeof (struct mrsas_pci_information) - 4294 offsetof(struct mrsas_pci_information, pciHeaderInfo)); 4295 i++) { 4296 pci_conf_buf[i] = 4297 pci_config_get8(instance->pci_handle, i); 4298 } 4299 4300 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 4301 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4302 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 4303 "copy to user space failed")); 4304 kdcmd->cmd_status = 1; 4305 rval = 1; 4306 } else { 4307 kdcmd->cmd_status = 0; 4308 } 4309 break; 4310 default: 4311 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4312 "invalid driver specific IOCTL opcode = 0x%x", 4313 kdcmd->opcode)); 4314 kdcmd->cmd_status = 1; 4315 rval = DDI_FAILURE; 4316 break; 4317 } 4318 4319 return (rval); 4320 } 4321 4322 /* 4323 * handle_mfi_ioctl 4324 */ 4325 static int 4326 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 4327 int mode) 4328 { 4329 int rval = DDI_SUCCESS; 4330 4331 struct mrsas_header *hdr; 4332 struct mrsas_cmd *cmd; 4333 4334 cmd = get_mfi_pkt(instance); 4335 4336 if (!cmd) { 4337 con_log(CL_ANN, (CE_WARN, "mr_sas: " 4338 "failed to get a cmd 
/*
 * handle_mfi_aen
 *
 * Service the AEN-registration ioctl: (re)register with the firmware for
 * asynchronous event notification, starting from the instance's current
 * event sequence number.  The register result is reflected back to the
 * caller in aen->cmd_status.
 */
static int
handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
{
	int rval = 0;

	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}

/*
 * register_mfi_aen
 *
 * Register an asynchronous event notification (AEN) request with the
 * firmware via MR_DCMD_CTRL_EVENT_WAIT, merging it with any AEN command
 * already outstanding.
 *
 * Returns 0 on success (including "already covered by the pending AEN"),
 * ENOMEM if no command packet is available, or the abort_aen_cmd() error.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have.  If it is, then we don't have
	 * to do anything.  In other words, whichever events the current
	 * AEN request is subscribing to have already been subscribed to.
	 *
	 * If the old command is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is a superset of both
	 * old and current, and re-issue it to the FW.
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* The pending registration's class/locale is in mbox word 1. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Build the superset: union of locales, lowest class */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd)
		return (ENOMEM);
	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* The firmware will deposit the event detail into this buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/*
	 * NOTE(review): locale was already LE_16-swapped above; this second
	 * LE_16/LE_32 pass restores wire order on big-endian hosts before the
	 * word is written into mbox word 1 -- confirm on a big-endian build.
	 */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame; issue_cmd bumps fw_outstanding. */
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
4497 curr_aen.word); 4498 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 4499 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 4500 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 4501 sizeof (struct mrsas_evt_detail)); 4502 4503 instance->aen_seq_num = seq_num; 4504 4505 4506 /* 4507 * Store reference to the cmd used to register for AEN. When an 4508 * application wants us to register for AEN, we have to abort this 4509 * cmd and re-register with a new EVENT LOCALE supplied by that app 4510 */ 4511 instance->aen_cmd = cmd; 4512 4513 cmd->frame_count = 1; 4514 4515 /* Issue the aen registration frame */ 4516 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4517 instance->func_ptr->issue_cmd(cmd, instance); 4518 4519 return (0); 4520 } 4521 4522 static void 4523 display_scsi_inquiry(caddr_t scsi_inq) 4524 { 4525 #define MAX_SCSI_DEVICE_CODE 14 4526 int i; 4527 char inquiry_buf[256] = {0}; 4528 int len; 4529 const char *const scsi_device_types[] = { 4530 "Direct-Access ", 4531 "Sequential-Access", 4532 "Printer ", 4533 "Processor ", 4534 "WORM ", 4535 "CD-ROM ", 4536 "Scanner ", 4537 "Optical Device ", 4538 "Medium Changer ", 4539 "Communications ", 4540 "Unknown ", 4541 "Unknown ", 4542 "Unknown ", 4543 "Enclosure ", 4544 }; 4545 4546 len = 0; 4547 4548 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4549 for (i = 8; i < 16; i++) { 4550 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4551 scsi_inq[i]); 4552 } 4553 4554 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4555 4556 for (i = 16; i < 32; i++) { 4557 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4558 scsi_inq[i]); 4559 } 4560 4561 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4562 4563 for (i = 32; i < 36; i++) { 4564 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4565 scsi_inq[i]); 4566 } 4567 4568 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4569 4570 4571 i = scsi_inq[0] & 0x1f; 4572 4573 
/*
 * read_fw_status_reg_ppc
 *
 * Return the firmware status word from outbound scratch pad register 0.
 */
static int
read_fw_status_reg_ppc(struct mrsas_instance *instance)
{
	return ((int)RD_OB_SCRATCH_PAD_0(instance));
}

/*
 * issue_cmd_ppc
 *
 * Fire-and-forget submission: count the command as outstanding, then
 * post its frame physical address (with the frame count encoded in the
 * low bits) to the inbound queue port.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	atomic_add_16(&instance->fw_outstanding, 1);

	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}

/*
 * issue_cmd_in_sync_mode_ppc
 *
 * Submit a command and block until the interrupt path signals completion
 * by changing cmd->cmd_status away from ENODATA.
 *
 * Returns DDI_SUCCESS if completion was signalled, DDI_FAILURE otherwise.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	/* ENODATA marks the command as "not yet completed". */
	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	/*
	 * NOTE(review): cv_wait() has no timeout, so `i' counts condvar
	 * wakeups rather than elapsed milliseconds; despite its name,
	 * msecs here is only a wakeup-count bound -- confirm intent.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	/* Exiting well before the wakeup bound means we saw a completion. */
	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
/*
 * issue_cmd_in_poll_mode_ppc
 *
 * Submit a command with interrupts suppressed and busy-wait (1 ms steps,
 * up to MFI_POLL_TIMEOUT_SECS) for the firmware to overwrite the
 * MFI_CMD_STATUS_POLL_MODE sentinel in the frame's status byte.
 *
 * Returns DDI_SUCCESS once the status changes, DDI_FAILURE on timeout.
 */
static int
issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint16_t flags;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));

	frame_hdr = (struct mrsas_header *)cmd->frame;
	/* Seed the status with the poll sentinel the FW will overwrite. */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	/* Keep the completion off the reply queue; we poll instead. */
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	/* issue the frame using inbound queue port */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC);	/* wait for 1000 usecs */
	}

	if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * enable_intr_ppc
 *
 * Clear any stale doorbell state, then unmask the 2108 message
 * interrupt in the outbound interrupt mask register.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	/* Clear any pending doorbell bits before unmasking. */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/* Unmask only the 2108 message interrupt (~0x80000000). */
	WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
/*
 * disable_intr_ppc
 *
 * Mask all outbound interrupts from the controller, then read the mask
 * register back to flush the posted write over PCI.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* Mask every outbound interrupt source (0xFFFFFFFF). */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

/*
 * intr_ack_ppc
 *
 * Decide whether a pending interrupt belongs to this controller and, if
 * so, acknowledge it by writing the status bits back to the doorbell
 * clear register.
 *
 * Returns DDI_INTR_CLAIMED when the interrupt was ours (and has been
 * cleared), DDI_INTR_UNCLAIMED otherwise or on an FMA-faulted register
 * map.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	/* A faulted register map also means we cannot claim the interrupt. */
	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}
	/* clear the interrupt by writing back the same value */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ to flush the doorbell write */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
(mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 4779 != DDI_SUCCESS) { 4780 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4781 if (cmd->pkt != NULL) { 4782 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4783 cmd->pkt->pkt_statistics = 0; 4784 } 4785 ret = DDI_FAILURE; 4786 } 4787 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 4788 DDI_SUCCESS) { 4789 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4790 if (cmd->pkt != NULL) { 4791 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4792 cmd->pkt->pkt_statistics = 0; 4793 } 4794 ret = DDI_FAILURE; 4795 } 4796 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 4797 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4798 4799 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 4800 4801 if (cmd->pkt != NULL) { 4802 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 4803 cmd->pkt->pkt_statistics = 0; 4804 } 4805 ret = DDI_FAILURE; 4806 } 4807 4808 return (ret); 4809 } 4810 4811 /*ARGSUSED*/ 4812 static int 4813 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 4814 { 4815 /* 4816 * as the driver can always deal with an error in any dma or 4817 * access handle, we can just return the fme_status value. 4818 */ 4819 pci_ereport_post(dip, err, NULL); 4820 return (err->fme_status); 4821 } 4822 4823 static void 4824 mrsas_fm_init(struct mrsas_instance *instance) 4825 { 4826 /* Need to change iblock to priority for new MSI intr */ 4827 ddi_iblock_cookie_t fm_ibc; 4828 4829 /* Only register with IO Fault Services if we have some capability */ 4830 if (instance->fm_capabilities) { 4831 /* Adjust access and dma attributes for FMA */ 4832 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 4833 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 4834 4835 /* 4836 * Register capabilities with IO Fault Services. 4837 * fm_capabilities will be updated to indicate 4838 * capabilities actually supported (not requested.) 
/*
 * mrsas_fm_init
 *
 * Register this instance with the IO Fault Services according to the
 * capabilities requested in instance->fm_capabilities, and switch the
 * shared access/DMA attribute templates (endian_attr and
 * mrsas_generic_dma_attr) into flag-error mode.  With no capabilities,
 * the templates are reset to the non-FMA defaults instead.
 */
static void
mrsas_fm_init(struct mrsas_instance *instance)
{
	/* Need to change iblock to priority for new MSI intr */
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (instance->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */

		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */

		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_setup(instance->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_register(instance->dip,
			    mrsas_fm_error_cb, (void*) instance);
		}
	} else {
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = 0;
	}
}

/*
 * mrsas_fm_fini
 *
 * Undo mrsas_fm_init() in reverse order: unregister the error callback,
 * tear down pci ereport state, detach from IO Fault Services and restore
 * the shared attribute templates to their non-FMA defaults.
 */
static void
mrsas_fm_fini(struct mrsas_instance *instance)
{
	/* Only unregister FMA capabilities if registered */
	if (instance->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_unregister(instance->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_teardown(instance->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(instance->dip);

		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = 0;
	}
}

/*
 * mrsas_check_acc_handle
 *
 * Return the FMA status of a register access handle, or DDI_FAILURE for
 * a NULL handle.
 */
int
mrsas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);

	return (de.fme_status);
}
4916 } 4917 4918 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 4919 4920 return (de.fme_status); 4921 } 4922 4923 void 4924 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 4925 { 4926 uint64_t ena; 4927 char buf[FM_MAX_CLASS]; 4928 4929 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 4930 ena = fm_ena_generate(0, FM_ENA_FMT1); 4931 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 4932 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 4933 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 4934 } 4935 } 4936 4937 static int 4938 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 4939 { 4940 4941 dev_info_t *dip = instance->dip; 4942 int avail, actual, count; 4943 int i, flag, ret; 4944 4945 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: intr_type = %x", 4946 intr_type)); 4947 4948 /* Get number of interrupts */ 4949 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 4950 if ((ret != DDI_SUCCESS) || (count == 0)) { 4951 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 4952 "ret %d count %d", ret, count)); 4953 4954 return (DDI_FAILURE); 4955 } 4956 4957 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: count = %d ", count)); 4958 4959 /* Get number of available interrupts */ 4960 ret = ddi_intr_get_navail(dip, intr_type, &avail); 4961 if ((ret != DDI_SUCCESS) || (avail == 0)) { 4962 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 4963 "ret %d avail %d", ret, avail)); 4964 4965 return (DDI_FAILURE); 4966 } 4967 con_log(CL_DLEVEL1, (CE_WARN, "mrsas_add_intrs: avail = %d ", avail)); 4968 4969 /* Only one interrupt routine. So limit the count to 1 */ 4970 if (count > 1) { 4971 count = 1; 4972 } 4973 4974 /* 4975 * Allocate an array of interrupt handlers. Currently we support 4976 * only one interrupt. The framework can be extended later. 
4977 */ 4978 instance->intr_size = count * sizeof (ddi_intr_handle_t); 4979 instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP); 4980 ASSERT(instance->intr_htable); 4981 4982 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type == 4983 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 4984 4985 /* Allocate interrupt */ 4986 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 4987 count, &actual, flag); 4988 4989 if ((ret != DDI_SUCCESS) || (actual == 0)) { 4990 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 4991 "avail = %d", avail)); 4992 kmem_free(instance->intr_htable, instance->intr_size); 4993 return (DDI_FAILURE); 4994 } 4995 if (actual < count) { 4996 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 4997 "Requested = %d Received = %d", count, actual)); 4998 } 4999 instance->intr_cnt = actual; 5000 5001 /* 5002 * Get the priority of the interrupt allocated. 5003 */ 5004 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 5005 &instance->intr_pri)) != DDI_SUCCESS) { 5006 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5007 "get priority call failed")); 5008 5009 for (i = 0; i < actual; i++) { 5010 (void) ddi_intr_free(instance->intr_htable[i]); 5011 } 5012 kmem_free(instance->intr_htable, instance->intr_size); 5013 return (DDI_FAILURE); 5014 } 5015 5016 /* 5017 * Test for high level mutex. we don't support them. 
5018 */ 5019 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 5020 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 5021 "High level interrupts not supported.")); 5022 5023 for (i = 0; i < actual; i++) { 5024 (void) ddi_intr_free(instance->intr_htable[i]); 5025 } 5026 kmem_free(instance->intr_htable, instance->intr_size); 5027 return (DDI_FAILURE); 5028 } 5029 5030 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 5031 instance->intr_pri)); 5032 5033 /* Call ddi_intr_add_handler() */ 5034 for (i = 0; i < actual; i++) { 5035 ret = ddi_intr_add_handler(instance->intr_htable[i], 5036 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 5037 (caddr_t)(uintptr_t)i); 5038 5039 if (ret != DDI_SUCCESS) { 5040 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 5041 "failed %d", ret)); 5042 5043 for (i = 0; i < actual; i++) { 5044 (void) ddi_intr_free(instance->intr_htable[i]); 5045 } 5046 kmem_free(instance->intr_htable, instance->intr_size); 5047 return (DDI_FAILURE); 5048 } 5049 5050 } 5051 5052 con_log(CL_DLEVEL1, (CE_WARN, " ddi_intr_add_handler done")); 5053 5054 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 5055 &instance->intr_cap)) != DDI_SUCCESS) { 5056 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 5057 ret)); 5058 5059 /* Free already allocated intr */ 5060 for (i = 0; i < actual; i++) { 5061 (void) ddi_intr_remove_handler( 5062 instance->intr_htable[i]); 5063 (void) ddi_intr_free(instance->intr_htable[i]); 5064 } 5065 kmem_free(instance->intr_htable, instance->intr_size); 5066 return (DDI_FAILURE); 5067 } 5068 5069 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5070 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 5071 5072 (void) ddi_intr_block_enable(instance->intr_htable, 5073 instance->intr_cnt); 5074 } else { 5075 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 5076 5077 for (i = 0; i < instance->intr_cnt; i++) { 5078 (void) ddi_intr_enable(instance->intr_htable[i]); 5079 con_log(CL_ANN, (CE_NOTE, "ddi 
intr enable returns " 5080 "%d", i)); 5081 } 5082 } 5083 5084 return (DDI_SUCCESS); 5085 5086 } 5087 5088 5089 static void 5090 mrsas_rem_intrs(struct mrsas_instance *instance) 5091 { 5092 int i; 5093 5094 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 5095 5096 /* Disable all interrupts first */ 5097 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 5098 (void) ddi_intr_block_disable(instance->intr_htable, 5099 instance->intr_cnt); 5100 } else { 5101 for (i = 0; i < instance->intr_cnt; i++) { 5102 (void) ddi_intr_disable(instance->intr_htable[i]); 5103 } 5104 } 5105 5106 /* Remove all the handlers */ 5107 5108 for (i = 0; i < instance->intr_cnt; i++) { 5109 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 5110 (void) ddi_intr_free(instance->intr_htable[i]); 5111 } 5112 5113 kmem_free(instance->intr_htable, instance->intr_size); 5114 } 5115 5116 static int 5117 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 5118 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 5119 { 5120 struct mrsas_instance *instance; 5121 int config; 5122 int rval; 5123 5124 char *ptr = NULL; 5125 int tgt, lun; 5126 5127 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 5128 5129 if ((instance = ddi_get_soft_state(mrsas_state, 5130 ddi_get_instance(parent))) == NULL) { 5131 return (NDI_FAILURE); 5132 } 5133 5134 /* Hold nexus during bus_config */ 5135 ndi_devi_enter(parent, &config); 5136 switch (op) { 5137 case BUS_CONFIG_ONE: { 5138 5139 /* parse wwid/target name out of name given */ 5140 if ((ptr = strchr((char *)arg, '@')) == NULL) { 5141 rval = NDI_FAILURE; 5142 break; 5143 } 5144 ptr++; 5145 5146 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 5147 rval = NDI_FAILURE; 5148 break; 5149 } 5150 5151 if (lun == 0) { 5152 rval = mrsas_config_ld(instance, tgt, lun, childp); 5153 } else { 5154 rval = NDI_FAILURE; 5155 } 5156 5157 break; 5158 } 5159 case BUS_CONFIG_DRIVER: 5160 case BUS_CONFIG_ALL: { 5161 5162 rval = 
mrsas_config_all_devices(instance); 5163 5164 rval = NDI_SUCCESS; 5165 break; 5166 } 5167 } 5168 5169 if (rval == NDI_SUCCESS) { 5170 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 5171 5172 } 5173 ndi_devi_exit(parent, config); 5174 5175 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 5176 rval)); 5177 return (rval); 5178 } 5179 5180 static int 5181 mrsas_config_all_devices(struct mrsas_instance *instance) 5182 { 5183 int rval, tgt; 5184 5185 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 5186 (void) mrsas_config_ld(instance, tgt, 0, NULL); 5187 5188 } 5189 5190 rval = NDI_SUCCESS; 5191 return (rval); 5192 } 5193 5194 static int 5195 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 5196 { 5197 char devbuf[SCSI_MAXNAMELEN]; 5198 char *addr; 5199 char *p, *tp, *lp; 5200 long num; 5201 5202 /* Parse dev name and address */ 5203 (void) strcpy(devbuf, devnm); 5204 addr = ""; 5205 for (p = devbuf; *p != '\0'; p++) { 5206 if (*p == '@') { 5207 addr = p + 1; 5208 *p = '\0'; 5209 } else if (*p == ':') { 5210 *p = '\0'; 5211 break; 5212 } 5213 } 5214 5215 /* Parse target and lun */ 5216 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 5217 if (*p == ',') { 5218 lp = p + 1; 5219 *p = '\0'; 5220 break; 5221 } 5222 } 5223 if (tgt && tp) { 5224 if (ddi_strtol(tp, NULL, 0x10, &num)) { 5225 return (DDI_FAILURE); /* Can declare this as constant */ 5226 } 5227 *tgt = (int)num; 5228 } 5229 if (lun && lp) { 5230 if (ddi_strtol(lp, NULL, 0x10, &num)) { 5231 return (DDI_FAILURE); 5232 } 5233 *lun = (int)num; 5234 } 5235 return (DDI_SUCCESS); /* Success case */ 5236 } 5237 5238 static int 5239 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 5240 uint8_t lun, dev_info_t **ldip) 5241 { 5242 struct scsi_device *sd; 5243 dev_info_t *child; 5244 int rval; 5245 5246 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 5247 tgt, lun)); 5248 5249 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 5250 if (ldip) { 5251 *ldip = 
child; 5252 } 5253 con_log(CL_ANN1, (CE_NOTE, 5254 "mrsas_config_ld: Child = %p found t = %d l = %d", 5255 (void *)child, tgt, lun)); 5256 return (NDI_SUCCESS); 5257 } 5258 5259 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 5260 sd->sd_address.a_hba_tran = instance->tran; 5261 sd->sd_address.a_target = (uint16_t)tgt; 5262 sd->sd_address.a_lun = (uint8_t)lun; 5263 5264 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 5265 rval = mrsas_config_scsi_device(instance, sd, ldip); 5266 else 5267 rval = NDI_FAILURE; 5268 5269 /* sd_unprobe is blank now. Free buffer manually */ 5270 if (sd->sd_inq) { 5271 kmem_free(sd->sd_inq, SUN_INQSIZE); 5272 sd->sd_inq = (struct scsi_inquiry *)NULL; 5273 } 5274 5275 kmem_free(sd, sizeof (struct scsi_device)); 5276 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 5277 rval)); 5278 return (rval); 5279 } 5280 5281 static int 5282 mrsas_config_scsi_device(struct mrsas_instance *instance, 5283 struct scsi_device *sd, dev_info_t **dipp) 5284 { 5285 char *nodename = NULL; 5286 char **compatible = NULL; 5287 int ncompatible = 0; 5288 char *childname; 5289 dev_info_t *ldip = NULL; 5290 int tgt = sd->sd_address.a_target; 5291 int lun = sd->sd_address.a_lun; 5292 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 5293 int rval; 5294 5295 con_log(CL_ANN1, (CE_WARN, "mr_sas: scsi_device t%dL%d", tgt, lun)); 5296 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 5297 NULL, &nodename, &compatible, &ncompatible); 5298 5299 if (nodename == NULL) { 5300 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver " 5301 "for t%dL%d", tgt, lun)); 5302 rval = NDI_FAILURE; 5303 goto finish; 5304 } 5305 5306 childname = (dtype == DTYPE_DIRECT) ? 
"sd" : nodename; 5307 con_log(CL_ANN1, (CE_WARN, 5308 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 5309 5310 /* Create a dev node */ 5311 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 5312 con_log(CL_ANN1, (CE_WARN, 5313 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 5314 if (rval == NDI_SUCCESS) { 5315 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 5316 DDI_PROP_SUCCESS) { 5317 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5318 "property for t%dl%d target", tgt, lun)); 5319 rval = NDI_FAILURE; 5320 goto finish; 5321 } 5322 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 5323 DDI_PROP_SUCCESS) { 5324 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5325 "property for t%dl%d lun", tgt, lun)); 5326 rval = NDI_FAILURE; 5327 goto finish; 5328 } 5329 5330 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 5331 "compatible", compatible, ncompatible) != 5332 DDI_PROP_SUCCESS) { 5333 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 5334 "property for t%dl%d compatible", tgt, lun)); 5335 rval = NDI_FAILURE; 5336 goto finish; 5337 } 5338 5339 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 5340 if (rval != NDI_SUCCESS) { 5341 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 5342 "t%dl%d", tgt, lun)); 5343 ndi_prop_remove_all(ldip); 5344 (void) ndi_devi_free(ldip); 5345 } else { 5346 con_log(CL_ANN1, (CE_WARN, "mr_sas: online Done :" 5347 "0 t%dl%d", tgt, lun)); 5348 } 5349 5350 } 5351 finish: 5352 if (dipp) { 5353 *dipp = ldip; 5354 } 5355 5356 con_log(CL_DLEVEL1, (CE_WARN, 5357 "mr_sas: config_scsi_device rval = %d t%dL%d", 5358 rval, tgt, lun)); 5359 scsi_hba_nodename_compatible_free(nodename, compatible); 5360 return (rval); 5361 } 5362 5363 /*ARGSUSED*/ 5364 static int 5365 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event, 5366 uint64_t wwn) 5367 { 5368 struct mrsas_eventinfo *mrevt = NULL; 5369 5370 con_log(CL_ANN1, 
(CE_NOTE, 5371 "mrsas_service_evt called for t%dl%d event = %d", 5372 tgt, lun, event)); 5373 5374 if ((instance->taskq == NULL) || (mrevt = 5375 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 5376 return (ENOMEM); 5377 } 5378 5379 mrevt->instance = instance; 5380 mrevt->tgt = tgt; 5381 mrevt->lun = lun; 5382 mrevt->event = event; 5383 5384 if ((ddi_taskq_dispatch(instance->taskq, 5385 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 5386 DDI_SUCCESS) { 5387 con_log(CL_ANN1, (CE_NOTE, 5388 "mr_sas: Event task failed for t%dl%d event = %d", 5389 tgt, lun, event)); 5390 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 5391 return (DDI_FAILURE); 5392 } 5393 return (DDI_SUCCESS); 5394 } 5395 5396 static void 5397 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 5398 { 5399 struct mrsas_instance *instance = mrevt->instance; 5400 dev_info_t *dip, *pdip; 5401 int circ1 = 0; 5402 char *devname; 5403 5404 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 5405 " tgt %d lun %d event %d", 5406 mrevt->tgt, mrevt->lun, mrevt->event)); 5407 5408 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 5409 dip = instance->mr_ld_list[mrevt->tgt].dip; 5410 } else { 5411 return; 5412 } 5413 5414 ndi_devi_enter(instance->dip, &circ1); 5415 switch (mrevt->event) { 5416 case MRSAS_EVT_CONFIG_TGT: 5417 if (dip == NULL) { 5418 5419 if (mrevt->lun == 0) { 5420 (void) mrsas_config_ld(instance, mrevt->tgt, 5421 0, NULL); 5422 } 5423 con_log(CL_ANN1, (CE_NOTE, 5424 "mr_sas: EVT_CONFIG_TGT called:" 5425 " for tgt %d lun %d event %d", 5426 mrevt->tgt, mrevt->lun, mrevt->event)); 5427 5428 } else { 5429 con_log(CL_ANN1, (CE_NOTE, 5430 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 5431 " for tgt %d lun %d event %d", 5432 mrevt->tgt, mrevt->lun, mrevt->event)); 5433 } 5434 break; 5435 case MRSAS_EVT_UNCONFIG_TGT: 5436 if (dip) { 5437 if (i_ddi_devi_attached(dip)) { 5438 5439 pdip = ddi_get_parent(dip); 5440 5441 devname = kmem_zalloc(MAXNAMELEN + 1, 
/*
 * mrsas_issue_evt_taskq
 *
 * Taskq worker behind mrsas_service_evt(): configure or unconfigure the
 * devinfo node for the event's target while holding the parent nexus.
 * The event record is always freed before returning.
 *
 * Only logical drives (tgt < MRDRV_MAX_LD, lun == 0) are handled; any
 * other address is silently dropped (note: the event record leaks on
 * that early return -- NOTE(review): confirm whether that path is
 * reachable in practice).
 */
static void
mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
{
	struct mrsas_instance *instance = mrevt->instance;
	dev_info_t *dip, *pdip;
	int circ1 = 0;
	char *devname;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
	    " tgt %d lun %d event %d",
	    mrevt->tgt, mrevt->lun, mrevt->event));

	if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
		dip = instance->mr_ld_list[mrevt->tgt].dip;
	} else {
		return;
	}

	/* Hold the nexus while we attach/detach children under it. */
	ndi_devi_enter(instance->dip, &circ1);
	switch (mrevt->event) {
	case MRSAS_EVT_CONFIG_TGT:
		if (dip == NULL) {

			if (mrevt->lun == 0) {
				(void) mrsas_config_ld(instance, mrevt->tgt,
				    0, NULL);
			}
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_CONFIG_TGT called:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));

		} else {
			/* Node already exists; nothing to configure. */
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_CONFIG_TGT dip != NULL:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		}
		break;
	case MRSAS_EVT_UNCONFIG_TGT:
		if (dip) {
			if (i_ddi_devi_attached(dip)) {

				pdip = ddi_get_parent(dip);

				/*
				 * Flush cached devfs references (the name is
				 * passed without its leading '/').
				 */
				devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
				(void) ddi_deviname(dip, devname);

				(void) devfs_clean(pdip, devname + 1,
				    DV_CLEAN_FORCE);
				kmem_free(devname, MAXNAMELEN + 1);
			}
			(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_UNCONFIG_TGT called:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		}
		break;
	}
	kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
	ndi_devi_exit(instance->dip, circ1);
}
*/ 5481 return (NULL); 5482 } 5483 5484 bp_mapin(bp); 5485 bzero(bp->b_un.b_addr, bp->b_bcount); 5486 5487 switch (page_code) { 5488 case 0x3: { 5489 struct mode_format *page3p = NULL; 5490 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5491 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5492 5493 page3p = (void *)((caddr_t)modehdrp + 5494 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5495 page3p->mode_page.code = 0x3; 5496 page3p->mode_page.length = 5497 (uchar_t)(sizeof (struct mode_format)); 5498 page3p->data_bytes_sect = 512; 5499 page3p->sect_track = 63; 5500 break; 5501 } 5502 case 0x4: { 5503 struct mode_geometry *page4p = NULL; 5504 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 5505 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 5506 5507 page4p = (void *)((caddr_t)modehdrp + 5508 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 5509 page4p->mode_page.code = 0x4; 5510 page4p->mode_page.length = 5511 (uchar_t)(sizeof (struct mode_geometry)); 5512 page4p->heads = 255; 5513 page4p->rpm = 10000; 5514 break; 5515 } 5516 default: 5517 break; 5518 } 5519 return (NULL); 5520 } 5521