1 /* 2 * megaraid_sas.c: source for mega_sas driver 3 * 4 * MegaRAID device driver for SAS controllers 5 * Copyright (c) 2005-2008, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com> 11 * Seokmann Ju 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright notice, 17 * this list of conditions and the following disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above copyright notice, 20 * this list of conditions and the following disclaimer in the documentation 21 * and/or other materials provided with the distribution. 22 * 23 * 3. Neither the name of the author nor the names of its contributors may be 24 * used to endorse or promote products derived from this software without 25 * specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * DAMAGE. 39 */ 40 41 /* 42 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 43 * Use is subject to license terms. 
44 */ 45 46 #include <sys/types.h> 47 #include <sys/param.h> 48 #include <sys/file.h> 49 #include <sys/errno.h> 50 #include <sys/open.h> 51 #include <sys/cred.h> 52 #include <sys/modctl.h> 53 #include <sys/conf.h> 54 #include <sys/devops.h> 55 #include <sys/cmn_err.h> 56 #include <sys/kmem.h> 57 #include <sys/stat.h> 58 #include <sys/mkdev.h> 59 #include <sys/pci.h> 60 #include <sys/scsi/scsi.h> 61 #include <sys/ddi.h> 62 #include <sys/sunddi.h> 63 #include <sys/atomic.h> 64 #include <sys/signal.h> 65 66 #include "megaraid_sas.h" 67 68 /* 69 * FMA header files 70 */ 71 #include <sys/ddifm.h> 72 #include <sys/fm/protocol.h> 73 #include <sys/fm/util.h> 74 #include <sys/fm/io/ddi.h> 75 76 /* 77 * Local static data 78 */ 79 static void *megasas_state = NULL; 80 static int debug_level_g = CL_ANN; 81 82 #pragma weak scsi_hba_open 83 #pragma weak scsi_hba_close 84 #pragma weak scsi_hba_ioctl 85 86 static ddi_dma_attr_t megasas_generic_dma_attr = { 87 DMA_ATTR_V0, /* dma_attr_version */ 88 (unsigned long long)0, /* low DMA address range */ 89 (unsigned long long)0xffffffff, /* high DMA address range */ 90 (unsigned long long)0xffffffff, /* DMA counter register */ 91 8, /* DMA address alignment */ 92 0x07, /* DMA burstsizes */ 93 1, /* min DMA size */ 94 (unsigned long long)0xffffffff, /* max DMA size */ 95 (unsigned long long)0xffffffff, /* segment boundary */ 96 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */ 97 512, /* granularity of device */ 98 0 /* bus specific DMA flags */ 99 }; 100 101 int32_t megasas_max_cap_maxxfer = 0x1000000; 102 103 /* 104 * cb_ops contains base level routines 105 */ 106 static struct cb_ops megasas_cb_ops = { 107 megasas_open, /* open */ 108 megasas_close, /* close */ 109 nodev, /* strategy */ 110 nodev, /* print */ 111 nodev, /* dump */ 112 nodev, /* read */ 113 nodev, /* write */ 114 megasas_ioctl, /* ioctl */ 115 nodev, /* devmap */ 116 nodev, /* mmap */ 117 nodev, /* segmap */ 118 nochpoll, /* poll */ 119 nodev, /* cb_prop_op */ 120 0, /* 
streamtab */ 121 D_NEW | D_HOTPLUG, /* cb_flag */ 122 CB_REV, /* cb_rev */ 123 nodev, /* cb_aread */ 124 nodev /* cb_awrite */ 125 }; 126 127 /* 128 * dev_ops contains configuration routines 129 */ 130 static struct dev_ops megasas_ops = { 131 DEVO_REV, /* rev, */ 132 0, /* refcnt */ 133 megasas_getinfo, /* getinfo */ 134 nulldev, /* identify */ 135 nulldev, /* probe */ 136 megasas_attach, /* attach */ 137 megasas_detach, /* detach */ 138 megasas_reset, /* reset */ 139 &megasas_cb_ops, /* char/block ops */ 140 NULL /* bus ops */ 141 }; 142 143 char _depends_on[] = "misc/scsi"; 144 145 static struct modldrv modldrv = { 146 &mod_driverops, /* module type - driver */ 147 MEGASAS_VERSION, 148 &megasas_ops, /* driver ops */ 149 }; 150 151 static struct modlinkage modlinkage = { 152 MODREV_1, /* ml_rev - must be MODREV_1 */ 153 &modldrv, /* ml_linkage */ 154 NULL /* end of driver linkage */ 155 }; 156 157 static struct ddi_device_acc_attr endian_attr = { 158 DDI_DEVICE_ATTR_V0, 159 DDI_STRUCTURE_LE_ACC, 160 DDI_STRICTORDER_ACC 161 }; 162 163 164 /* 165 * ************************************************************************** * 166 * * 167 * common entry points - for loadable kernel modules * 168 * * 169 * ************************************************************************** * 170 */ 171 172 /* 173 * _init - initialize a loadable module 174 * @void 175 * 176 * The driver should perform any one-time resource allocation or data 177 * initialization during driver loading in _init(). For example, the driver 178 * should initialize any mutexes global to the driver in this routine. 179 * The driver should not, however, use _init() to allocate or initialize 180 * anything that has to do with a particular instance of the device. 181 * Per-instance initialization must be done in attach(). 
182 */ 183 int 184 _init(void) 185 { 186 int ret; 187 188 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 189 190 ret = ddi_soft_state_init(&megasas_state, 191 sizeof (struct megasas_instance), 0); 192 193 if (ret != 0) { 194 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state")); 195 return (ret); 196 } 197 198 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 199 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba")); 200 ddi_soft_state_fini(&megasas_state); 201 return (ret); 202 } 203 204 ret = mod_install(&modlinkage); 205 206 if (ret != 0) { 207 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed")); 208 scsi_hba_fini(&modlinkage); 209 ddi_soft_state_fini(&megasas_state); 210 } 211 212 return (ret); 213 } 214 215 /* 216 * _info - returns information about a loadable module. 217 * @void 218 * 219 * _info() is called to return module information. This is a typical entry 220 * point that does predefined role. It simply calls mod_info(). 221 */ 222 int 223 _info(struct modinfo *modinfop) 224 { 225 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 226 227 return (mod_info(&modlinkage, modinfop)); 228 } 229 230 /* 231 * _fini - prepare a loadable module for unloading 232 * @void 233 * 234 * In _fini(), the driver should release any resources that were allocated in 235 * _init(). The driver must remove itself from the system module list. 
236 */ 237 int 238 _fini(void) 239 { 240 int ret; 241 242 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 243 244 if ((ret = mod_remove(&modlinkage)) != 0) 245 return (ret); 246 247 scsi_hba_fini(&modlinkage); 248 249 ddi_soft_state_fini(&megasas_state); 250 251 return (ret); 252 } 253 254 255 /* 256 * ************************************************************************** * 257 * * 258 * common entry points - for autoconfiguration * 259 * * 260 * ************************************************************************** * 261 */ 262 /* 263 * probe - called before attach for a given instance 264 * This is an optional entry for self-identifiable device. 265 * @dip: 266 * 267 * static int megasas_probe(dev_info_t *dip) 268 * { 269 * return (DDI_SUCCESS); 270 * } 271 */ 272 273 /* 274 * attach - adds a device to the system as part of initialization 275 * @dip: 276 * @cmd: 277 * 278 * The kernel calls a driver's attach() entry point to attach an instance of 279 * a device (for MegaRAID, it is instance of a controller) or to resume 280 * operation for an instance of a device that has been suspended or has been 281 * shut down by the power management framework 282 * The attach() entry point typically includes the following types of 283 * processing: 284 * - allocate a soft-state structure for the device instance (for MegaRAID, 285 * controller instance) 286 * - initialize per-instance mutexes 287 * - initialize condition variables 288 * - register the device's interrupts (for MegaRAID, controller's interrupts) 289 * - map the registers and memory of the device instance (for MegaRAID, 290 * controller instance) 291 * - create minor device nodes for the device instance (for MegaRAID, 292 * controller instance) 293 * - report that the device instance (for MegaRAID, controller instance) has 294 * attached 295 */ 296 static int 297 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 298 { 299 int instance_no; 300 int nregs; 301 uint8_t added_isr_f = 0; 
302 uint8_t added_soft_isr_f = 0; 303 uint8_t create_devctl_node_f = 0; 304 uint8_t create_scsi_node_f = 0; 305 uint8_t create_ioc_node_f = 0; 306 uint8_t tran_alloc_f = 0; 307 uint8_t irq; 308 uint16_t vendor_id; 309 uint16_t device_id; 310 uint16_t subsysvid; 311 uint16_t subsysid; 312 uint16_t command; 313 314 scsi_hba_tran_t *tran; 315 ddi_dma_attr_t tran_dma_attr; 316 struct megasas_instance *instance; 317 318 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 319 320 instance_no = ddi_get_instance(dip); 321 322 /* 323 * Since we know that some instantiations of this device can be 324 * plugged into slave-only SBus slots, check to see whether this is 325 * one such. 326 */ 327 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 328 con_log(CL_ANN, (CE_WARN, 329 "mega%d: Device in slave-only slot, unused", instance_no)); 330 331 return (DDI_FAILURE); 332 } 333 334 switch (cmd) { 335 case DDI_ATTACH: 336 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH")); 337 /* allocate the soft state for the instance */ 338 if (ddi_soft_state_zalloc(megasas_state, instance_no) 339 != DDI_SUCCESS) { 340 con_log(CL_ANN, (CE_WARN, 341 "mega%d: Failed to allocate soft state", 342 instance_no)); 343 344 return (DDI_FAILURE); 345 } 346 347 instance = (struct megasas_instance *)ddi_get_soft_state 348 (megasas_state, instance_no); 349 350 if (instance == NULL) { 351 con_log(CL_ANN, (CE_WARN, 352 "mega%d: Bad soft state", instance_no)); 353 354 ddi_soft_state_free(megasas_state, instance_no); 355 356 return (DDI_FAILURE); 357 } 358 359 bzero((caddr_t)instance, 360 sizeof (struct megasas_instance)); 361 362 instance->func_ptr = kmem_zalloc( 363 sizeof (struct megasas_func_ptr), KM_SLEEP); 364 ASSERT(instance->func_ptr); 365 366 /* Setup the PCI configuration space handles */ 367 if (pci_config_setup(dip, &instance->pci_handle) != 368 DDI_SUCCESS) { 369 con_log(CL_ANN, (CE_WARN, 370 "mega%d: pci config setup failed ", 371 instance_no)); 372 373 kmem_free(instance->func_ptr, 374 
sizeof (struct megasas_func_ptr)); 375 ddi_soft_state_free(megasas_state, instance_no); 376 377 return (DDI_FAILURE); 378 } 379 380 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 381 con_log(CL_ANN, (CE_WARN, 382 "megaraid: failed to get registers.")); 383 384 pci_config_teardown(&instance->pci_handle); 385 kmem_free(instance->func_ptr, 386 sizeof (struct megasas_func_ptr)); 387 ddi_soft_state_free(megasas_state, instance_no); 388 389 return (DDI_FAILURE); 390 } 391 392 vendor_id = pci_config_get16(instance->pci_handle, 393 PCI_CONF_VENID); 394 device_id = pci_config_get16(instance->pci_handle, 395 PCI_CONF_DEVID); 396 397 subsysvid = pci_config_get16(instance->pci_handle, 398 PCI_CONF_SUBVENID); 399 subsysid = pci_config_get16(instance->pci_handle, 400 PCI_CONF_SUBSYSID); 401 402 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 403 (pci_config_get16(instance->pci_handle, 404 PCI_CONF_COMM) | PCI_COMM_ME)); 405 irq = pci_config_get8(instance->pci_handle, 406 PCI_CONF_ILINE); 407 #ifdef lint 408 irq = irq; 409 #endif 410 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 411 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n", 412 instance_no, vendor_id, device_id, subsysvid, 413 subsysid, pci_config_get8(instance->pci_handle, 414 PCI_CONF_ILINE), MEGASAS_VERSION)); 415 416 /* enable bus-mastering */ 417 command = pci_config_get16(instance->pci_handle, 418 PCI_CONF_COMM); 419 420 if (!(command & PCI_COMM_ME)) { 421 command |= PCI_COMM_ME; 422 423 pci_config_put16(instance->pci_handle, 424 PCI_CONF_COMM, command); 425 426 con_log(CL_ANN, (CE_CONT, "megaraid[%d]: " 427 "enable bus-mastering\n", instance_no)); 428 } else { 429 con_log(CL_DLEVEL1, (CE_CONT, "megaraid[%d]: " 430 "bus-mastering already set\n", instance_no)); 431 } 432 433 /* initialize function pointers */ 434 if ((device_id == PCI_DEVICE_ID_LSI_1078) || 435 (device_id == PCI_DEVICE_ID_LSI_1078DE)) { 436 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 437 "1078R/DE detected\n", instance_no)); 438 
instance->func_ptr->read_fw_status_reg = 439 read_fw_status_reg_ppc; 440 instance->func_ptr->issue_cmd = issue_cmd_ppc; 441 instance->func_ptr->issue_cmd_in_sync_mode = 442 issue_cmd_in_sync_mode_ppc; 443 instance->func_ptr->issue_cmd_in_poll_mode = 444 issue_cmd_in_poll_mode_ppc; 445 instance->func_ptr->enable_intr = 446 enable_intr_ppc; 447 instance->func_ptr->disable_intr = 448 disable_intr_ppc; 449 instance->func_ptr->intr_ack = intr_ack_ppc; 450 } else { 451 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 452 "1064/8R detected\n", instance_no)); 453 instance->func_ptr->read_fw_status_reg = 454 read_fw_status_reg_xscale; 455 instance->func_ptr->issue_cmd = 456 issue_cmd_xscale; 457 instance->func_ptr->issue_cmd_in_sync_mode = 458 issue_cmd_in_sync_mode_xscale; 459 instance->func_ptr->issue_cmd_in_poll_mode = 460 issue_cmd_in_poll_mode_xscale; 461 instance->func_ptr->enable_intr = 462 enable_intr_xscale; 463 instance->func_ptr->disable_intr = 464 disable_intr_xscale; 465 instance->func_ptr->intr_ack = 466 intr_ack_xscale; 467 } 468 469 instance->baseaddress = 470 pci_config_get32(instance->pci_handle, 0x10); 471 instance->baseaddress &= 0x0fffc; 472 473 instance->dip = dip; 474 instance->vendor_id = vendor_id; 475 instance->device_id = device_id; 476 instance->subsysvid = subsysvid; 477 instance->subsysid = subsysid; 478 479 /* Initialize FMA */ 480 instance->fm_capabilities = ddi_prop_get_int( 481 DDI_DEV_T_ANY, instance->dip, DDI_PROP_CANSLEEP 482 | DDI_PROP_DONTPASS, "fm-capable", 483 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE 484 | DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 485 486 megasas_fm_init(instance); 487 488 /* setup the mfi based low level driver */ 489 if (init_mfi(instance) != DDI_SUCCESS) { 490 con_log(CL_ANN, (CE_WARN, "megaraid: " 491 "could not initialize the low level driver")); 492 493 goto fail_attach; 494 } 495 496 /* 497 * Allocate the interrupt blocking cookie. 
498 * It represents the information the framework 499 * needs to block interrupts. This cookie will 500 * be used by the locks shared accross our ISR. 501 * These locks must be initialized before we 502 * register our ISR. 503 * ddi_add_intr(9F) 504 */ 505 if (ddi_get_iblock_cookie(dip, 0, 506 &instance->iblock_cookie) != DDI_SUCCESS) { 507 508 goto fail_attach; 509 } 510 511 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH, 512 &instance->soft_iblock_cookie) != DDI_SUCCESS) { 513 514 goto fail_attach; 515 } 516 517 /* 518 * Initialize the driver mutexes common to 519 * normal/high level isr 520 */ 521 if (ddi_intr_hilevel(dip, 0)) { 522 instance->isr_level = HIGH_LEVEL_INTR; 523 mutex_init(&instance->cmd_pool_mtx, 524 "cmd_pool_mtx", MUTEX_DRIVER, 525 instance->soft_iblock_cookie); 526 mutex_init(&instance->cmd_pend_mtx, 527 "cmd_pend_mtx", MUTEX_DRIVER, 528 instance->soft_iblock_cookie); 529 } else { 530 /* 531 * Initialize the driver mutexes 532 * specific to soft-isr 533 */ 534 instance->isr_level = NORMAL_LEVEL_INTR; 535 mutex_init(&instance->cmd_pool_mtx, 536 "cmd_pool_mtx", MUTEX_DRIVER, 537 instance->iblock_cookie); 538 mutex_init(&instance->cmd_pend_mtx, 539 "cmd_pend_mtx", MUTEX_DRIVER, 540 instance->iblock_cookie); 541 } 542 543 mutex_init(&instance->completed_pool_mtx, 544 "completed_pool_mtx", MUTEX_DRIVER, 545 instance->iblock_cookie); 546 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 547 MUTEX_DRIVER, instance->iblock_cookie); 548 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx", 549 MUTEX_DRIVER, instance->iblock_cookie); 550 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx", 551 MUTEX_DRIVER, instance->iblock_cookie); 552 553 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 554 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL); 555 556 INIT_LIST_HEAD(&instance->completed_pool_list); 557 558 /* Register our isr. 
*/ 559 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr, 560 (caddr_t)instance) != DDI_SUCCESS) { 561 con_log(CL_ANN, (CE_WARN, 562 " ISR did not register")); 563 564 goto fail_attach; 565 } 566 567 added_isr_f = 1; 568 569 /* Register our soft-isr for highlevel interrupts. */ 570 if (instance->isr_level == HIGH_LEVEL_INTR) { 571 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 572 &instance->soft_intr_id, NULL, NULL, 573 megasas_softintr, (caddr_t)instance) != 574 DDI_SUCCESS) { 575 con_log(CL_ANN, (CE_WARN, 576 " Software ISR did not register")); 577 578 goto fail_attach; 579 } 580 581 added_soft_isr_f = 1; 582 } 583 584 /* Allocate a transport structure */ 585 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 586 587 if (tran == NULL) { 588 con_log(CL_ANN, (CE_WARN, 589 "scsi_hba_tran_alloc failed")); 590 goto fail_attach; 591 } 592 593 tran_alloc_f = 1; 594 595 instance->tran = tran; 596 597 tran->tran_hba_private = instance; 598 tran->tran_tgt_private = NULL; 599 tran->tran_tgt_init = megasas_tran_tgt_init; 600 tran->tran_tgt_probe = scsi_hba_probe; 601 tran->tran_tgt_free = (void (*)())NULL; 602 tran->tran_init_pkt = megasas_tran_init_pkt; 603 tran->tran_start = megasas_tran_start; 604 tran->tran_abort = megasas_tran_abort; 605 tran->tran_reset = megasas_tran_reset; 606 tran->tran_bus_reset = megasas_tran_bus_reset; 607 tran->tran_getcap = megasas_tran_getcap; 608 tran->tran_setcap = megasas_tran_setcap; 609 tran->tran_destroy_pkt = megasas_tran_destroy_pkt; 610 tran->tran_dmafree = megasas_tran_dmafree; 611 tran->tran_sync_pkt = megasas_tran_sync_pkt; 612 tran->tran_reset_notify = NULL; 613 tran->tran_quiesce = megasas_tran_quiesce; 614 tran->tran_unquiesce = megasas_tran_unquiesce; 615 616 tran_dma_attr = megasas_generic_dma_attr; 617 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 618 619 /* Attach this instance of the hba */ 620 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 621 != DDI_SUCCESS) { 622 con_log(CL_ANN, (CE_WARN, 623 
"scsi_hba_attach failed\n")); 624 625 goto fail_attach; 626 } 627 628 /* create devctl node for cfgadm command */ 629 if (ddi_create_minor_node(dip, "devctl", 630 S_IFCHR, INST2DEVCTL(instance_no), 631 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 632 con_log(CL_ANN, (CE_WARN, 633 "megaraid: failed to create devctl node.")); 634 635 goto fail_attach; 636 } 637 638 create_devctl_node_f = 1; 639 640 /* create scsi node for cfgadm command */ 641 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 642 INST2SCSI(instance_no), 643 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 644 DDI_FAILURE) { 645 con_log(CL_ANN, (CE_WARN, 646 "megaraid: failed to create scsi node.")); 647 648 goto fail_attach; 649 } 650 651 create_scsi_node_f = 1; 652 653 (void) sprintf(instance->iocnode, "%d:lsirdctl", 654 instance_no); 655 656 /* 657 * Create a node for applications 658 * for issuing ioctl to the driver. 659 */ 660 if (ddi_create_minor_node(dip, instance->iocnode, 661 S_IFCHR, INST2LSIRDCTL(instance_no), 662 DDI_PSEUDO, 0) == DDI_FAILURE) { 663 con_log(CL_ANN, (CE_WARN, 664 "megaraid: failed to create ioctl node.")); 665 666 goto fail_attach; 667 } 668 669 create_ioc_node_f = 1; 670 671 /* enable interrupt */ 672 instance->func_ptr->enable_intr(instance); 673 674 /* initiate AEN */ 675 if (start_mfi_aen(instance)) { 676 con_log(CL_ANN, (CE_WARN, 677 "megaraid: failed to initiate AEN.")); 678 goto fail_initiate_aen; 679 } 680 681 con_log(CL_DLEVEL1, (CE_NOTE, 682 "AEN started for instance %d.", instance_no)); 683 684 /* Finally! We are on the air. 
*/ 685 ddi_report_dev(dip); 686 687 if (megasas_check_acc_handle(instance->regmap_handle) != 688 DDI_SUCCESS) { 689 goto fail_attach; 690 } 691 if (megasas_check_acc_handle(instance->pci_handle) != 692 DDI_SUCCESS) { 693 goto fail_attach; 694 } 695 break; 696 case DDI_PM_RESUME: 697 con_log(CL_ANN, (CE_NOTE, 698 "megasas: DDI_PM_RESUME")); 699 break; 700 case DDI_RESUME: 701 con_log(CL_ANN, (CE_NOTE, 702 "megasas: DDI_RESUME")); 703 break; 704 default: 705 con_log(CL_ANN, (CE_WARN, 706 "megasas: invalid attach cmd=%x", cmd)); 707 return (DDI_FAILURE); 708 } 709 710 return (DDI_SUCCESS); 711 712 fail_initiate_aen: 713 fail_attach: 714 if (create_devctl_node_f) { 715 ddi_remove_minor_node(dip, "devctl"); 716 } 717 718 if (create_scsi_node_f) { 719 ddi_remove_minor_node(dip, "scsi"); 720 } 721 722 if (create_ioc_node_f) { 723 ddi_remove_minor_node(dip, instance->iocnode); 724 } 725 726 if (tran_alloc_f) { 727 scsi_hba_tran_free(tran); 728 } 729 730 731 if (added_soft_isr_f) { 732 ddi_remove_softintr(instance->soft_intr_id); 733 } 734 735 if (added_isr_f) { 736 ddi_remove_intr(dip, 0, instance->iblock_cookie); 737 } 738 739 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 740 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 741 742 megasas_fm_fini(instance); 743 744 pci_config_teardown(&instance->pci_handle); 745 746 ddi_soft_state_free(megasas_state, instance_no); 747 748 con_log(CL_ANN, (CE_NOTE, 749 "megasas: return failure from mega_attach\n")); 750 751 return (DDI_FAILURE); 752 } 753 754 /* 755 * getinfo - gets device information 756 * @dip: 757 * @cmd: 758 * @arg: 759 * @resultp: 760 * 761 * The system calls getinfo() to obtain configuration information that only 762 * the driver knows. The mapping of minor numbers to device instance is 763 * entirely under the control of the driver. The system sometimes needs to ask 764 * the driver which device a particular dev_t represents. 
765 * Given the device number return the devinfo pointer from the scsi_device 766 * structure. 767 */ 768 /*ARGSUSED*/ 769 static int 770 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 771 { 772 int rval; 773 int megasas_minor = getminor((dev_t)arg); 774 775 struct megasas_instance *instance; 776 777 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 778 779 switch (cmd) { 780 case DDI_INFO_DEVT2DEVINFO: 781 instance = (struct megasas_instance *) 782 ddi_get_soft_state(megasas_state, 783 MINOR2INST(megasas_minor)); 784 785 if (instance == NULL) { 786 *resultp = NULL; 787 rval = DDI_FAILURE; 788 } else { 789 *resultp = instance->dip; 790 rval = DDI_SUCCESS; 791 } 792 break; 793 case DDI_INFO_DEVT2INSTANCE: 794 *resultp = (void *)instance; 795 rval = DDI_SUCCESS; 796 break; 797 default: 798 *resultp = NULL; 799 rval = DDI_FAILURE; 800 } 801 802 return (rval); 803 } 804 805 /* 806 * detach - detaches a device from the system 807 * @dip: pointer to the device's dev_info structure 808 * @cmd: type of detach 809 * 810 * A driver's detach() entry point is called to detach an instance of a device 811 * that is bound to the driver. The entry point is called with the instance of 812 * the device node to be detached and with DDI_DETACH, which is specified as 813 * the cmd argument to the entry point. 814 * This routine is called during driver unload. We free all the allocated 815 * resources and call the corresponding LLD so that it can also release all 816 * its resources. 
817 */ 818 static int 819 megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 820 { 821 int instance_no; 822 823 struct megasas_instance *instance; 824 825 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 826 827 instance_no = ddi_get_instance(dip); 828 829 instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state, 830 instance_no); 831 832 if (!instance) { 833 con_log(CL_ANN, (CE_WARN, 834 "megasas:%d could not get instance in detach", 835 instance_no)); 836 837 return (DDI_FAILURE); 838 } 839 840 con_log(CL_ANN, (CE_NOTE, 841 "megasas: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n", 842 instance->vendor_id, instance->device_id, instance->subsysvid, 843 instance->subsysid)); 844 845 switch (cmd) { 846 case DDI_DETACH: 847 con_log(CL_ANN, (CE_NOTE, 848 "megasas_detach: DDI_DETACH\n")); 849 850 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 851 con_log(CL_ANN, (CE_WARN, 852 "megasas:%d failed to detach", 853 instance_no)); 854 855 return (DDI_FAILURE); 856 } 857 858 scsi_hba_tran_free(instance->tran); 859 860 if (abort_aen_cmd(instance, instance->aen_cmd)) { 861 con_log(CL_ANN, (CE_WARN, "megasas_detach: " 862 "failed to abort prevous AEN command\n")); 863 864 return (DDI_FAILURE); 865 } 866 867 instance->func_ptr->disable_intr(instance); 868 869 if (instance->isr_level == HIGH_LEVEL_INTR) { 870 ddi_remove_softintr(instance->soft_intr_id); 871 } 872 873 ddi_remove_intr(dip, 0, instance->iblock_cookie); 874 875 free_space_for_mfi(instance); 876 877 megasas_fm_fini(instance); 878 879 pci_config_teardown(&instance->pci_handle); 880 881 kmem_free(instance->func_ptr, 882 sizeof (struct megasas_func_ptr)); 883 884 ddi_soft_state_free(megasas_state, instance_no); 885 break; 886 case DDI_PM_SUSPEND: 887 con_log(CL_ANN, (CE_NOTE, 888 "megasas_detach: DDI_PM_SUSPEND\n")); 889 890 break; 891 case DDI_SUSPEND: 892 con_log(CL_ANN, (CE_NOTE, 893 "megasas_detach: DDI_SUSPEND\n")); 894 895 break; 896 default: 897 con_log(CL_ANN, (CE_WARN, 898 "invalid detach 
command:0x%x", cmd)); 899 return (DDI_FAILURE); 900 } 901 902 return (DDI_SUCCESS); 903 } 904 905 906 /* 907 * ************************************************************************** * 908 * * 909 * common entry points - for character driver types * 910 * * 911 * ************************************************************************** * 912 */ 913 /* 914 * open - gets access to a device 915 * @dev: 916 * @openflags: 917 * @otyp: 918 * @credp: 919 * 920 * Access to a device by one or more application programs is controlled 921 * through the open() and close() entry points. The primary function of 922 * open() is to verify that the open request is allowed. 923 */ 924 static int 925 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 926 { 927 int rval = 0; 928 929 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 930 931 /* Check root permissions */ 932 if (drv_priv(credp) != 0) { 933 con_log(CL_ANN, (CE_WARN, 934 "megaraid: Non-root ioctl access tried!")); 935 return (EPERM); 936 } 937 938 /* Verify we are being opened as a character device */ 939 if (otyp != OTYP_CHR) { 940 con_log(CL_ANN, (CE_WARN, 941 "megaraid: ioctl node must be a char node\n")); 942 return (EINVAL); 943 } 944 945 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev))) 946 == NULL) { 947 return (ENXIO); 948 } 949 950 if (scsi_hba_open) { 951 rval = scsi_hba_open(dev, openflags, otyp, credp); 952 } 953 954 return (rval); 955 } 956 957 /* 958 * close - gives up access to a device 959 * @dev: 960 * @openflags: 961 * @otyp: 962 * @credp: 963 * 964 * close() should perform any cleanup necessary to finish using the minor 965 * device, and prepare the device (and driver) to be opened again. 966 */ 967 static int 968 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 969 { 970 int rval = 0; 971 972 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 973 974 /* no need for locks! 
*/ 975 976 if (scsi_hba_close) { 977 rval = scsi_hba_close(dev, openflags, otyp, credp); 978 } 979 980 return (rval); 981 } 982 983 /* 984 * ioctl - performs a range of I/O commands for character drivers 985 * @dev: 986 * @cmd: 987 * @arg: 988 * @mode: 989 * @credp: 990 * @rvalp: 991 * 992 * ioctl() routine must make sure that user data is copied into or out of the 993 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(), 994 * and ddi_copyout(), as appropriate. 995 * This is a wrapper routine to serialize access to the actual ioctl routine. 996 * ioctl() should return 0 on success, or the appropriate error number. The 997 * driver may also set the value returned to the calling process through rvalp. 998 */ 999 static int 1000 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 1001 int *rvalp) 1002 { 1003 int rval = 0; 1004 1005 struct megasas_instance *instance; 1006 struct megasas_ioctl ioctl; 1007 struct megasas_aen aen; 1008 1009 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1010 1011 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev))); 1012 1013 if (instance == NULL) { 1014 /* invalid minor number */ 1015 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found.")); 1016 return (ENXIO); 1017 } 1018 1019 switch ((uint_t)cmd) { 1020 case MEGASAS_IOCTL_FIRMWARE: 1021 if (ddi_copyin((void *) arg, &ioctl, 1022 sizeof (struct megasas_ioctl), mode)) { 1023 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: " 1024 "ERROR IOCTL copyin")); 1025 return (EFAULT); 1026 } 1027 1028 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) { 1029 rval = handle_drv_ioctl(instance, &ioctl, mode); 1030 } else { 1031 rval = handle_mfi_ioctl(instance, &ioctl, mode); 1032 } 1033 1034 if (ddi_copyout((void *) &ioctl, (void *)arg, 1035 (sizeof (struct megasas_ioctl) - 1), mode)) { 1036 con_log(CL_ANN, (CE_WARN, 1037 "megasas_ioctl: copy_to_user failed\n")); 1038 rval = 1; 1039 } 1040 1041 break; 1042 case 
MEGASAS_IOCTL_AEN: 1043 if (ddi_copyin((void *) arg, &aen, 1044 sizeof (struct megasas_aen), mode)) { 1045 con_log(CL_ANN, (CE_WARN, 1046 "megasas_ioctl: ERROR AEN copyin")); 1047 return (EFAULT); 1048 } 1049 1050 rval = handle_mfi_aen(instance, &aen); 1051 1052 if (ddi_copyout((void *) &aen, (void *)arg, 1053 sizeof (struct megasas_aen), mode)) { 1054 con_log(CL_ANN, (CE_WARN, 1055 "megasas_ioctl: copy_to_user failed\n")); 1056 rval = 1; 1057 } 1058 1059 break; 1060 default: 1061 rval = scsi_hba_ioctl(dev, cmd, arg, 1062 mode, credp, rvalp); 1063 1064 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: " 1065 "scsi_hba_ioctl called, ret = %x.", rval)); 1066 } 1067 1068 return (rval); 1069 } 1070 1071 /* 1072 * ************************************************************************** * 1073 * * 1074 * common entry points - for block driver types * 1075 * * 1076 * ************************************************************************** * 1077 */ 1078 /* 1079 * reset - TBD 1080 * @dip: 1081 * @cmd: 1082 * 1083 * TBD 1084 */ 1085 /*ARGSUSED*/ 1086 static int 1087 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1088 { 1089 int instance_no; 1090 1091 struct megasas_instance *instance; 1092 1093 instance_no = ddi_get_instance(dip); 1094 instance = (struct megasas_instance *)ddi_get_soft_state 1095 (megasas_state, instance_no); 1096 1097 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1098 1099 if (!instance) { 1100 con_log(CL_ANN, (CE_WARN, 1101 "megaraid:%d could not get adapter in reset", 1102 instance_no)); 1103 return (DDI_FAILURE); 1104 } 1105 1106 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..", 1107 instance_no)); 1108 1109 flush_cache(instance); 1110 1111 return (DDI_SUCCESS); 1112 } 1113 1114 1115 /* 1116 * ************************************************************************** * 1117 * * 1118 * entry points (SCSI HBA) * 1119 * * 1120 * ************************************************************************** * 1121 */ 1122 /* 
* tran_tgt_init - initialize a target device instance
 * @hba_dip:
 * @tgt_dip:
 * @tran:
 * @sd:
 *
 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
 * any per-target resources.  tran_tgt_init() also enables the HBA to qualify
 * the device's address as valid and supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that device
 * is not probed or attached.
 */
/*ARGSUSED*/
static int
megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
        scsi_hba_tran_t *tran, struct scsi_device *sd)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /*
     * The address-validation logic below has never been enabled (NOT_YET
     * is not defined anywhere); every target is currently accepted
     * unconditionally.  Note the dead code would not even compile as-is
     * (adp/instance_no/mega_state are unresolved) — kept verbatim.
     */
#ifdef NOT_YET
    int instance;
    int islogical;

    instance = ddi_get_instance(hba_dip);
    adp = (struct megasas_instance *)ddi_get_soft_state(mega_state,
        instance_no);
    if ((sd->sd_address.a_target >= (adp->max_channel * 16 + MAX_LD_64)) ||
        (sd->sd_address.a_lun != 0)) {

        return (DDI_FAILURE);
    }

    MRAID_IS_LOGICAL(sd->sd_address.a_target, islogical);

    /* Allow non-disk device commands to pass */
    if (!islogical) {
        return (DDI_SUCCESS);
    }

    /* From Target 40 - 64 there will be no devices */
    if (sd->sd_address.a_target > MAX_LOGICAL_DRIVES_40LD) {
        return (DDI_FAILURE);
    }


    /*
     * Get information about the logical drives.
     */
    if (megaraid_ld_state_instance(adp) != DDI_SUCCESS) {
        con_log(CL_ANN, (CE_WARN, "megaraid: failed query adapter"));
    }

    if (adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
        == RDRV_DELETED ||
        adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
        == RDRV_OFFLINE) {

        return (DDI_FAILURE);
    }
#endif /* NOT_YET */
    return (DDI_SUCCESS);
}
#if defined(USELESS) && !defined(lint)
/*
 * tran_tgt_probe - probe for the existence of a target device
 * @sd:
 * @callback:
 *
 * The tran_tgt_probe() entry point enables the HBA to customize the operation
 * of scsi_probe(), if necessary.  This entry point is called only when the
 * target driver calls scsi_probe().  The HBA driver can retain the normal
 * operation of scsi_probe() by calling scsi_hba_probe() and returning its
 * return value.  This entry point is not required, and if not needed, the HBA
 * driver should set the tran_tgt_probe vector in the scsi_hba_tran structure
 * to point to scsi_hba_probe().
 *
 * (Compiled out: USELESS is never defined.)
 */
static int
megasas_tran_tgt_probe(struct scsi_device *sd, int (*callback)())
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /*
     * the HBA driver should set the tran_tgt_probe vector in the
     * scsi_hba_tran structure to point to scsi_hba_probe()
     */
    return (scsi_hba_probe(sd, callback));
}
#endif /* defined (USELESS) && !defined (lint) */

/*
 * tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap:
 * @pkt:
 * @bp:
 * @cmdlen:
 * @statuslen:
 * @tgtlen:
 * @flags:
 * @callback:
 *
 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
 * structure and DMA resources for a target driver request.  The
 * tran_init_pkt() entry point is called when the target driver calls the
 * SCSA function scsi_init_pkt().
Each call of the tran_init_pkt() entry point
 * is a request to perform one or more of three possible services:
 *  - allocation and initialization of a scsi_pkt structure
 *  - allocation of DMA resources for data transfer
 *  - reallocation of DMA resources for the next portion of the data transfer
 */
static struct scsi_pkt *
megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
    struct scsa_cmd *acmd;
    struct megasas_instance *instance;
    struct scsi_pkt *new_pkt;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    instance = ADDR2MEGA(ap);

    /* step #1 : pkt allocation */
    if (pkt == NULL) {
        /* scsa_cmd is carried as the HBA-private portion of the pkt */
        pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
            tgtlen, sizeof (struct scsa_cmd), callback, arg);
        if (pkt == NULL) {
            return (NULL);
        }

        acmd = PKT2CMD(pkt);

        /*
         * Initialize the new pkt - we redundantly initialize
         * all the fields for illustrative purposes.
         */
        acmd->cmd_pkt = pkt;
        acmd->cmd_flags = 0;
        acmd->cmd_scblen = statuslen;
        acmd->cmd_cdblen = cmdlen;
        acmd->cmd_dmahandle = NULL;
        acmd->cmd_ncookies = 0;
        acmd->cmd_cookie = 0;
        acmd->cmd_cookiecnt = 0;
        acmd->cmd_nwin = 0;

        pkt->pkt_address = *ap;
        pkt->pkt_comp = (void (*)())NULL;
        pkt->pkt_flags = 0;
        pkt->pkt_time = 0;
        pkt->pkt_resid = 0;
        pkt->pkt_state = 0;
        pkt->pkt_statistics = 0;
        pkt->pkt_reason = 0;
        new_pkt = pkt;
    } else {
        /* re-use: caller supplied a pkt, only DMA work may be needed */
        acmd = PKT2CMD(pkt);
        new_pkt = NULL;
    }

    /* step #2 : dma allocation/move */
    if (bp && bp->b_bcount != 0) {
        if (acmd->cmd_dmahandle == NULL) {
            if (megasas_dma_alloc(instance, pkt, bp, flags,
                callback) == -1) {
                /* only free a pkt we allocated ourselves above */
                if (new_pkt) {
                    scsi_hba_pkt_free(ap, new_pkt);
                }

                return ((struct scsi_pkt *)NULL);
            }
        } else {
            /* existing handle: move window for next data portion */
            if (megasas_dma_move(instance, pkt, bp) == -1) {
                return ((struct scsi_pkt *)NULL);
            }
        }
    }

    return (pkt);
}

/*
 * tran_start - transport a SCSI command to the addressed target
 * @ap:
 * @pkt:
 *
 * The tran_start() entry point for a SCSI HBA driver is called to transport a
 * SCSI command to the addressed target.  The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point.  If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 */
static int
megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
    uchar_t cmd_done = 0;

    struct megasas_instance *instance = ADDR2MEGA(ap);
    struct megasas_cmd *cmd;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
        __func__, __LINE__, pkt->pkt_cdbp[0]));

    pkt->pkt_reason = CMD_CMPLT;
    *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

    cmd = build_cmd(instance, ap, pkt, &cmd_done);

    /*
     * Check if the command is already completed by the mega_build_cmd()
     * routine. In which case the busy_flag would be clear and scb will be
     * NULL and appropriate reason provided in pkt_reason field
     */
    if (cmd_done) {
        /* completed in-line: run the completion callback ourselves */
        if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
            (*pkt->pkt_comp)(pkt);
        }
        pkt->pkt_reason = CMD_CMPLT;
        pkt->pkt_scbp[0] = STATUS_GOOD;
        pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
            | STATE_SENT_CMD;
        return (TRAN_ACCEPT);
    }

    if (cmd == NULL) {
        /* no free command slot available right now */
        return (TRAN_BUSY);
    }

    if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
        /* interrupt-driven path */
        if (instance->fw_outstanding > instance->max_fw_cmds) {
            con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
            return_mfi_pkt(instance, cmd);
            return (TRAN_BUSY);
        }

        /* Synchronize the Cmd frame for the controller */
        (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
            DDI_DMA_SYNC_FORDEV);

        instance->func_ptr->issue_cmd(cmd, instance);

#if defined(NOT_YET) && !defined(lint)
        /*
         * before return, set timer - for timeout checking
         * (for every 1 second)
         */
        instance->timeout_id = timeout(io_timeout_checker,
            (void *) instance, drv_usectohz(MEGASAS_1_SECOND));
#endif /* defined(NOT_YET) && !defined(lint) */
    } else {
        /* polled (FLAG_NOINTR) path: wait for completion in-line */
        struct megasas_header *hdr = &cmd->frame->hdr;

        cmd->sync_cmd = MEGASAS_TRUE;

        instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

        pkt->pkt_reason = CMD_CMPLT;
        pkt->pkt_statistics = 0;
        pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

        /* translate firmware status into SCSA pkt status */
        switch (hdr->cmd_status) {
        case MFI_STAT_OK:
            pkt->pkt_scbp[0] = STATUS_GOOD;
            break;

        case MFI_STAT_SCSI_DONE_WITH_ERROR:

            pkt->pkt_reason = CMD_CMPLT;
            pkt->pkt_statistics = 0;

            ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
            break;

        case MFI_STAT_DEVICE_NOT_FOUND:
            pkt->pkt_reason = CMD_DEV_GONE;
            pkt->pkt_statistics = STAT_DISCON;
            break;

        default:
            ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
        }

        return_mfi_pkt(instance, cmd);
        (void) megasas_common_check(instance, cmd);

        if (pkt->pkt_comp) {
            (*pkt->pkt_comp)(pkt);
        }

    }

    return (TRAN_ACCEPT);
}

/*
 * tran_abort - Abort any commands that are currently in transport
 * @ap:
 * @pkt:
 *
 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
 * commands that are currently in transport for a particular target. This entry
 * point is called when a target driver calls scsi_abort(). The tran_abort()
 * entry point should attempt to abort the command denoted by the pkt
 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
 * abort all outstanding commands in the transport layer for the particular
 * target or logical unit.
 */
/*ARGSUSED*/
static int
megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* aborting command not supported by H/W */

    return (DDI_FAILURE);
}

/*
 * tran_reset - reset either the SCSI bus or target
 * @ap:
 * @level:
 *
 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
 * the SCSI bus or a particular SCSI target device. This entry point is called
 * when a target driver calls scsi_reset(). The tran_reset() entry point must
 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
 * particular target or logical unit must be reset.
 *
 * NOTE(review): level is ignored; the implementation just drains outstanding
 * commands — confirm this is the intended semantic for RESET_TARGET.
 */
/*ARGSUSED*/
static int
megasas_tran_reset(struct scsi_address *ap, int level)
{
    struct megasas_instance *instance = ADDR2MEGA(ap);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (wait_for_outstanding(instance)) {
        return (DDI_FAILURE);
    } else {
        return (DDI_SUCCESS);
    }
}

/*
 * tran_bus_reset - reset the SCSI bus
 * @dip:
 * @level:
 *
 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
 * initialized during the HBA driver's attach(). The vector should point to
 * an HBA entry point that is to be called when a user initiates a bus reset.
 * Implementation is hardware specific. If the HBA driver cannot reset the
 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
 * or not initialize this vector.
 */
/*ARGSUSED*/
static int
megasas_tran_bus_reset(dev_info_t *dip, int level)
{
    int instance_no = ddi_get_instance(dip);

    struct megasas_instance *instance = ddi_get_soft_state(megasas_state,
        instance_no);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (wait_for_outstanding(instance)) {
        return (DDI_FAILURE);
    } else {
        return (DDI_SUCCESS);
    }
}

/*
 * tran_getcap - get one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @whom:
 *
 * The target driver can request the current setting of the capability for a
 * particular target by setting the whom parameter to nonzero. A whom value of
 * zero indicates a request for the current setting of the general capability
 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
 * for undefined capabilities or the current value of the requested capability.
 */
/*ARGSUSED*/
static int
megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
    int rval = 0;

    struct megasas_instance *instance = ADDR2MEGA(ap);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* we do allow inquiring about capabilities for other targets */
    if (cap == NULL) {
        return (-1);
    }
#if 0
    if (ap->a_target >= (adapter->max_channel * 16 + MAX_LD_64)) {

        return (-1);
    }

    acmdp = &acmd;
#endif

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
        /* Limit to 16MB max transfer */
        rval = megasas_max_cap_maxxfer;
        break;
    case SCSI_CAP_MSG_OUT:
        rval = 1;
        break;
    case SCSI_CAP_DISCONNECT:
        rval = 0;
        break;
    case SCSI_CAP_SYNCHRONOUS:
        rval = 0;
        break;
    case SCSI_CAP_WIDE_XFER:
        rval = 1;
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
case SCSI_CAP_UNTAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_PARITY:
        rval = 1;
        break;
    case SCSI_CAP_INITIATOR_ID:
        rval = instance->init_id;
        break;
    case SCSI_CAP_ARQ:
        rval = 1;
        break;
    case SCSI_CAP_LINKED_CMDS:
        rval = 0;
        break;
    case SCSI_CAP_RESET_NOTIFICATION:
        rval = 1;
        break;
    case SCSI_CAP_GEOMETRY:
        /* geometry support is disabled; the dead code below is kept as-is */
#if 0
        int channel;
        int target;
        int islogical;

        MRAID_GET_DEVICE_MAP(adapter, acmdp, channel,
            target, ap, islogical);

        if (!islogical) {
            con_log(CL_ANN1, (CE_WARN, "megaraid%d: "
                "fail geometry for phy [%d:%d]\n",
                ddi_get_instance(adapter->dip), channel,
                target));
            return (-1);
        }

        if (adapter->read_ldidmap)
            target -= 0x80;

        if ((adapter->ldrv_state[target] == RDRV_OFFLINE) ||
            (adapter->ldrv_state[target] == RDRV_DELETED)) {
            return (-1);
        }

        rval = (64 << 16) | 32;

        if (adapter->ldrv_size[target] > 0x200000) {
            rval = (255 << 16) | 63;
        }

        rval = (64 << 16) | 32; /* remove latter */
#endif
        rval = -1;

        break;
    default:
        con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
            scsi_hba_lookup_capstr(cap)));
        rval = -1;
        break;
    }

    return (rval);
}

/*
 * tran_setcap - set one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @value:
 * @whom:
 *
 * The target driver might request that the new value be set for a particular
 * target by setting the whom parameter to nonzero. A whom value of zero
 * means that request is to set the new value for the SCSI bus or for adapter
 * hardware in general.
 * The tran_setcap() should return the following values as appropriate:
 * - -1 for undefined capabilities
 * - 0 if the HBA driver cannot set the capability to the requested value
 * - 1 if the HBA driver is able to set the capability to the requested value
 *
 * NOTE(review): the non-settable capabilities below fall through with
 * rval still 1 ("set succeeded") rather than 0 — confirm this is the
 * intended (historical) behavior before changing it.
 */
/*ARGSUSED*/
static int
megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
    int rval = 1;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* We don't allow setting capabilities for other targets */
    if (cap == NULL || whom == 0) {
        return (-1);
    }

    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DMA_MAX:
    case SCSI_CAP_MSG_OUT:
    case SCSI_CAP_PARITY:
    case SCSI_CAP_LINKED_CMDS:
    case SCSI_CAP_RESET_NOTIFICATION:
    case SCSI_CAP_DISCONNECT:
    case SCSI_CAP_SYNCHRONOUS:
    case SCSI_CAP_UNTAGGED_QING:
    case SCSI_CAP_WIDE_XFER:
    case SCSI_CAP_INITIATOR_ID:
    case SCSI_CAP_ARQ:
        /*
         * None of these are settable via
         * the capability interface.
         */
        break;
    case SCSI_CAP_TAGGED_QING:
        rval = 1;
        break;
    case SCSI_CAP_SECTOR_SIZE:
        rval = 1;
        break;

    case SCSI_CAP_TOTAL_SECTORS:
        rval = 1;
        break;
    default:
        rval = -1;
        break;
    }

    return (rval);
}

/*
 * tran_destroy_pkt - deallocate scsi_pkt structure
 * @ap:
 * @pkt:
 *
 * The tran_destroy_pkt() entry point is the HBA driver function that
 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
 * called when the target driver calls scsi_destroy_pkt(). The
 * tran_destroy_pkt() entry point must free any DMA resources that have been
 * allocated for the packet. An implicit DMA synchronization occurs if the
 * DMA resources are freed and any cached data remains after the completion
 * of the transfer.
 */
static void
megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct scsa_cmd *acmd = PKT2CMD(pkt);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        acmd->cmd_flags &= ~CFLAG_DMAVALID;

        /* unbind performs the implicit sync before the handle is freed */
        (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

        ddi_dma_free_handle(&acmd->cmd_dmahandle);

        acmd->cmd_dmahandle = NULL;
    }

    /* free the pkt */
    scsi_hba_pkt_free(ap, pkt);
}

/*
 * tran_dmafree - deallocates DMA resources
 * @ap:
 * @pkt:
 *
 * The tran_dmafree() entry point deallocates DMA resources that have been
 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
 * free only DMA resources allocated for a scsi_pkt structure, not the
 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
 * implicitly performed.
 */
/*ARGSUSED*/
static void
megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    register struct scsa_cmd *acmd = PKT2CMD(pkt);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        acmd->cmd_flags &= ~CFLAG_DMAVALID;

        (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

        ddi_dma_free_handle(&acmd->cmd_dmahandle);

        acmd->cmd_dmahandle = NULL;
    }
}

/*
 * tran_sync_pkt - synchronize the DMA object allocated
 * @ap:
 * @pkt:
 *
 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
 * entry point is called when the target driver calls scsi_sync_pkt().
If the
 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
 * must synchronize the CPU's view of the data. If the data transfer direction
 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
 * device's view of the data.
 */
/*ARGSUSED*/
static void
megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /*
     * following 'ddi_dma_sync()' API call
     * already called for each I/O in the ISR
     */
#ifdef TBD
    int i;

    register struct scsa_cmd *acmd = PKT2CMD(pkt);

    if (acmd->cmd_flags & CFLAG_DMAVALID) {
        (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
            acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
            DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
    }
#endif /* TBD */
}

/*
 * NOTE(review): both quiesce entry points return 1 unconditionally; in SCSA
 * a non-zero return means the operation failed — confirm this "always
 * decline" behavior is intentional.
 */
/*ARGSUSED*/
static int
megasas_tran_quiesce(dev_info_t *dip)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    return (1);
}

/*ARGSUSED*/
static int
megasas_tran_unquiesce(dev_info_t *dip)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    return (1);
}

/*
 * megasas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 * Walks the firmware reply queue between the firmware-written producer
 * index and our consumer index, moving each completed command to the
 * completed_pool_list, then kicks the soft interrupt (or calls it directly
 * at low interrupt level) to do the actual completion processing.
 */
static uint_t
megasas_isr(caddr_t arg)
{
    int need_softintr;
    uint32_t producer;
    uint32_t consumer;
    uint32_t context;

    struct megasas_cmd *cmd;
    struct megasas_instance *instance;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* LINTED E_BAD_PTR_CAST_ALIGN */
    instance = (struct megasas_instance *)arg;
    /* not our interrupt: let the framework try the next handler */
    if (!instance->func_ptr->intr_ack(instance)) {
        return (DDI_INTR_UNCLAIMED);
    }

    /* pull the device-written producer/consumer/reply data into view */
    (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
        0, 0, DDI_DMA_SYNC_FORCPU);

    if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
        != DDI_SUCCESS) {
        megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (DDI_INTR_UNCLAIMED);
    }

    producer = *instance->producer;
    consumer = *instance->consumer;

    con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
        producer, consumer));

    mutex_enter(&instance->completed_pool_mtx);

    while (consumer != producer) {
        /* context is the cmd_list index the firmware echoes back */
        context = instance->reply_queue[consumer];
        /*
         * con_log(CL_ANN, (CE_WARN,
         * " context returned %x ",context));
         */
        cmd = instance->cmd_list[context];
        mlist_add_tail(&cmd->list, &instance->completed_pool_list);

        consumer++;
        /* reply queue has max_fw_cmds + 1 slots; wrap around */
        if (consumer == (instance->max_fw_cmds + 1)) {
            consumer = 0;
        }
    }

    mutex_exit(&instance->completed_pool_mtx);

    /* publish the new consumer index back to the firmware */
    *instance->consumer = consumer;
    (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
        0, 0, DDI_DMA_SYNC_FORDEV);

    if (instance->softint_running) {
        need_softintr = 0;
    } else {
        need_softintr = 1;
    }

    if (instance->isr_level == HIGH_LEVEL_INTR) {
        if (need_softintr) {
            ddi_trigger_softintr(instance->soft_intr_id);
        }
    } else {
        /*
         * Not a high-level interrupt, therefore call the soft level
         * interrupt explicitly
         */
        (void) megasas_softintr((caddr_t)instance);
    }

    return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                 libraries                                  *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 */
static struct megasas_cmd *
get_mfi_pkt(struct megasas_instance *instance) 1904 { 1905 mlist_t *head = &instance->cmd_pool_list; 1906 struct megasas_cmd *cmd = NULL; 1907 1908 mutex_enter(&instance->cmd_pool_mtx); 1909 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1910 1911 if (!mlist_empty(head)) { 1912 /* LINTED E_BAD_PTR_CAST_ALIGN */ 1913 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1914 mlist_del_init(head->next); 1915 } 1916 if (cmd != NULL) 1917 cmd->pkt = NULL; 1918 mutex_exit(&instance->cmd_pool_mtx); 1919 1920 return (cmd); 1921 } 1922 1923 /* 1924 * return_mfi_pkt : Return a cmd to free command pool 1925 */ 1926 static void 1927 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd) 1928 { 1929 mutex_enter(&instance->cmd_pool_mtx); 1930 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1931 1932 mlist_add(&cmd->list, &instance->cmd_pool_list); 1933 1934 mutex_exit(&instance->cmd_pool_mtx); 1935 } 1936 1937 /* 1938 * get_mfi_pkt : Get a command from the free pool 1939 */ 1940 #ifndef lint 1941 static struct megasas_cmd * 1942 pull_pend_queue(struct megasas_instance *instance) 1943 { 1944 mlist_t *head = &instance->cmd_pend_list; 1945 struct megasas_cmd *cmd = NULL; 1946 1947 mutex_enter(&instance->cmd_pend_mtx); 1948 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1949 1950 if (!mlist_empty(head)) { 1951 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1952 mlist_del_init(head->next); 1953 } 1954 1955 mutex_exit(&instance->cmd_pend_mtx); 1956 1957 return (cmd); 1958 } 1959 1960 /* 1961 * return_mfi_pkt : Return a cmd to free command pool 1962 */ 1963 static void 1964 push_pend_queue(struct megasas_instance *instance, struct megasas_cmd *cmd) 1965 { 1966 mutex_enter(&instance->cmd_pend_mtx); 1967 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1968 1969 mlist_add(&cmd->list, &instance->cmd_pend_list); 1970 1971 mutex_exit(&instance->cmd_pend_mtx); 1972 } 1973 #endif 1974 1975 /* 1976 * destroy_mfi_frame_pool 1977 */ 1978 static void 1979 
destroy_mfi_frame_pool(struct megasas_instance *instance) 1980 { 1981 int i; 1982 uint32_t max_cmd = instance->max_fw_cmds; 1983 1984 struct megasas_cmd *cmd; 1985 1986 /* return all frames to pool */ 1987 for (i = 0; i < max_cmd; i++) { 1988 1989 cmd = instance->cmd_list[i]; 1990 1991 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) 1992 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj); 1993 1994 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 1995 } 1996 1997 } 1998 1999 /* 2000 * create_mfi_frame_pool 2001 */ 2002 static int 2003 create_mfi_frame_pool(struct megasas_instance *instance) 2004 { 2005 int i = 0; 2006 int cookie_cnt; 2007 uint16_t max_cmd; 2008 uint16_t sge_sz; 2009 uint32_t sgl_sz; 2010 uint32_t tot_frame_size; 2011 2012 struct megasas_cmd *cmd; 2013 2014 max_cmd = instance->max_fw_cmds; 2015 2016 sge_sz = sizeof (struct megasas_sge64); 2017 2018 /* calculated the number of 64byte frames required for SGL */ 2019 sgl_sz = sge_sz * instance->max_num_sge; 2020 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH; 2021 2022 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 2023 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 2024 2025 while (i < max_cmd) { 2026 cmd = instance->cmd_list[i]; 2027 2028 cmd->frame_dma_obj.size = tot_frame_size; 2029 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr; 2030 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2031 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2032 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 2033 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 2034 2035 2036 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj); 2037 2038 if (cookie_cnt == -1 || cookie_cnt > 1) { 2039 con_log(CL_ANN, (CE_WARN, 2040 "create_mfi_frame_pool: could not alloc.")); 2041 return (DDI_FAILURE); 2042 } 2043 2044 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 2045 2046 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 2047 cmd->frame = (union megasas_frame 
*)cmd->frame_dma_obj.buffer; 2048 cmd->frame_phys_addr = 2049 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 2050 2051 cmd->sense = (uint8_t *)(((unsigned long) 2052 cmd->frame_dma_obj.buffer) + 2053 tot_frame_size - SENSE_LENGTH); 2054 cmd->sense_phys_addr = 2055 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 2056 tot_frame_size - SENSE_LENGTH; 2057 2058 if (!cmd->frame || !cmd->sense) { 2059 con_log(CL_ANN, (CE_NOTE, 2060 "megasas: pci_pool_alloc failed \n")); 2061 2062 return (-ENOMEM); 2063 } 2064 2065 cmd->frame->io.context = cmd->index; 2066 i++; 2067 2068 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 2069 cmd->frame->io.context, cmd->frame_phys_addr)); 2070 } 2071 2072 return (DDI_SUCCESS); 2073 } 2074 2075 /* 2076 * free_additional_dma_buffer 2077 */ 2078 static void 2079 free_additional_dma_buffer(struct megasas_instance *instance) 2080 { 2081 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 2082 (void) mega_free_dma_obj(instance, 2083 instance->mfi_internal_dma_obj); 2084 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 2085 } 2086 2087 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { 2088 (void) mega_free_dma_obj(instance, 2089 instance->mfi_evt_detail_obj); 2090 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; 2091 } 2092 } 2093 2094 /* 2095 * alloc_additional_dma_buffer 2096 */ 2097 static int 2098 alloc_additional_dma_buffer(struct megasas_instance *instance) 2099 { 2100 uint32_t reply_q_sz; 2101 uint32_t internal_buf_size = PAGESIZE*2; 2102 2103 /* max cmds plus 1 + procudure & consumer */ 2104 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2); 2105 2106 instance->mfi_internal_dma_obj.size = internal_buf_size; 2107 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr; 2108 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2109 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2110 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; 
2111 2112 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj) 2113 != 1) { 2114 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q")); 2115 return (DDI_FAILURE); 2116 } 2117 2118 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); 2119 2120 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; 2121 2122 /* LINTED E_BAD_PTR_CAST_ALIGN */ 2123 instance->producer = (uint32_t *)instance->mfi_internal_dma_obj.buffer; 2124 instance->consumer = (uint32_t *)((unsigned long) 2125 instance->mfi_internal_dma_obj.buffer + 4); 2126 instance->reply_queue = (uint32_t *)((unsigned long) 2127 instance->mfi_internal_dma_obj.buffer + 8); 2128 instance->internal_buf = (caddr_t)(((unsigned long) 2129 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8); 2130 instance->internal_buf_dmac_add = 2131 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 2132 reply_q_sz; 2133 instance->internal_buf_size = internal_buf_size - 2134 (reply_q_sz + 8); 2135 2136 /* allocate evt_detail */ 2137 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail); 2138 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr; 2139 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2140 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2141 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; 2142 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1; 2143 2144 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) { 2145 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: " 2146 "could not data transfer buffer alloc.")); 2147 return (DDI_FAILURE); 2148 } 2149 2150 bzero(instance->mfi_evt_detail_obj.buffer, 2151 sizeof (struct megasas_evt_detail)); 2152 2153 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; 2154 2155 return (DDI_SUCCESS); 2156 } 2157 2158 /* 2159 * free_space_for_mfi 2160 */ 2161 static void 2162 free_space_for_mfi(struct megasas_instance 
*instance)
{
    int i;
    uint32_t max_cmd = instance->max_fw_cmds;

    /* already freed */
    if (instance->cmd_list == NULL) {
        return;
    }

    free_additional_dma_buffer(instance);

    /* first free the MFI frame pool */
    destroy_mfi_frame_pool(instance);

    /* free all the commands in the cmd_list */
    for (i = 0; i < instance->max_fw_cmds; i++) {
        kmem_free(instance->cmd_list[i],
            sizeof (struct megasas_cmd));

        instance->cmd_list[i] = NULL;
    }

    /* free the cmd_list buffer itself */
    kmem_free(instance->cmd_list,
        sizeof (struct megasas_cmd *) * max_cmd);

    instance->cmd_list = NULL;

    INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi
 *
 * Allocates the command-tracking structures for the instance: the
 * cmd_list pointer array, one megasas_cmd per slot, the per-command
 * MFI frame pool and the additional shared DMA buffers.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * NOTE(review): on a mid-way failure nothing allocated here is released
 * locally — presumably the caller invokes free_space_for_mfi() on
 * failure; confirm against the attach path.
 */
static int
alloc_space_for_mfi(struct megasas_instance *instance)
{
    int i;
    uint32_t max_cmd;
    size_t sz;

    struct megasas_cmd *cmd;

    max_cmd = instance->max_fw_cmds;
    sz = sizeof (struct megasas_cmd *) * max_cmd;

    /*
     * instance->cmd_list is an array of struct megasas_cmd pointers.
     * Allocate the dynamic array first and then allocate individual
     * commands.
     */
    instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
    ASSERT(instance->cmd_list);

    for (i = 0; i < max_cmd; i++) {
        instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
            KM_SLEEP);
        ASSERT(instance->cmd_list[i]);
    }

    INIT_LIST_HEAD(&instance->cmd_pool_list);

    /* add all the commands to command pool (instance->cmd_pool) */
    for (i = 0; i < max_cmd; i++) {
        cmd = instance->cmd_list[i];
        cmd->index = i;

        mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
    }

    /* create a frame pool and assign one frame to each cmd */
    if (create_mfi_frame_pool(instance)) {
        con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
        return (DDI_FAILURE);
    }

    /* allocate the reply queue and other additional DMA buffers */
    if (alloc_additional_dma_buffer(instance)) {
        con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * get_ctrl_info
 *
 * Issues the MR_DCMD_CTRL_GET_INFO DCMD in polled mode and copies the
 * controller-properties reply from the shared internal buffer into
 * *ctrl_info.  Returns 0 on success, non-zero (-1 or DDI_FAILURE) on
 * failure.
 */
static int
get_ctrl_info(struct megasas_instance *instance,
    struct megasas_ctrl_info *ctrl_info)
{
    int ret = 0;

    struct megasas_cmd *cmd;
    struct megasas_dcmd_frame *dcmd;
    struct megasas_ctrl_info *ci;

    cmd = get_mfi_pkt(instance);

    if (!cmd) {
        con_log(CL_ANN, (CE_WARN,
            "Failed to get a cmd for ctrl info\n"));
        return (DDI_FAILURE);
    }

    dcmd = &cmd->frame->dcmd;

    /* the firmware DMAs its reply into the shared internal buffer */
    ci = (struct megasas_ctrl_info *)instance->internal_buf;

    if (!ci) {
        con_log(CL_ANN, (CE_WARN,
            "Failed to alloc mem for ctrl info\n"));
        return_mfi_pkt(instance, cmd);
        return (DDI_FAILURE);
    }

    (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));

    /* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */
    (void) memset(dcmd->mbox.b, 0, 12);

    dcmd->cmd = MFI_CMD_OP_DCMD;
    /* 0xFF = "in progress"; overwritten by the firmware on completion */
    dcmd->cmd_status = 0xFF;
2287 dcmd->sge_count = 1; 2288 dcmd->flags = MFI_FRAME_DIR_READ; 2289 dcmd->timeout = 0; 2290 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info); 2291 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2292 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add; 2293 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info); 2294 2295 cmd->frame_count = 1; 2296 2297 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2298 ret = 0; 2299 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info)); 2300 } else { 2301 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n")); 2302 ret = -1; 2303 } 2304 2305 return_mfi_pkt(instance, cmd); 2306 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2307 ret = -1; 2308 } 2309 2310 return (ret); 2311 } 2312 2313 /* 2314 * abort_aen_cmd 2315 */ 2316 static int 2317 abort_aen_cmd(struct megasas_instance *instance, 2318 struct megasas_cmd *cmd_to_abort) 2319 { 2320 int ret = 0; 2321 2322 struct megasas_cmd *cmd; 2323 struct megasas_abort_frame *abort_fr; 2324 2325 cmd = get_mfi_pkt(instance); 2326 2327 if (!cmd) { 2328 con_log(CL_ANN, (CE_WARN, 2329 "Failed to get a cmd for ctrl info\n")); 2330 return (DDI_FAILURE); 2331 } 2332 2333 abort_fr = &cmd->frame->abort; 2334 2335 /* prepare and issue the abort frame */ 2336 abort_fr->cmd = MFI_CMD_OP_ABORT; 2337 abort_fr->cmd_status = 0xFF; 2338 abort_fr->flags = 0; 2339 abort_fr->abort_context = cmd_to_abort->index; 2340 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 2341 abort_fr->abort_mfi_phys_addr_hi = 0; 2342 2343 instance->aen_cmd->abort_aen = 1; 2344 2345 cmd->sync_cmd = MEGASAS_TRUE; 2346 cmd->frame_count = 1; 2347 2348 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2349 con_log(CL_ANN, (CE_WARN, 2350 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n")); 2351 ret = -1; 2352 } else { 2353 ret = 0; 2354 } 2355 2356 instance->aen_cmd->abort_aen = 1; 2357 instance->aen_cmd = 0; 2358 2359 return_mfi_pkt(instance, 
cmd); 2360 (void) megasas_common_check(instance, cmd); 2361 2362 return (ret); 2363 } 2364 2365 /* 2366 * init_mfi 2367 */ 2368 static int 2369 init_mfi(struct megasas_instance *instance) 2370 { 2371 off_t reglength; 2372 struct megasas_cmd *cmd; 2373 struct megasas_ctrl_info ctrl_info; 2374 struct megasas_init_frame *init_frame; 2375 struct megasas_init_queue_info *initq_info; 2376 2377 if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, ®length) 2378 != DDI_SUCCESS) || reglength < 4096) { 2379 return (DDI_FAILURE); 2380 } 2381 2382 if (reglength > 8192) { 2383 reglength = 8192; 2384 con_log(CL_DLEVEL1, (CE_NOTE, 2385 "mega: register length to map is 0x%lx bytes", reglength)); 2386 } 2387 2388 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO, 2389 &instance->regmap, 0, reglength, &endian_attr, 2390 &instance->regmap_handle) != DDI_SUCCESS) { 2391 con_log(CL_ANN, (CE_NOTE, 2392 "megaraid: couldn't map control registers")); 2393 2394 goto fail_mfi_reg_setup; 2395 } 2396 2397 /* we expect the FW state to be READY */ 2398 if (mfi_state_transition_to_ready(instance)) { 2399 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready")); 2400 goto fail_ready_state; 2401 } 2402 2403 /* get various operational parameters from status register */ 2404 instance->max_num_sge = 2405 (instance->func_ptr->read_fw_status_reg(instance) & 2406 0xFF0000) >> 0x10; 2407 /* 2408 * Reduce the max supported cmds by 1. This is to ensure that the 2409 * reply_q_sz (1 more than the max cmd that driver may send) 2410 * does not exceed max cmds that the FW can support 2411 */ 2412 instance->max_fw_cmds = 2413 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 2414 instance->max_fw_cmds = instance->max_fw_cmds - 1; 2415 2416 /* 2417 * con_log(CL_ANN, (CE_WARN, "megaraid: " 2418 * "max_num_sge = %d max_fw_cmds = %d\n", 2419 * instance->max_num_sge, instance->max_fw_cmds)); 2420 */ 2421 2422 instance->max_num_sge = 2423 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ? 
2424 MEGASAS_MAX_SGE_CNT : instance->max_num_sge; 2425 2426 /* create a pool of commands */ 2427 if (alloc_space_for_mfi(instance)) 2428 goto fail_alloc_fw_space; 2429 2430 /* disable interrupt for initial preparation */ 2431 instance->func_ptr->disable_intr(instance); 2432 2433 /* 2434 * Prepare a init frame. Note the init frame points to queue info 2435 * structure. Each frame has SGL allocated after first 64 bytes. For 2436 * this frame - since we don't need any SGL - we use SGL's space as 2437 * queue info structure 2438 */ 2439 cmd = get_mfi_pkt(instance); 2440 2441 init_frame = (struct megasas_init_frame *)cmd->frame; 2442 initq_info = (struct megasas_init_queue_info *) 2443 ((unsigned long)init_frame + 64); 2444 2445 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 2446 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info)); 2447 2448 initq_info->init_flags = 0; 2449 2450 initq_info->reply_queue_entries = instance->max_fw_cmds + 1; 2451 2452 initq_info->producer_index_phys_addr_hi = 0; 2453 initq_info->producer_index_phys_addr_lo = 2454 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address; 2455 2456 initq_info->consumer_index_phys_addr_hi = 0; 2457 initq_info->consumer_index_phys_addr_lo = 2458 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4; 2459 2460 initq_info->reply_queue_start_phys_addr_hi = 0; 2461 initq_info->reply_queue_start_phys_addr_lo = 2462 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8; 2463 2464 init_frame->cmd = MFI_CMD_OP_INIT; 2465 init_frame->cmd_status = 0xFF; 2466 init_frame->flags = 0; 2467 init_frame->queue_info_new_phys_addr_lo = 2468 cmd->frame_phys_addr + 64; 2469 init_frame->queue_info_new_phys_addr_hi = 0; 2470 2471 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info); 2472 2473 cmd->frame_count = 1; 2474 2475 /* issue the init frame in polled mode */ 2476 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2477 con_log(CL_ANN, (CE_WARN, "failed to init 
firmware")); 2478 goto fail_fw_init; 2479 } 2480 2481 return_mfi_pkt(instance, cmd); 2482 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2483 goto fail_fw_init; 2484 } 2485 2486 /* gather misc FW related information */ 2487 if (!get_ctrl_info(instance, &ctrl_info)) { 2488 instance->max_sectors_per_req = ctrl_info.max_request_size; 2489 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d", 2490 ctrl_info.product_name, ctrl_info.ld_present_count)); 2491 } else { 2492 instance->max_sectors_per_req = instance->max_num_sge * 2493 PAGESIZE / 512; 2494 } 2495 2496 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2497 goto fail_fw_init; 2498 } 2499 2500 return (0); 2501 2502 fail_fw_init: 2503 fail_alloc_fw_space: 2504 2505 free_space_for_mfi(instance); 2506 2507 fail_ready_state: 2508 ddi_regs_map_free(&instance->regmap_handle); 2509 2510 fail_mfi_reg_setup: 2511 return (DDI_FAILURE); 2512 } 2513 2514 /* 2515 * mfi_state_transition_to_ready : Move the FW to READY state 2516 * 2517 * @reg_set : MFI register set 2518 */ 2519 static int 2520 mfi_state_transition_to_ready(struct megasas_instance *instance) 2521 { 2522 int i; 2523 uint8_t max_wait; 2524 uint32_t fw_ctrl; 2525 uint32_t fw_state; 2526 uint32_t cur_state; 2527 2528 fw_state = 2529 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK; 2530 con_log(CL_ANN1, (CE_NOTE, 2531 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2532 2533 while (fw_state != MFI_STATE_READY) { 2534 con_log(CL_ANN, (CE_NOTE, 2535 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2536 2537 switch (fw_state) { 2538 case MFI_STATE_FAULT: 2539 con_log(CL_ANN, (CE_NOTE, 2540 "megasas: FW in FAULT state!!")); 2541 2542 return (-ENODEV); 2543 case MFI_STATE_WAIT_HANDSHAKE: 2544 /* set the CLR bit in IMR0 */ 2545 con_log(CL_ANN, (CE_NOTE, 2546 "megasas: FW waiting for HANDSHAKE")); 2547 /* 2548 * PCI_Hot Plug: MFI F/W requires 2549 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 
			 * to be set
			 */
			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
			    MFI_INIT_HOTPLUG, instance);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW state boot message pending"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

			max_wait = 10;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* bring it to READY state; assuming max wait 2 secs */
			instance->func_ptr->disable_intr(instance);
			con_log(CL_ANN1, (CE_NOTE,
			    "megasas: FW in OPERATIONAL state"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
			 * to be set
			 */
			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* this state should not last for more than 2 seconds */
			con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));

			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_DEVICE_SCAN:
			max_wait = 10;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		default:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: Unknown state 0x%x\n", fw_state));
			return (-ENODEV);
		}

		/*
		 * the cur_state should not last for more than max_wait secs:
		 * max_wait * 1000 iterations of a ~1ms delay each.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
			fw_state =
			    instance->func_ptr->read_fw_status_reg(instance) &
			    MFI_STATE_MASK;

			if (fw_state == cur_state) {
				delay(1 * drv_usectohz(1000));
			} else {
				break;
			}
		}

		/* return error if fw_state hasn't changed after max_wait */
		if (fw_state == cur_state) {
			con_log(CL_ANN, (CE_NOTE,
			    "FW state hasn't changed in %d secs\n", max_wait));
			return (-ENODEV);
		}
	};

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	fw_ctrl = RD_IB_DOORBELL(instance);
#ifdef lint
	fw_ctrl = fw_ctrl;
#endif
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));

	/*
	 * Write 0xF to the doorbell register to do the following.
	 * - Abort all outstanding commands (bit 0).
	 * - Transition from OPERATIONAL to READY state (bit 1).
	 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
	 * - Set to release FW to continue running (i.e. BIOS handshake
	 *   (bit 3).
2654 */ 2655 /* LINTED E_BAD_PTR_CAST_ALIGN */ 2656 WR_IB_DOORBELL(0xF, instance); 2657 2658 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2659 return (-ENODEV); 2660 } 2661 return (0); 2662 } 2663 2664 /* 2665 * get_seq_num 2666 */ 2667 static int 2668 get_seq_num(struct megasas_instance *instance, 2669 struct megasas_evt_log_info *eli) 2670 { 2671 int ret = 0; 2672 2673 dma_obj_t dcmd_dma_obj; 2674 struct megasas_cmd *cmd; 2675 struct megasas_dcmd_frame *dcmd; 2676 2677 cmd = get_mfi_pkt(instance); 2678 2679 if (!cmd) { 2680 cmn_err(CE_WARN, "megasas: failed to get a cmd\n"); 2681 return (-ENOMEM); 2682 } 2683 2684 dcmd = &cmd->frame->dcmd; 2685 2686 /* allocate the data transfer buffer */ 2687 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info); 2688 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 2689 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2690 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2691 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2692 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2693 2694 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 2695 con_log(CL_ANN, (CE_WARN, 2696 "get_seq_num: could not data transfer buffer alloc.")); 2697 return (DDI_FAILURE); 2698 } 2699 2700 (void) memset(dcmd_dma_obj.buffer, 0, 2701 sizeof (struct megasas_evt_log_info)); 2702 2703 (void) memset(dcmd->mbox.b, 0, 12); 2704 2705 dcmd->cmd = MFI_CMD_OP_DCMD; 2706 dcmd->cmd_status = 0; 2707 dcmd->sge_count = 1; 2708 dcmd->flags = MFI_FRAME_DIR_READ; 2709 dcmd->timeout = 0; 2710 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info); 2711 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2712 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info); 2713 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 2714 2715 cmd->sync_cmd = MEGASAS_TRUE; 2716 cmd->frame_count = 1; 2717 2718 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2719 cmn_err(CE_WARN, "get_seq_num: " 2720 "failed 
to issue MR_DCMD_CTRL_EVENT_GET_INFO\n"); 2721 ret = -1; 2722 } else { 2723 /* copy the data back into callers buffer */ 2724 bcopy(dcmd_dma_obj.buffer, eli, 2725 sizeof (struct megasas_evt_log_info)); 2726 ret = 0; 2727 } 2728 2729 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 2730 ret = -1; 2731 2732 return_mfi_pkt(instance, cmd); 2733 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2734 ret = -1; 2735 } 2736 return (ret); 2737 } 2738 2739 #ifndef lint 2740 static int 2741 get_seq_num_in_poll(struct megasas_instance *instance, 2742 struct megasas_evt_log_info *eli) 2743 { 2744 int ret = 0; 2745 2746 dma_obj_t dcmd_dma_obj; 2747 struct megasas_cmd *cmd; 2748 struct megasas_dcmd_frame *dcmd; 2749 2750 cmd = get_mfi_pkt(instance); 2751 2752 if (!cmd) { 2753 cmn_err(CE_WARN, "megasas: failed to get a cmd\n"); 2754 return (-ENOMEM); 2755 } 2756 2757 dcmd = &cmd->frame->dcmd; 2758 2759 /* allocate the data transfer buffer */ 2760 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info); 2761 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 2762 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2763 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2764 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2765 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2766 2767 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 2768 con_log(CL_ANN, (CE_WARN, "get_seq_num_in_poll: " 2769 "could not data transfer buffer alloc.")); 2770 return (DDI_FAILURE); 2771 } 2772 2773 (void) memset(dcmd_dma_obj.buffer, 0, 2774 sizeof (struct megasas_evt_log_info)); 2775 2776 /* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */ 2777 (void) memset(dcmd->mbox.b, 0, 12); 2778 2779 dcmd->cmd = MFI_CMD_OP_DCMD; 2780 dcmd->cmd_status = 0; 2781 dcmd->sge_count = 1; 2782 dcmd->flags = MFI_FRAME_DIR_READ; 2783 dcmd->timeout = 0; 2784 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info); 2785 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2786 dcmd->sgl.sge32[0].length = sizeof 
	    (struct megasas_evt_log_info);
	dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		cmn_err(CE_WARN, "get_seq_num_in_poll: "
		    "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
		ret = -1;
	} else {
		cmn_err(CE_WARN, "get_seq_num_in_poll:done\n");
		/* copy the data back into callers buffer */
		bcopy(dcmd_dma_obj.buffer, eli,
		    sizeof (struct megasas_evt_log_info));
		ret = 0;
	}

	if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
		ret = -1;

	return_mfi_pkt(instance, cmd);
	if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}

	return (ret);
}
#endif

/*
 * start_mfi_aen - start asynchronous event notification
 *
 * Reads the newest event sequence number from firmware (get_seq_num) and
 * registers an AEN for all locales / critical class starting at that
 * sequence number + 1.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
start_mfi_aen(struct megasas_instance *instance)
{
	int	ret = 0;

	struct megasas_evt_log_info	eli;
	union megasas_evt_class_locale	class_locale;

	/* get the latest sequence number from FW */
	(void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));

	if (get_seq_num(instance, &eli)) {
		cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
		return (-1);
	}

	/* register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_CRITICAL;

	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
	    class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
		return (-1);
	}

	return (ret);
}

/*
 * flush_cache - flush controller and disk caches
 *
 * Issues MR_DCMD_CTRL_CACHE_FLUSH in polled mode. Best-effort: a failure
 * is logged but not reported to the caller (used e.g. on shutdown/detach).
 */
static void
flush_cache(struct megasas_instance *instance)
{
	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;

	/* silently give up if no command packet is available */
	if (!(cmd = get_mfi_pkt(instance)))
		return;

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, 12);

	/* no data transfer: the flush targets are encoded in mbox.b[0] */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		cmn_err(CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "done"));
	return_mfi_pkt(instance, cmd);
	(void) megasas_common_check(instance, cmd);
}

/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Logs a sysevent for the received event, then re-arms the same command
 * frame with the next sequence number and re-issues it so AEN delivery
 * continues.
 */
static void
service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	uint32_t	seq_num;
#ifdef TBD
	int	ret = 0;
	union megasas_evt_class_locale	class_locale;
#endif /* TBD */
	struct megasas_evt_detail *evt_detail =
	    (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;

	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* ENODATA from firmware is treated as a successful (empty) event */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mega%d: Failed to log AEN event", instance_no));
	}

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = evt_detail->seq_num;
	seq_num++;
#ifdef TBD
	class_locale.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
	instance->aen_cmd = 0;
	return_mfi_pkt(instance, cmd);
	megasas_common_check(instance, cmd);
	ret = register_mfi_aen(instance, seq_num, class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "service_mfi_aen: aen registration failed\n");
	}
#endif /* TBD */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* re-arm the same frame for the next event */
	cmd->frame->dcmd.cmd_status = 0x0;
	cmd->frame->dcmd.mbox.w[0] = seq_num;

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	cmd->sync_cmd = MEGASAS_FALSE;

	/* firmware's ENODATA is not an error for sync commands */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * megasas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Splices the completed-command list out from under the completion lock,
 * then for each command: syncs its frame for the CPU, dispatches on the
 * MFI opcode (regular SCSI/LD I/O, SMP/STP, DCMD/AEN, ABORT), translates
 * the firmware status into scsi_pkt state/reason/sense, and invokes the
 * target completion callback.
 */
static uint_t
megasas_softintr(caddr_t arg)
{
	struct scsi_pkt		*pkt;
	struct scsa_cmd		*acmd;
	struct megasas_cmd	*cmd;
	struct mlist_head	*pos, *next;
	mlist_t			process_list;
	struct megasas_header	*hdr;
	struct megasas_instance	*instance;
	struct scsi_arq_status	*arqstat;

	con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	instance = (struct megasas_instance *)arg;
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_UNCLAIMED);
	}

	instance->softint_running = 1;

	/* steal the whole completed list so the lock can be dropped */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		cmd = mlist_entry(pos, struct megasas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			return (DDI_INTR_UNCLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (hdr->cmd) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MEGASAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);
			/* con_log(CL_ANN, (CE_CONT,"pkt recived")); */

			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* assume full success; status cases adjust below */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry	*inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* map firmware completion status to SCSA status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				/* SJ - these are not correct way */
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* build auto request-sense data */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;

					bcopy(cmd->sense,
					    &(arqstat->sts_sensedata),
					    pkt->pkt_scblen -
					    offsetof(struct scsi_arq_status,
					    sts_sensedata));
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				/* synthesize ILLEGAL REQUEST sense data */
				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));
			/* pull_pend_queue(instance); */

			return_mfi_pkt(instance, cmd);

			(void) megasas_common_check(instance, cmd);

			if (acmd->cmd_dmahandle) {
				if (megasas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}
			/*
			 * con_log(CL_ANN,
			 *    (CE_CONT,"call add %lx",pkt->pkt_comp));
			 */

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			/* con_log(CL_ANN, (CE_CONT, "call complete")); */
			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (cmd->frame->dcmd.opcode ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "megasas_softintr: "
					    "aborted_aen returned"));
				} else {
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			/* still complete any attached packet to the target */
			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {
					(*pkt->pkt_comp)(pkt);
				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
			break;
		}
	}
	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mega_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 *
 * Fills in obj->dma_handle, obj->buffer, obj->acc_handle and
 * obj->dma_cookie[0] for a buffer of obj->size bytes described by
 * obj->dma_attr.
 *
 * Returns the cookie count (expected 1 by callers) on success, -1 on
 * any allocation/bind/FMA failure.
 */
static int
mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
{
	int		i;
	size_t		alen = 0;
	uint_t		cookie_cnt;

	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
	if (i != DDI_SUCCESS) {

		switch (i) {
		case DDI_DMA_BADATTR:
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- Bad atrib"));
			break;
		case DDI_DMA_NORESOURCES:
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle- No Resources"));
			break;
		default:
			con_log(CL_ANN, (CE_WARN,
			    "Failed ddi_dma_alloc_handle :unknown %d", i));
			break;
		}

		return (-1);
	}

	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &endian_attr,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
	    alen < obj->size) {

		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

		return (-1);
	}

	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

		ddi_dma_mem_free(&obj->acc_handle);
		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

		return (-1);
	}

	/* FMA: validate the freshly created handles */
	if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (-1);
	}

	return (cookie_cnt);
}

/*
 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mega_alloc_dma_obj()
 */
static int
mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
{

	/* FMA: check handles before tearing them down */
	if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}

	(void) ddi_dma_unbind_handle(obj.dma_handle);
	ddi_dma_mem_free(&obj.acc_handle);
	ddi_dma_free_handle(&obj.dma_handle);

	return (DDI_SUCCESS);
}

/*
 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Binds bp to a DMA handle (honoring PKT_CONSISTENT/PKT_DMA_PARTIAL),
 * collects up to max_num_sge cookies into acmd->cmd_dmacookies[], and
 * sets pkt->pkt_resid to the amount not covered by this window.
 *
 * Returns 0 on success, -1 on failure (bp's b_error set via bioerror).
 */
static int
megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = megasas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* direction of the transfer follows the buf's read/write flag */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* map the caller's callback policy to the DDI wait policy */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (-1);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (-1);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "0x%x impossible\n", i));
			/* NOTREACHED */
			break;
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* only legal if the caller asked for PKT_DMA_PARTIAL */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
			/* NOTREACHED */
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/* gather cookies, capped at the controller's SGE limit */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* residual = what this window does not cover */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (0);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible\n"));
		/* NOTREACHED */
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "0x%x impossible\n", i));
		/* NOTREACHED */
		break;
	}

	/* bind failed: release the handle and mark the command unmapped */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (-1);
}

/*
 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
3531 */ 3532 if (acmd->cmd_cookie == acmd->cmd_ncookies) { 3533 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) { 3534 return (0); 3535 } 3536 3537 /* at last window, cannot move */ 3538 if (++acmd->cmd_curwin >= acmd->cmd_nwin) { 3539 return (-1); 3540 } 3541 3542 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3543 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3544 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3545 DDI_FAILURE) { 3546 return (-1); 3547 } 3548 3549 acmd->cmd_cookie = 0; 3550 } else { 3551 /* still more cookies in this window - get the next one */ 3552 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3553 &acmd->cmd_dmacookies[0]); 3554 } 3555 3556 /* get remaining cookies in this window, up to our maximum */ 3557 for (;;) { 3558 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size; 3559 acmd->cmd_cookie++; 3560 3561 if (i == instance->max_num_sge || 3562 acmd->cmd_cookie == acmd->cmd_ncookies) { 3563 break; 3564 } 3565 3566 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3567 &acmd->cmd_dmacookies[i]); 3568 } 3569 3570 acmd->cmd_cookiecnt = i; 3571 3572 if (bp->b_bcount >= acmd->cmd_dmacount) { 3573 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3574 } else { 3575 pkt->pkt_resid = 0; 3576 } 3577 3578 return (0); 3579 } 3580 3581 /* 3582 * build_cmd 3583 */ 3584 static struct megasas_cmd * 3585 build_cmd(struct megasas_instance *instance, struct scsi_address *ap, 3586 struct scsi_pkt *pkt, uchar_t *cmd_done) 3587 { 3588 uint16_t flags = 0; 3589 uint32_t i; 3590 uint32_t context; 3591 uint32_t sge_bytes; 3592 3593 struct megasas_cmd *cmd; 3594 struct megasas_sge32 *mfi_sgl; 3595 struct scsa_cmd *acmd = PKT2CMD(pkt); 3596 struct megasas_pthru_frame *pthru; 3597 struct megasas_io_frame *ldio; 3598 3599 /* find out if this is logical or physical drive command. 
*/ 3600 acmd->islogical = MEGADRV_IS_LOGICAL(ap); 3601 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3602 *cmd_done = 0; 3603 3604 /* get the command packet */ 3605 if (!(cmd = get_mfi_pkt(instance))) { 3606 return (NULL); 3607 } 3608 3609 cmd->pkt = pkt; 3610 cmd->cmd = acmd; 3611 3612 /* lets get the command directions */ 3613 if (acmd->cmd_flags & CFLAG_DMASEND) { 3614 flags = MFI_FRAME_DIR_WRITE; 3615 3616 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3617 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3618 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3619 DDI_DMA_SYNC_FORDEV); 3620 } 3621 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3622 flags = MFI_FRAME_DIR_READ; 3623 3624 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3625 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3626 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3627 DDI_DMA_SYNC_FORCPU); 3628 } 3629 } else { 3630 flags = MFI_FRAME_DIR_NONE; 3631 } 3632 3633 /* flags |= MFI_FRAME_SGL64; */ 3634 3635 switch (pkt->pkt_cdbp[0]) { 3636 3637 /* 3638 * case SCMD_SYNCHRONIZE_CACHE: 3639 * flush_cache(instance); 3640 * return_mfi_pkt(instance, cmd); 3641 * *cmd_done = 1; 3642 * 3643 * return (NULL); 3644 */ 3645 3646 case SCMD_READ: 3647 case SCMD_WRITE: 3648 case SCMD_READ_G1: 3649 case SCMD_WRITE_G1: 3650 if (acmd->islogical) { 3651 ldio = (struct megasas_io_frame *)cmd->frame; 3652 3653 /* 3654 * preare the Logical IO frame: 3655 * 2nd bit is zero for all read cmds 3656 */ 3657 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ? 
3658 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ; 3659 ldio->cmd_status = 0x0; 3660 ldio->scsi_status = 0x0; 3661 ldio->target_id = acmd->device_id; 3662 ldio->timeout = 0; 3663 ldio->reserved_0 = 0; 3664 ldio->pad_0 = 0; 3665 ldio->flags = flags; 3666 3667 /* Initialize sense Information */ 3668 bzero(cmd->sense, SENSE_LENGTH); 3669 ldio->sense_len = SENSE_LENGTH; 3670 ldio->sense_buf_phys_addr_hi = 0; 3671 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3672 3673 ldio->start_lba_hi = 0; 3674 ldio->access_byte = (acmd->cmd_cdblen != 6) ? 3675 pkt->pkt_cdbp[1] : 0; 3676 ldio->sge_count = acmd->cmd_cookiecnt; 3677 mfi_sgl = (struct megasas_sge32 *)&ldio->sgl; 3678 3679 context = ldio->context; 3680 3681 if (acmd->cmd_cdblen == CDB_GROUP0) { 3682 ldio->lba_count = host_to_le16( 3683 (uint16_t)(pkt->pkt_cdbp[4])); 3684 3685 ldio->start_lba_lo = host_to_le32( 3686 ((uint32_t)(pkt->pkt_cdbp[3])) | 3687 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3688 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3689 << 16)); 3690 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3691 ldio->lba_count = host_to_le16( 3692 ((uint16_t)(pkt->pkt_cdbp[8])) | 3693 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)); 3694 3695 ldio->start_lba_lo = host_to_le32( 3696 ((uint32_t)(pkt->pkt_cdbp[5])) | 3697 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3698 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3699 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3700 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3701 ldio->lba_count = host_to_le16( 3702 ((uint16_t)(pkt->pkt_cdbp[9])) | 3703 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3704 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3705 ((uint16_t)(pkt->pkt_cdbp[6]) << 24)); 3706 3707 ldio->start_lba_lo = host_to_le32( 3708 ((uint32_t)(pkt->pkt_cdbp[5])) | 3709 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3710 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3711 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3712 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3713 ldio->lba_count = host_to_le16( 3714 ((uint16_t)(pkt->pkt_cdbp[13])) | 3715 
((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3716 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3717 ((uint16_t)(pkt->pkt_cdbp[10]) << 24)); 3718 3719 ldio->start_lba_lo = host_to_le32( 3720 ((uint32_t)(pkt->pkt_cdbp[9])) | 3721 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3722 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3723 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); 3724 3725 ldio->start_lba_lo = host_to_le32( 3726 ((uint32_t)(pkt->pkt_cdbp[5])) | 3727 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3728 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3729 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3730 } 3731 3732 break; 3733 } 3734 /* fall through For all non-rd/wr cmds */ 3735 default: 3736 pthru = (struct megasas_pthru_frame *)cmd->frame; 3737 3738 /* prepare the DCDB frame */ 3739 pthru->cmd = (acmd->islogical) ? 3740 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI; 3741 pthru->cmd_status = 0x0; 3742 pthru->scsi_status = 0x0; 3743 pthru->target_id = acmd->device_id; 3744 pthru->lun = 0; 3745 pthru->cdb_len = acmd->cmd_cdblen; 3746 pthru->timeout = 0; 3747 pthru->flags = flags; 3748 pthru->data_xfer_len = acmd->cmd_dmacount; 3749 pthru->sge_count = acmd->cmd_cookiecnt; 3750 mfi_sgl = (struct megasas_sge32 *)&pthru->sgl; 3751 3752 bzero(cmd->sense, SENSE_LENGTH); 3753 pthru->sense_len = SENSE_LENGTH; 3754 pthru->sense_buf_phys_addr_hi = 0; 3755 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3756 3757 context = pthru->context; 3758 3759 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen); 3760 3761 break; 3762 } 3763 #ifdef lint 3764 context = context; 3765 #endif 3766 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */ 3767 3768 /* prepare the scatter-gather list for the firmware */ 3769 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3770 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress; 3771 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size; 3772 } 3773 3774 sge_bytes = sizeof (struct megasas_sge32)*acmd->cmd_cookiecnt; 3775 3776 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) 
+ 3777 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; 3778 3779 if (cmd->frame_count >= 8) { 3780 cmd->frame_count = 8; 3781 } 3782 3783 return (cmd); 3784 } 3785 3786 /* 3787 * wait_for_outstanding - Wait for all outstanding cmds 3788 * @instance: Adapter soft state 3789 * 3790 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to 3791 * complete all its outstanding commands. Returns error if one or more IOs 3792 * are pending after this time period. 3793 */ 3794 static int 3795 wait_for_outstanding(struct megasas_instance *instance) 3796 { 3797 int i; 3798 uint32_t wait_time = 90; 3799 3800 for (i = 0; i < wait_time; i++) { 3801 if (!instance->fw_outstanding) { 3802 break; 3803 } 3804 3805 drv_usecwait(1000); /* wait for 1000 usecs */; 3806 } 3807 3808 if (instance->fw_outstanding) { 3809 return (1); 3810 } 3811 3812 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION); 3813 3814 return (0); 3815 } 3816 3817 /* 3818 * issue_mfi_pthru 3819 */ 3820 static int 3821 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3822 struct megasas_cmd *cmd, int mode) 3823 { 3824 void *ubuf; 3825 uint32_t kphys_addr = 0; 3826 uint32_t xferlen = 0; 3827 uint_t model; 3828 3829 dma_obj_t pthru_dma_obj; 3830 struct megasas_pthru_frame *kpthru; 3831 struct megasas_pthru_frame *pthru; 3832 3833 pthru = &cmd->frame->pthru; 3834 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0]; 3835 3836 model = ddi_model_convert_from(mode & FMODELS); 3837 if (model == DDI_MODEL_ILP32) { 3838 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3839 3840 xferlen = kpthru->sgl.sge32[0].length; 3841 3842 /* SJ! - ubuf needs to be virtual address. */ 3843 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3844 } else { 3845 #ifdef _ILP32 3846 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3847 xferlen = kpthru->sgl.sge32[0].length; 3848 /* SJ! - ubuf needs to be virtual address. 
*/ 3849 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3850 #else 3851 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3852 xferlen = kpthru->sgl.sge64[0].length; 3853 /* SJ! - ubuf needs to be virtual address. */ 3854 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3855 #endif 3856 } 3857 3858 if (xferlen) { 3859 /* means IOCTL requires DMA */ 3860 /* allocate the data transfer buffer */ 3861 pthru_dma_obj.size = xferlen; 3862 pthru_dma_obj.dma_attr = megasas_generic_dma_attr; 3863 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 3864 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 3865 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3866 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3867 3868 /* allocate kernel buffer for DMA */ 3869 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) { 3870 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3871 "could not data transfer buffer alloc.")); 3872 return (DDI_FAILURE); 3873 } 3874 3875 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3876 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3877 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer, 3878 xferlen, mode)) { 3879 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3880 "copy from user space failed\n")); 3881 return (1); 3882 } 3883 } 3884 3885 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3886 } 3887 3888 pthru->cmd = kpthru->cmd; 3889 pthru->sense_len = kpthru->sense_len; 3890 pthru->cmd_status = kpthru->cmd_status; 3891 pthru->scsi_status = kpthru->scsi_status; 3892 pthru->target_id = kpthru->target_id; 3893 pthru->lun = kpthru->lun; 3894 pthru->cdb_len = kpthru->cdb_len; 3895 pthru->sge_count = kpthru->sge_count; 3896 pthru->timeout = kpthru->timeout; 3897 pthru->data_xfer_len = kpthru->data_xfer_len; 3898 3899 pthru->sense_buf_phys_addr_hi = 0; 3900 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3901 pthru->sense_buf_phys_addr_lo = 0; 3902 3903 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, 
pthru->cdb_len); 3904 3905 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64; 3906 pthru->sgl.sge32[0].length = xferlen; 3907 pthru->sgl.sge32[0].phys_addr = kphys_addr; 3908 3909 cmd->sync_cmd = MEGASAS_TRUE; 3910 cmd->frame_count = 1; 3911 3912 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3913 con_log(CL_ANN, (CE_WARN, 3914 "issue_mfi_pthru: fw_ioctl failed\n")); 3915 } else { 3916 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) { 3917 /* 3918 * con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3919 * "copy to user space\n")); 3920 */ 3921 3922 if (ddi_copyout(pthru_dma_obj.buffer, ubuf, 3923 xferlen, mode)) { 3924 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3925 "copy to user space failed\n")); 3926 return (1); 3927 } 3928 } 3929 } 3930 3931 kpthru->cmd_status = pthru->cmd_status; 3932 kpthru->scsi_status = pthru->scsi_status; 3933 3934 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3935 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status)); 3936 3937 if (xferlen) { 3938 /* free kernel buffer */ 3939 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3940 return (1); 3941 } 3942 3943 return (0); 3944 } 3945 3946 /* 3947 * issue_mfi_dcmd 3948 */ 3949 static int 3950 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3951 struct megasas_cmd *cmd, int mode) 3952 { 3953 void *ubuf; 3954 uint32_t kphys_addr = 0; 3955 uint32_t xferlen = 0; 3956 uint32_t model; 3957 dma_obj_t dcmd_dma_obj; 3958 struct megasas_dcmd_frame *kdcmd; 3959 struct megasas_dcmd_frame *dcmd; 3960 3961 dcmd = &cmd->frame->dcmd; 3962 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0]; 3963 3964 model = ddi_model_convert_from(mode & FMODELS); 3965 if (model == DDI_MODEL_ILP32) { 3966 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3967 3968 xferlen = kdcmd->sgl.sge32[0].length; 3969 3970 /* SJ! - ubuf needs to be virtual address. 
*/ 3971 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3972 } 3973 else 3974 { 3975 #ifdef _ILP32 3976 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3977 xferlen = kdcmd->sgl.sge32[0].length; 3978 /* SJ! - ubuf needs to be virtual address. */ 3979 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3980 #else 3981 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3982 xferlen = kdcmd->sgl.sge64[0].length; 3983 /* SJ! - ubuf needs to be virtual address. */ 3984 ubuf = (void *)(ulong_t)dcmd->sgl.sge64[0].phys_addr; 3985 #endif 3986 } 3987 if (xferlen) { 3988 /* means IOCTL requires DMA */ 3989 /* allocate the data transfer buffer */ 3990 dcmd_dma_obj.size = xferlen; 3991 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 3992 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 3993 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 3994 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3995 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3996 3997 /* allocate kernel buffer for DMA */ 3998 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 3999 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 4000 "could not data transfer buffer alloc.")); 4001 return (DDI_FAILURE); 4002 } 4003 4004 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4005 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 4006 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer, 4007 xferlen, mode)) { 4008 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 4009 "copy from user space failed\n")); 4010 return (1); 4011 } 4012 } 4013 4014 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 4015 } 4016 4017 dcmd->cmd = kdcmd->cmd; 4018 dcmd->cmd_status = kdcmd->cmd_status; 4019 dcmd->sge_count = kdcmd->sge_count; 4020 dcmd->timeout = kdcmd->timeout; 4021 dcmd->data_xfer_len = kdcmd->data_xfer_len; 4022 dcmd->opcode = kdcmd->opcode; 4023 4024 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, 12); 4025 4026 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64; 4027 
dcmd->sgl.sge32[0].length = xferlen; 4028 dcmd->sgl.sge32[0].phys_addr = kphys_addr; 4029 4030 cmd->sync_cmd = MEGASAS_TRUE; 4031 cmd->frame_count = 1; 4032 4033 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4034 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n")); 4035 } else { 4036 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 4037 /* 4038 * con_log(CL_ANN, (CE_WARN,"issue_mfi_dcmd: " 4039 * copy to user space\n")); 4040 */ 4041 4042 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf, 4043 xferlen, mode)) { 4044 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 4045 "copy to user space failed\n")); 4046 return (1); 4047 } 4048 } 4049 } 4050 4051 kdcmd->cmd_status = dcmd->cmd_status; 4052 4053 if (xferlen) { 4054 /* free kernel buffer */ 4055 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 4056 return (1); 4057 } 4058 4059 return (0); 4060 } 4061 4062 /* 4063 * issue_mfi_smp 4064 */ 4065 static int 4066 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4067 struct megasas_cmd *cmd, int mode) 4068 { 4069 void *request_ubuf; 4070 void *response_ubuf; 4071 uint32_t request_xferlen = 0; 4072 uint32_t response_xferlen = 0; 4073 uint_t model; 4074 dma_obj_t request_dma_obj; 4075 dma_obj_t response_dma_obj; 4076 struct megasas_smp_frame *ksmp; 4077 struct megasas_smp_frame *smp; 4078 struct megasas_sge32 *sge32; 4079 #ifndef _ILP32 4080 struct megasas_sge64 *sge64; 4081 #endif 4082 4083 smp = &cmd->frame->smp; 4084 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0]; 4085 4086 model = ddi_model_convert_from(mode & FMODELS); 4087 if (model == DDI_MODEL_ILP32) { 4088 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 4089 4090 sge32 = &ksmp->sgl[0].sge32[0]; 4091 response_xferlen = sge32[0].length; 4092 request_xferlen = sge32[1].length; 4093 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 4094 "response_xferlen = %x, request_xferlen = %x", 4095 response_xferlen, request_xferlen)); 4096 4097 /* SJ! 
- ubuf needs to be virtual address. */ 4098 4099 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 4100 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 4101 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 4102 "response_ubuf = %p, request_ubuf = %p", 4103 response_ubuf, request_ubuf)); 4104 } else { 4105 #ifdef _ILP32 4106 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 4107 4108 sge32 = &ksmp->sgl[0].sge32[0]; 4109 response_xferlen = sge32[0].length; 4110 request_xferlen = sge32[1].length; 4111 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 4112 "response_xferlen = %x, request_xferlen = %x", 4113 response_xferlen, request_xferlen)); 4114 4115 /* SJ! - ubuf needs to be virtual address. */ 4116 4117 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 4118 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 4119 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 4120 "response_ubuf = %p, request_ubuf = %p", 4121 response_ubuf, request_ubuf)); 4122 #else 4123 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 4124 4125 sge64 = &ksmp->sgl[0].sge64[0]; 4126 response_xferlen = sge64[0].length; 4127 request_xferlen = sge64[1].length; 4128 4129 /* SJ! - ubuf needs to be virtual address. 
*/ 4130 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 4131 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 4132 #endif 4133 } 4134 if (request_xferlen) { 4135 /* means IOCTL requires DMA */ 4136 /* allocate the data transfer buffer */ 4137 request_dma_obj.size = request_xferlen; 4138 request_dma_obj.dma_attr = megasas_generic_dma_attr; 4139 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4140 request_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4141 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 4142 request_dma_obj.dma_attr.dma_attr_align = 1; 4143 4144 /* allocate kernel buffer for DMA */ 4145 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) { 4146 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4147 "could not data transfer buffer alloc.")); 4148 return (DDI_FAILURE); 4149 } 4150 4151 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4152 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer, 4153 request_xferlen, mode)) { 4154 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4155 "copy from user space failed\n")); 4156 return (1); 4157 } 4158 } 4159 4160 if (response_xferlen) { 4161 /* means IOCTL requires DMA */ 4162 /* allocate the data transfer buffer */ 4163 response_dma_obj.size = response_xferlen; 4164 response_dma_obj.dma_attr = megasas_generic_dma_attr; 4165 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4166 response_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4167 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 4168 response_dma_obj.dma_attr.dma_attr_align = 1; 4169 4170 /* allocate kernel buffer for DMA */ 4171 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) { 4172 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4173 "could not data transfer buffer alloc.")); 4174 return (DDI_FAILURE); 4175 } 4176 4177 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4178 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer, 4179 response_xferlen, mode)) { 4180 con_log(CL_ANN, 
(CE_WARN, "issue_mfi_smp: " 4181 "copy from user space failed\n")); 4182 return (1); 4183 } 4184 } 4185 4186 smp->cmd = ksmp->cmd; 4187 smp->cmd_status = ksmp->cmd_status; 4188 smp->connection_status = ksmp->connection_status; 4189 smp->sge_count = ksmp->sge_count; 4190 /* smp->context = ksmp->context; */ 4191 smp->timeout = ksmp->timeout; 4192 smp->data_xfer_len = ksmp->data_xfer_len; 4193 4194 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr, 4195 sizeof (uint64_t)); 4196 4197 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64; 4198 4199 model = ddi_model_convert_from(mode & FMODELS); 4200 if (model == DDI_MODEL_ILP32) { 4201 con_log(CL_ANN1, (CE_NOTE, 4202 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4203 4204 sge32 = &smp->sgl[0].sge32[0]; 4205 sge32[0].length = response_xferlen; 4206 sge32[0].phys_addr = 4207 response_dma_obj.dma_cookie[0].dmac_address; 4208 sge32[1].length = request_xferlen; 4209 sge32[1].phys_addr = 4210 request_dma_obj.dma_cookie[0].dmac_address; 4211 } else { 4212 #ifdef _ILP32 4213 con_log(CL_ANN1, (CE_NOTE, 4214 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4215 sge32 = &smp->sgl[0].sge32[0]; 4216 sge32[0].length = response_xferlen; 4217 sge32[0].phys_addr = 4218 response_dma_obj.dma_cookie[0].dmac_address; 4219 sge32[1].length = request_xferlen; 4220 sge32[1].phys_addr = 4221 request_dma_obj.dma_cookie[0].dmac_address; 4222 #else 4223 con_log(CL_ANN1, (CE_NOTE, 4224 "issue_mfi_smp: DDI_MODEL_LP64")); 4225 sge64 = &smp->sgl[0].sge64[0]; 4226 sge64[0].length = response_xferlen; 4227 sge64[0].phys_addr = 4228 response_dma_obj.dma_cookie[0].dmac_address; 4229 sge64[1].length = request_xferlen; 4230 sge64[1].phys_addr = 4231 request_dma_obj.dma_cookie[0].dmac_address; 4232 #endif 4233 } 4234 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 4235 "smp->response_xferlen = %d, smp->request_xferlen = %d " 4236 "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length, 4237 smp->data_xfer_len)); 4238 4239 cmd->sync_cmd = MEGASAS_TRUE; 4240 cmd->frame_count = 1; 
4241 4242 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4243 con_log(CL_ANN, (CE_WARN, 4244 "issue_mfi_smp: fw_ioctl failed\n")); 4245 } else { 4246 con_log(CL_ANN1, (CE_NOTE, 4247 "issue_mfi_smp: copy to user space\n")); 4248 4249 if (request_xferlen) { 4250 if (ddi_copyout(request_dma_obj.buffer, request_ubuf, 4251 request_xferlen, mode)) { 4252 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4253 "copy to user space failed\n")); 4254 return (1); 4255 } 4256 } 4257 4258 if (response_xferlen) { 4259 if (ddi_copyout(response_dma_obj.buffer, response_ubuf, 4260 response_xferlen, mode)) { 4261 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4262 "copy to user space failed\n")); 4263 return (1); 4264 } 4265 } 4266 } 4267 4268 ksmp->cmd_status = smp->cmd_status; 4269 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 4270 smp->cmd_status)); 4271 4272 4273 if (request_xferlen) { 4274 /* free kernel buffer */ 4275 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS) 4276 return (1); 4277 } 4278 4279 if (response_xferlen) { 4280 /* free kernel buffer */ 4281 if (mega_free_dma_obj(instance, response_dma_obj) != 4282 DDI_SUCCESS) 4283 return (1); 4284 } 4285 4286 return (0); 4287 } 4288 4289 /* 4290 * issue_mfi_stp 4291 */ 4292 static int 4293 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4294 struct megasas_cmd *cmd, int mode) 4295 { 4296 void *fis_ubuf; 4297 void *data_ubuf; 4298 uint32_t fis_xferlen = 0; 4299 uint32_t data_xferlen = 0; 4300 uint_t model; 4301 dma_obj_t fis_dma_obj; 4302 dma_obj_t data_dma_obj; 4303 struct megasas_stp_frame *kstp; 4304 struct megasas_stp_frame *stp; 4305 4306 stp = &cmd->frame->stp; 4307 kstp = (struct megasas_stp_frame *)&ioctl->frame[0]; 4308 4309 model = ddi_model_convert_from(mode & FMODELS); 4310 if (model == DDI_MODEL_ILP32) { 4311 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4312 4313 fis_xferlen = kstp->sgl.sge32[0].length; 4314 data_xferlen = 
kstp->sgl.sge32[1].length; 4315 4316 /* SJ! - ubuf needs to be virtual address. */ 4317 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4318 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4319 } 4320 else 4321 { 4322 #ifdef _ILP32 4323 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4324 4325 fis_xferlen = kstp->sgl.sge32[0].length; 4326 data_xferlen = kstp->sgl.sge32[1].length; 4327 4328 /* SJ! - ubuf needs to be virtual address. */ 4329 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4330 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4331 #else 4332 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); 4333 4334 fis_xferlen = kstp->sgl.sge64[0].length; 4335 data_xferlen = kstp->sgl.sge64[1].length; 4336 4337 /* SJ! - ubuf needs to be virtual address. */ 4338 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 4339 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 4340 #endif 4341 } 4342 4343 4344 if (fis_xferlen) { 4345 #ifdef DEBUG 4346 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " 4347 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 4348 #endif 4349 /* means IOCTL requires DMA */ 4350 /* allocate the data transfer buffer */ 4351 fis_dma_obj.size = fis_xferlen; 4352 fis_dma_obj.dma_attr = megasas_generic_dma_attr; 4353 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4354 fis_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4355 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 4356 fis_dma_obj.dma_attr.dma_attr_align = 1; 4357 4358 /* allocate kernel buffer for DMA */ 4359 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) { 4360 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4361 "could not data transfer buffer alloc.")); 4362 return (DDI_FAILURE); 4363 } 4364 4365 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4366 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer, 4367 fis_xferlen, mode)) { 4368 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4369 "copy 
from user space failed\n")); 4370 return (1); 4371 } 4372 } 4373 4374 if (data_xferlen) { 4375 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " 4376 "data_xferlen = %x", data_ubuf, data_xferlen)); 4377 4378 /* means IOCTL requires DMA */ 4379 /* allocate the data transfer buffer */ 4380 data_dma_obj.size = data_xferlen; 4381 data_dma_obj.dma_attr = megasas_generic_dma_attr; 4382 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4383 data_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4384 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 4385 data_dma_obj.dma_attr.dma_attr_align = 1; 4386 4387 /* allocate kernel buffer for DMA */ 4388 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) { 4389 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4390 "could not data transfer buffer alloc.")); 4391 return (DDI_FAILURE); 4392 } 4393 4394 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4395 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer, 4396 data_xferlen, mode)) { 4397 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4398 "copy from user space failed\n")); 4399 return (1); 4400 } 4401 } 4402 4403 stp->cmd = kstp->cmd; 4404 stp->cmd_status = kstp->cmd_status; 4405 stp->connection_status = kstp->connection_status; 4406 stp->target_id = kstp->target_id; 4407 stp->sge_count = kstp->sge_count; 4408 /* stp->context = kstp->context; */ 4409 stp->timeout = kstp->timeout; 4410 stp->data_xfer_len = kstp->data_xfer_len; 4411 4412 bcopy((void *)kstp->fis, (void *)stp->fis, 10); 4413 4414 stp->flags = kstp->flags & ~MFI_FRAME_SGL64; 4415 stp->stp_flags = kstp->stp_flags; 4416 stp->sgl.sge32[0].length = fis_xferlen; 4417 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address; 4418 stp->sgl.sge32[1].length = data_xferlen; 4419 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address; 4420 4421 cmd->sync_cmd = MEGASAS_TRUE; 4422 cmd->frame_count = 1; 4423 4424 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4425 
con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n")); 4426 } else { 4427 /* 4428 * con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4429 * "copy to user space\n")); 4430 */ 4431 4432 if (fis_xferlen) { 4433 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf, 4434 fis_xferlen, mode)) { 4435 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4436 "copy to user space failed\n")); 4437 return (1); 4438 } 4439 } 4440 4441 if (data_xferlen) { 4442 if (ddi_copyout(data_dma_obj.buffer, data_ubuf, 4443 data_xferlen, mode)) { 4444 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4445 "copy to user space failed\n")); 4446 return (1); 4447 } 4448 } 4449 } 4450 4451 kstp->cmd_status = stp->cmd_status; 4452 4453 if (fis_xferlen) { 4454 /* free kernel buffer */ 4455 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 4456 return (1); 4457 } 4458 4459 if (data_xferlen) { 4460 /* free kernel buffer */ 4461 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 4462 return (1); 4463 } 4464 4465 return (0); 4466 } 4467 4468 /* 4469 * fill_up_drv_ver 4470 */ 4471 static void 4472 fill_up_drv_ver(struct megasas_drv_ver *dv) 4473 { 4474 (void) memset(dv, 0, sizeof (struct megasas_drv_ver)); 4475 4476 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4477 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4478 (void) memcpy(dv->os_ver, "Build 36", strlen("Build 36")); 4479 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas")); 4480 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION)); 4481 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE, 4482 strlen(MEGASAS_RELDATE)); 4483 } 4484 4485 /* 4486 * handle_drv_ioctl 4487 */ 4488 static int 4489 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4490 int mode) 4491 { 4492 int i; 4493 int rval = 0; 4494 int *props = NULL; 4495 void *ubuf; 4496 4497 uint8_t *pci_conf_buf; 4498 uint32_t xferlen; 4499 uint32_t num_props; 4500 uint_t model; 4501 struct 
megasas_dcmd_frame *kdcmd; 4502 struct megasas_drv_ver dv; 4503 struct megasas_pci_information pi; 4504 4505 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0]; 4506 4507 model = ddi_model_convert_from(mode & FMODELS); 4508 if (model == DDI_MODEL_ILP32) { 4509 con_log(CL_ANN1, (CE_NOTE, 4510 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4511 4512 xferlen = kdcmd->sgl.sge32[0].length; 4513 4514 /* SJ! - ubuf needs to be virtual address. */ 4515 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4516 } else { 4517 #ifdef _ILP32 4518 con_log(CL_ANN1, (CE_NOTE, 4519 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4520 xferlen = kdcmd->sgl.sge32[0].length; 4521 /* SJ! - ubuf needs to be virtual address. */ 4522 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4523 #else 4524 con_log(CL_ANN1, (CE_NOTE, 4525 "handle_drv_ioctl: DDI_MODEL_LP64")); 4526 xferlen = kdcmd->sgl.sge64[0].length; 4527 /* SJ! - ubuf needs to be virtual address. */ 4528 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4529 #endif 4530 } 4531 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4532 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4533 4534 switch (kdcmd->opcode) { 4535 case MR_DRIVER_IOCTL_DRIVER_VERSION: 4536 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4537 "MR_DRIVER_IOCTL_DRIVER_VERSION")); 4538 4539 fill_up_drv_ver(&dv); 4540 4541 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4542 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4543 "MR_DRIVER_IOCTL_DRIVER_VERSION : " 4544 "copy to user space failed\n")); 4545 kdcmd->cmd_status = 1; 4546 rval = 1; 4547 } else { 4548 kdcmd->cmd_status = 0; 4549 } 4550 break; 4551 case MR_DRIVER_IOCTL_PCI_INFORMATION: 4552 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4553 "MR_DRIVER_IOCTL_PCI_INFORMAITON")); 4554 4555 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 4556 0, "reg", &props, &num_props)) { 4557 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4558 "MR_DRIVER_IOCTL_PCI_INFORMATION : " 4559 "ddi_prop_look_int_array failed\n")); 
4560 rval = 1; 4561 } else { 4562 4563 pi.busNumber = (props[0] >> 16) & 0xFF; 4564 pi.deviceNumber = (props[0] >> 11) & 0x1f; 4565 pi.functionNumber = (props[0] >> 8) & 0x7; 4566 ddi_prop_free((void *)props); 4567 } 4568 4569 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 4570 4571 for (i = 0; i < (sizeof (struct megasas_pci_information) - 4572 offsetof(struct megasas_pci_information, pciHeaderInfo)); 4573 i++) { 4574 pci_conf_buf[i] = 4575 pci_config_get8(instance->pci_handle, i); 4576 } 4577 4578 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 4579 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4580 "MR_DRIVER_IOCTL_PCI_INFORMATION : " 4581 "copy to user space failed\n")); 4582 kdcmd->cmd_status = 1; 4583 rval = 1; 4584 } else { 4585 kdcmd->cmd_status = 0; 4586 } 4587 break; 4588 default: 4589 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4590 "invalid driver specific IOCTL opcode = 0x%x", 4591 kdcmd->opcode)); 4592 kdcmd->cmd_status = 1; 4593 rval = 1; 4594 break; 4595 } 4596 4597 return (rval); 4598 } 4599 4600 /* 4601 * handle_mfi_ioctl 4602 */ 4603 static int 4604 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4605 int mode) 4606 { 4607 int rval = 0; 4608 4609 struct megasas_header *hdr; 4610 struct megasas_cmd *cmd; 4611 4612 cmd = get_mfi_pkt(instance); 4613 4614 if (!cmd) { 4615 con_log(CL_ANN, (CE_WARN, "megasas: " 4616 "failed to get a cmd packet\n")); 4617 return (1); 4618 } 4619 4620 hdr = (struct megasas_header *)&ioctl->frame[0]; 4621 4622 switch (hdr->cmd) { 4623 case MFI_CMD_OP_DCMD: 4624 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode); 4625 break; 4626 case MFI_CMD_OP_SMP: 4627 rval = issue_mfi_smp(instance, ioctl, cmd, mode); 4628 break; 4629 case MFI_CMD_OP_STP: 4630 rval = issue_mfi_stp(instance, ioctl, cmd, mode); 4631 break; 4632 case MFI_CMD_OP_LD_SCSI: 4633 case MFI_CMD_OP_PD_SCSI: 4634 rval = issue_mfi_pthru(instance, ioctl, cmd, mode); 4635 break; 4636 default: 4637 con_log(CL_ANN, (CE_WARN, 
"handle_mfi_ioctl: " 4638 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd)); 4639 rval = 1; 4640 break; 4641 } 4642 4643 4644 return_mfi_pkt(instance, cmd); 4645 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) 4646 rval = 1; 4647 return (rval); 4648 } 4649 4650 /* 4651 * AEN 4652 */ 4653 static int 4654 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen) 4655 { 4656 int rval = 0; 4657 4658 rval = register_mfi_aen(instance, instance->aen_seq_num, 4659 aen->class_locale_word); 4660 4661 aen->cmd_status = (uint8_t)rval; 4662 4663 return (rval); 4664 } 4665 4666 static int 4667 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num, 4668 uint32_t class_locale_word) 4669 { 4670 int ret_val; 4671 4672 struct megasas_cmd *cmd; 4673 struct megasas_dcmd_frame *dcmd; 4674 union megasas_evt_class_locale curr_aen; 4675 union megasas_evt_class_locale prev_aen; 4676 4677 /* 4678 * If there an AEN pending already (aen_cmd), check if the 4679 * class_locale of that pending AEN is inclusive of the new 4680 * AEN request we currently have. If it is, then we don't have 4681 * to do anything. In other words, whichever events the current 4682 * AEN request is subscribing to, have already been subscribed 4683 * to. 4684 * 4685 * If the old_cmd is _not_ inclusive, then we have to abort 4686 * that command, form a class_locale that is superset of both 4687 * old and current and re-issue to the FW 4688 */ 4689 4690 curr_aen.word = class_locale_word; 4691 4692 if (instance->aen_cmd) { 4693 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; 4694 4695 /* 4696 * A class whose enum value is smaller is inclusive of all 4697 * higher values. If a PROGRESS (= -1) was previously 4698 * registered, then a new registration requests for higher 4699 * classes need not be sent to FW. They are automatically 4700 * included. 4701 * 4702 * Locale numbers don't have such hierarchy. 
They are bitmap 4703 * values 4704 */ 4705 if ((prev_aen.members.class <= curr_aen.members.class) && 4706 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4707 curr_aen.members.locale)) { 4708 /* 4709 * Previously issued event registration includes 4710 * current request. Nothing to do. 4711 */ 4712 4713 return (0); 4714 } else { 4715 curr_aen.members.locale |= prev_aen.members.locale; 4716 4717 if (prev_aen.members.class < curr_aen.members.class) 4718 curr_aen.members.class = prev_aen.members.class; 4719 4720 ret_val = abort_aen_cmd(instance, instance->aen_cmd); 4721 4722 if (ret_val) { 4723 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 4724 "failed to abort prevous AEN command\n")); 4725 4726 return (ret_val); 4727 } 4728 } 4729 } else { 4730 curr_aen.word = class_locale_word; 4731 } 4732 4733 cmd = get_mfi_pkt(instance); 4734 4735 if (!cmd) 4736 return (-ENOMEM); 4737 4738 dcmd = &cmd->frame->dcmd; 4739 4740 /* for(i = 0; i < 12; i++) dcmd->mbox.b[i] = 0; */ 4741 (void) memset(dcmd->mbox.b, 0, 12); 4742 4743 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4744 sizeof (struct megasas_evt_detail)); 4745 4746 /* Prepare DCMD for aen registration */ 4747 dcmd->cmd = MFI_CMD_OP_DCMD; 4748 dcmd->cmd_status = 0x0; 4749 dcmd->sge_count = 1; 4750 dcmd->flags = MFI_FRAME_DIR_READ; 4751 dcmd->timeout = 0; 4752 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail); 4753 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 4754 dcmd->mbox.w[0] = seq_num; 4755 dcmd->mbox.w[1] = curr_aen.word; 4756 dcmd->sgl.sge32[0].phys_addr = 4757 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address; 4758 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail); 4759 4760 instance->aen_seq_num = seq_num; 4761 4762 /* 4763 * Store reference to the cmd used to register for AEN. 
When an 4764 * application wants us to register for AEN, we have to abort this 4765 * cmd and re-register with a new EVENT LOCALE supplied by that app 4766 */ 4767 instance->aen_cmd = cmd; 4768 4769 cmd->frame_count = 1; 4770 4771 /* Issue the aen registration frame */ 4772 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4773 instance->func_ptr->issue_cmd(cmd, instance); 4774 4775 return (0); 4776 } 4777 4778 #ifndef lint 4779 /*ARGSUSED*/ 4780 static void 4781 megasas_minphys(struct buf *bp) 4782 { 4783 con_log(CL_ANN1, (CE_CONT, ("minphys CALLED\n"))); 4784 } 4785 #endif 4786 4787 static void 4788 display_scsi_inquiry(caddr_t scsi_inq) 4789 { 4790 #define MAX_SCSI_DEVICE_CODE 14 4791 int i; 4792 char inquiry_buf[256] = {0}; 4793 int len; 4794 const char *const scsi_device_types[] = { 4795 "Direct-Access ", 4796 "Sequential-Access", 4797 "Printer ", 4798 "Processor ", 4799 "WORM ", 4800 "CD-ROM ", 4801 "Scanner ", 4802 "Optical Device ", 4803 "Medium Changer ", 4804 "Communications ", 4805 "Unknown ", 4806 "Unknown ", 4807 "Unknown ", 4808 "Enclosure ", 4809 }; 4810 4811 len = 0; 4812 4813 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4814 for (i = 8; i < 16; i++) { 4815 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4816 scsi_inq[i]); 4817 } 4818 4819 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4820 4821 for (i = 16; i < 32; i++) { 4822 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4823 scsi_inq[i]); 4824 } 4825 4826 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4827 4828 for (i = 32; i < 36; i++) { 4829 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4830 scsi_inq[i]); 4831 } 4832 4833 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4834 4835 4836 i = scsi_inq[0] & 0x1f; 4837 4838 4839 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4840 i < MAX_SCSI_DEVICE_CODE ? 
scsi_device_types[i] : 4841 "Unknown "); 4842 4843 4844 len += snprintf(inquiry_buf + len, 265 - len, 4845 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4846 4847 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4848 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4849 } else { 4850 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4851 } 4852 4853 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4854 } 4855 4856 #if defined(NOT_YET) && !defined(lint) 4857 /* 4858 * lint pointed out a bug that pkt may be used before being set 4859 */ 4860 static void 4861 io_timeout_checker(void *arg) 4862 { 4863 unsigned int cookie; 4864 struct scsi_pkt *pkt; 4865 struct megasas_instance *instance = arg; 4866 4867 cookie = ddi_enter_critical(); 4868 4869 /* decrease the timeout value per each packet */ 4870 4871 if (pkt->pkt_time == 0) { 4872 /* this means that the scsi command has timed out */ 4873 /* pull out the packet from the list */ 4874 /* call callback in the scsi_pkt structure */ 4875 } 4876 4877 ddi_exit_critical(cookie); 4878 4879 /* schedule next timeout check */ 4880 instance->timeout_id = timeout(io_timeout_checker, (void *)instance, 4881 drv_usectohz(MEGASAS_1_SECOND)); 4882 } 4883 #endif /* defined(NOT_YET) && !defined(lint) */ 4884 4885 static int 4886 read_fw_status_reg_xscale(struct megasas_instance *instance) 4887 { 4888 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4889 return ((int)RD_OB_MSG_0(instance)); 4890 4891 } 4892 4893 static int 4894 read_fw_status_reg_ppc(struct megasas_instance *instance) 4895 { 4896 /* con_log(CL_ANN, (CE_WARN, "read_fw_status_reg_ppc: called\n")); */ 4897 4898 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4899 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4900 } 4901 4902 static void 4903 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance) 4904 { 4905 atomic_add_16(&instance->fw_outstanding, 1); 4906 /* push_pend_queue(instance, cmd); */ 4907 4908 /* Issue the command to the FW */ 4909 /* LINTED 
E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);
}

/*
 * issue_cmd_ppc
 *
 * Post a command to the 1078 (ppc) inbound queue port: frame address
 * with the frame count and a flag encoded in the low bits.
 */
static void
issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	/* con_log(CL_ANN, (CE_WARN, "issue_cmd_ppc: called\n")); */

	atomic_add_16(&instance->fw_outstanding, 1);

	/* Issue the command to the FW */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}

/*
 * issue_cmd_in_sync_mode
 *
 * Post the frame and block on int_cmd_cv until the interrupt path
 * updates cmd->cmd_status.  Returns 0 on completion, 1 on timeout.
 */
static int
issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int	i;
	/*
	 * NOTE(review): despite the name, this bounds the number of
	 * cv_wait() wakeups, not elapsed milliseconds — TODO confirm
	 * the intended timeout semantics.
	 */
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * 10000;

	/* sentinel: interrupt handler overwrites this on completion */
	cmd->cmd_status = ENODATA;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * issue_cmd_in_sync_mode_ppc
 *
 * 1078 (ppc) variant of the above; differs only in the queue-port
 * address encoding.
 */
static int
issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int	i;
	/* see NOTE in issue_cmd_in_sync_mode_xscale about this bound */
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * 10000;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));

	cmd->cmd_status = ENODATA;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * issue_cmd_in_poll_mode
 *
 * Post the frame with the DONT_POST_IN_REPLY_QUEUE flag and busy-wait
 * (1ms steps) for the firmware to overwrite the 0xFF status sentinel.
 */
static int
issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int	i;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * 1000;

	struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame;

	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	/* wait for cmd_status to change */
	for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) {
		drv_usecwait(1000);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == 0xff) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * issue_cmd_in_poll_mode_ppc
 *
 * 1078 (ppc) variant; differs only in the queue-port encoding.
 */
static int
issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int	i;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * 1000;

	struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));

	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change */
	for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) {
		drv_usecwait(1000);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == 0xff) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/* Enable controller interrupts (xscale: via the MFI intr mask macro). */
static void
enable_intr_xscale(struct megasas_instance *instance)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	MFI_ENABLE_INTR(instance);
}

/*
 * Enable controller interrupts on 1078 (ppc): clear any pending
 * doorbell state, then unmask only the reply-message interrupt.
 */
static void
enable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t	mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance);

	/*
	 * As 1078DE is same as 1078 chip, the interrupt mask
	 * remains the same.
	 */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
	/* WR_OB_INTR_MASK(~0x80000000, instance); */

	/* dummy read to force PCI flush */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x\n", mask));
}

/* Disable controller interrupts (xscale variant). */
static void
disable_intr_xscale(struct megasas_instance *instance)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	MFI_DISABLE_INTR(instance);
}

/* Disable controller interrupts on 1078 (ppc) by masking everything. */
static void
disable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t	mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_MASK(0xFFFFFFFF, instance);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

/*
 * Interrupt-claim check for xscale: returns DDI_INTR_UNCLAIMED if the
 * outbound status register shows no interrupt of ours, otherwise acks
 * it (write-back of the same value) and claims it.
 */
static int
intr_ack_xscale(struct megasas_instance *instance)
{
	uint32_t	status;

	/* check if it is our interrupt */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_STATUS(status, instance);

	return (DDI_INTR_CLAIMED);
}

/*
 * Interrupt-claim check for 1078 (ppc): same contract as the xscale
 * variant, but the ack goes through the doorbell-clear register.
 */
static int
intr_ack_ppc(struct megasas_instance *instance)
{
	uint32_t	status;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));

	/* check if it is our interrupt */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));

	/*
	 * As 1078DE is same as 1078 chip, the status field
	 * remains the same.
	 */
	if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));

	return (DDI_INTR_CLAIMED);
}

/*
 * megasas_common_check
 *
 * FMA post-command check: inspect every DMA/access handle touched by a
 * command.  On any fault, report the service impact, mark the packet
 * (if any) as a transport error, and return DDI_FAILURE.
 */
static int
megasas_common_check(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int ret = DDI_SUCCESS;

	if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
	    DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
	    DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		/* register map errors are cleared after being reported */
		ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}

	return (ret);
}

/*
 * FMA error callback registered with ddi_fm_handler_register().
 */
/*ARGSUSED*/
static int
megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * megasas_fm_init
 *
 * Register the instance's requested FMA capabilities with the I/O
 * Fault Services framework and adjust the shared access/DMA
 * attributes accordingly.
 */
static void
megasas_fm_init(struct megasas_instance *instance)
{
	/* Need to change iblock to priority for new MSI intr */
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (instance->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */

		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */

		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_setup(instance->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_register(instance->dip,
			    megasas_fm_error_cb, (void*) instance);
		}
	} else {
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		megasas_generic_dma_attr.dma_attr_flags = 0;
	}
}

/*
 * megasas_fm_fini
 *
 * Tear down everything megasas_fm_init() registered, in reverse order.
 */
static void
megasas_fm_fini(struct megasas_instance *instance)
{
	/* Only unregister FMA capabilities if registered */
	if (instance->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_unregister(instance->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_teardown(instance->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(instance->dip);

		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		megasas_generic_dma_attr.dma_attr_flags = 0;
	}
}

/*
 * Return the FMA status of an access handle (DDI_FAILURE for NULL).
 */
int
megasas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);

	return (de.fme_status);
}

/*
 * Return the FMA status of a DMA handle (DDI_FAILURE for NULL).
 */
int
megasas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);

	return (de.fme_status);
}

/*
 * megasas_fm_ereport
 *
 * Post a device ereport of class DDI_FM_DEVICE.<detail> if the
 * instance is ereport-capable.
 */
void
megasas_fm_ereport(struct megasas_instance *instance, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
		ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
	}
}