1 /* 2 * megaraid_sas.c: source for mega_sas driver 3 * 4 * MegaRAID device driver for SAS controllers 5 * Copyright (c) 2005-2008, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com> 11 * Seokmann Ju 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright notice, 17 * this list of conditions and the following disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above copyright notice, 20 * this list of conditions and the following disclaimer in the documentation 21 * and/or other materials provided with the distribution. 22 * 23 * 3. Neither the name of the author nor the names of its contributors may be 24 * used to endorse or promote products derived from this software without 25 * specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * DAMAGE. 39 */ 40 41 /* 42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 43 * Use is subject to license terms. 44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved. 
45 */ 46 47 #include <sys/types.h> 48 #include <sys/param.h> 49 #include <sys/file.h> 50 #include <sys/errno.h> 51 #include <sys/open.h> 52 #include <sys/cred.h> 53 #include <sys/modctl.h> 54 #include <sys/conf.h> 55 #include <sys/devops.h> 56 #include <sys/cmn_err.h> 57 #include <sys/kmem.h> 58 #include <sys/stat.h> 59 #include <sys/mkdev.h> 60 #include <sys/pci.h> 61 #include <sys/scsi/scsi.h> 62 #include <sys/ddi.h> 63 #include <sys/sunddi.h> 64 #include <sys/atomic.h> 65 #include <sys/signal.h> 66 67 #include "megaraid_sas.h" 68 69 /* 70 * FMA header files 71 */ 72 #include <sys/ddifm.h> 73 #include <sys/fm/protocol.h> 74 #include <sys/fm/util.h> 75 #include <sys/fm/io/ddi.h> 76 77 /* 78 * Local static data 79 */ 80 static void *megasas_state = NULL; 81 static int debug_level_g = CL_ANN; 82 83 #pragma weak scsi_hba_open 84 #pragma weak scsi_hba_close 85 #pragma weak scsi_hba_ioctl 86 87 static ddi_dma_attr_t megasas_generic_dma_attr = { 88 DMA_ATTR_V0, /* dma_attr_version */ 89 0, /* low DMA address range */ 90 0xFFFFFFFFU, /* high DMA address range */ 91 0xFFFFFFFFU, /* DMA counter register */ 92 8, /* DMA address alignment */ 93 0x07, /* DMA burstsizes */ 94 1, /* min DMA size */ 95 0xFFFFFFFFU, /* max DMA size */ 96 0xFFFFFFFFU, /* segment boundary */ 97 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */ 98 512, /* granularity of device */ 99 0 /* bus specific DMA flags */ 100 }; 101 102 int32_t megasas_max_cap_maxxfer = 0x1000000; 103 104 /* 105 * cb_ops contains base level routines 106 */ 107 static struct cb_ops megasas_cb_ops = { 108 megasas_open, /* open */ 109 megasas_close, /* close */ 110 nodev, /* strategy */ 111 nodev, /* print */ 112 nodev, /* dump */ 113 nodev, /* read */ 114 nodev, /* write */ 115 megasas_ioctl, /* ioctl */ 116 nodev, /* devmap */ 117 nodev, /* mmap */ 118 nodev, /* segmap */ 119 nochpoll, /* poll */ 120 nodev, /* cb_prop_op */ 121 0, /* streamtab */ 122 D_NEW | D_HOTPLUG, /* cb_flag */ 123 CB_REV, /* cb_rev */ 124 nodev, /* cb_aread */ 125 nodev /* cb_awrite */ 126 }; 127 128 /* 129 * dev_ops contains configuration routines 130 */ 131 static struct dev_ops megasas_ops = { 132 DEVO_REV, /* rev, */ 133 0, /* refcnt */ 134 megasas_getinfo, /* getinfo */ 135 nulldev, /* identify */ 136 nulldev, /* probe */ 137 megasas_attach, /* attach */ 138 megasas_detach, /* detach */ 139 megasas_reset, /* reset */ 140 &megasas_cb_ops, /* char/block ops */ 141 NULL, /* bus ops */ 142 NULL, /* power */ 143 ddi_quiesce_not_supported, /* devo_quiesce */ 144 }; 145 146 static struct modldrv modldrv = { 147 &mod_driverops, /* module type - driver */ 148 MEGASAS_VERSION, 149 &megasas_ops, /* driver ops */ 150 }; 151 152 static struct modlinkage modlinkage = { 153 MODREV_1, /* ml_rev - must be MODREV_1 */ 154 &modldrv, /* ml_linkage */ 155 NULL /* end of driver linkage */ 156 }; 157 158 static struct ddi_device_acc_attr endian_attr = { 159 DDI_DEVICE_ATTR_V1, 160 DDI_STRUCTURE_LE_ACC, 161 DDI_STRICTORDER_ACC, 162 DDI_DEFAULT_ACC 163 }; 164 165 166 /* 167 * ************************************************************************** * 168 * * 169 * common entry points - for loadable kernel modules * 170 * * 171 * ************************************************************************** * 172 */ 173 174 /* 175 * _init - initialize a loadable module 176 * @void 177 * 178 * The driver should perform any one-time resource allocation or data 179 * initialization during driver loading in _init(). For example, the driver 180 * should initialize any mutexes global to the driver in this routine. 
181 * The driver should not, however, use _init() to allocate or initialize 182 * anything that has to do with a particular instance of the device. 183 * Per-instance initialization must be done in attach(). 184 */ 185 int 186 _init(void) 187 { 188 int ret; 189 190 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 191 192 ret = ddi_soft_state_init(&megasas_state, 193 sizeof (struct megasas_instance), 0); 194 195 if (ret != 0) { 196 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state")); 197 return (ret); 198 } 199 200 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 201 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba")); 202 ddi_soft_state_fini(&megasas_state); 203 return (ret); 204 } 205 206 ret = mod_install(&modlinkage); 207 208 if (ret != 0) { 209 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed")); 210 scsi_hba_fini(&modlinkage); 211 ddi_soft_state_fini(&megasas_state); 212 } 213 214 return (ret); 215 } 216 217 /* 218 * _info - returns information about a loadable module. 219 * @void 220 * 221 * _info() is called to return module information. This is a typical entry 222 * point that does predefined role. It simply calls mod_info(). 223 */ 224 int 225 _info(struct modinfo *modinfop) 226 { 227 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 228 229 return (mod_info(&modlinkage, modinfop)); 230 } 231 232 /* 233 * _fini - prepare a loadable module for unloading 234 * @void 235 * 236 * In _fini(), the driver should release any resources that were allocated in 237 * _init(). The driver must remove itself from the system module list. 238 */ 239 int 240 _fini(void) 241 { 242 int ret; 243 244 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 245 246 if ((ret = mod_remove(&modlinkage)) != 0) 247 return (ret); 248 249 scsi_hba_fini(&modlinkage); 250 251 ddi_soft_state_fini(&megasas_state); 252 253 return (ret); 254 } 255 256 257 /* 258 * ************************************************************************** * 259 * * 260 * common entry points - for autoconfiguration * 261 * * 262 * ************************************************************************** * 263 */ 264 /* 265 * attach - adds a device to the system as part of initialization 266 * @dip: 267 * @cmd: 268 * 269 * The kernel calls a driver's attach() entry point to attach an instance of 270 * a device (for MegaRAID, it is instance of a controller) or to resume 271 * operation for an instance of a device that has been suspended or has been 272 * shut down by the power management framework 273 * The attach() entry point typically includes the following types of 274 * processing: 275 * - allocate a soft-state structure for the device instance (for MegaRAID, 276 * controller instance) 277 * - initialize per-instance mutexes 278 * - initialize condition variables 279 * - register the device's interrupts (for MegaRAID, controller's interrupts) 280 * - map the registers and memory of the device instance (for MegaRAID, 281 * controller instance) 282 * - create minor device nodes for the device instance (for MegaRAID, 283 * controller instance) 284 * - report that the device instance (for MegaRAID, controller instance) has 285 * attached 286 */ 287 static int 288 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 289 { 290 int instance_no; 291 int nregs; 292 uint8_t added_isr_f = 0; 293 uint8_t added_soft_isr_f = 0; 294 uint8_t create_devctl_node_f = 0; 295 uint8_t create_scsi_node_f = 0; 296 uint8_t create_ioc_node_f = 0; 297 uint8_t tran_alloc_f = 0; 298 uint8_t 
irq; 299 uint16_t vendor_id; 300 uint16_t device_id; 301 uint16_t subsysvid; 302 uint16_t subsysid; 303 uint16_t command; 304 305 scsi_hba_tran_t *tran; 306 ddi_dma_attr_t tran_dma_attr; 307 struct megasas_instance *instance; 308 309 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 310 311 /* CONSTCOND */ 312 ASSERT(NO_COMPETING_THREADS); 313 314 instance_no = ddi_get_instance(dip); 315 316 /* 317 * Since we know that some instantiations of this device can be 318 * plugged into slave-only SBus slots, check to see whether this is 319 * one such. 320 */ 321 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 322 con_log(CL_ANN, (CE_WARN, 323 "mega%d: Device in slave-only slot, unused", instance_no)); 324 return (DDI_FAILURE); 325 } 326 327 switch (cmd) { 328 case DDI_ATTACH: 329 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH")); 330 /* allocate the soft state for the instance */ 331 if (ddi_soft_state_zalloc(megasas_state, instance_no) 332 != DDI_SUCCESS) { 333 con_log(CL_ANN, (CE_WARN, 334 "mega%d: Failed to allocate soft state", 335 instance_no)); 336 337 return (DDI_FAILURE); 338 } 339 340 instance = (struct megasas_instance *)ddi_get_soft_state 341 (megasas_state, instance_no); 342 343 if (instance == NULL) { 344 con_log(CL_ANN, (CE_WARN, 345 "mega%d: Bad soft state", instance_no)); 346 347 ddi_soft_state_free(megasas_state, instance_no); 348 349 return (DDI_FAILURE); 350 } 351 352 bzero((caddr_t)instance, 353 sizeof (struct megasas_instance)); 354 355 instance->func_ptr = kmem_zalloc( 356 sizeof (struct megasas_func_ptr), KM_SLEEP); 357 ASSERT(instance->func_ptr); 358 359 /* Setup the PCI configuration space handles */ 360 if (pci_config_setup(dip, &instance->pci_handle) != 361 DDI_SUCCESS) { 362 con_log(CL_ANN, (CE_WARN, 363 "mega%d: pci config setup failed ", 364 instance_no)); 365 366 kmem_free(instance->func_ptr, 367 sizeof (struct megasas_func_ptr)); 368 ddi_soft_state_free(megasas_state, instance_no); 369 370 return (DDI_FAILURE); 371 } 372 373 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 374 con_log(CL_ANN, (CE_WARN, 375 "megaraid: failed to get registers.")); 376 377 pci_config_teardown(&instance->pci_handle); 378 kmem_free(instance->func_ptr, 379 sizeof (struct megasas_func_ptr)); 380 ddi_soft_state_free(megasas_state, instance_no); 381 382 return (DDI_FAILURE); 383 } 384 385 vendor_id = pci_config_get16(instance->pci_handle, 386 PCI_CONF_VENID); 387 device_id = pci_config_get16(instance->pci_handle, 388 PCI_CONF_DEVID); 389 390 subsysvid = pci_config_get16(instance->pci_handle, 391 PCI_CONF_SUBVENID); 392 subsysid = pci_config_get16(instance->pci_handle, 393 PCI_CONF_SUBSYSID); 394 395 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 396 (pci_config_get16(instance->pci_handle, 397 PCI_CONF_COMM) | PCI_COMM_ME)); 398 irq = pci_config_get8(instance->pci_handle, 399 PCI_CONF_ILINE); 400 401 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: " 402 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n", 403 instance_no, vendor_id, device_id, subsysvid, 404 subsysid, irq, MEGASAS_VERSION)); 405 406 /* enable bus-mastering */ 407 command = pci_config_get16(instance->pci_handle, 408 PCI_CONF_COMM); 409 410 if (!(command & PCI_COMM_ME)) { 411 command |= PCI_COMM_ME; 412 413 pci_config_put16(instance->pci_handle, 414 PCI_CONF_COMM, command); 415 416 con_log(CL_ANN, (CE_CONT, "megaraid%d: " 417 "enable bus-mastering\n", instance_no)); 418 } else { 419 con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: " 420 "bus-mastering already set\n", instance_no)); 421 } 422 423 /* initialize function pointers 
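 * The 1078R/DE controllers are driven through the ppc register
 * interface; all other supported controllers (1064/8R) use the
 * xscale interface.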
*/ 424 if ((device_id == PCI_DEVICE_ID_LSI_1078) || 425 (device_id == PCI_DEVICE_ID_LSI_1078DE)) { 426 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: " 427 "1078R/DE detected\n", instance_no)); 428 instance->func_ptr->read_fw_status_reg = 429 read_fw_status_reg_ppc; 430 instance->func_ptr->issue_cmd = issue_cmd_ppc; 431 instance->func_ptr->issue_cmd_in_sync_mode = 432 issue_cmd_in_sync_mode_ppc; 433 instance->func_ptr->issue_cmd_in_poll_mode = 434 issue_cmd_in_poll_mode_ppc; 435 instance->func_ptr->enable_intr = 436 enable_intr_ppc; 437 instance->func_ptr->disable_intr = 438 disable_intr_ppc; 439 instance->func_ptr->intr_ack = intr_ack_ppc; 440 } else { 441 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: " 442 "1064/8R detected\n", instance_no)); 443 instance->func_ptr->read_fw_status_reg = 444 read_fw_status_reg_xscale; 445 instance->func_ptr->issue_cmd = 446 issue_cmd_xscale; 447 instance->func_ptr->issue_cmd_in_sync_mode = 448 issue_cmd_in_sync_mode_xscale; 449 instance->func_ptr->issue_cmd_in_poll_mode = 450 issue_cmd_in_poll_mode_xscale; 451 instance->func_ptr->enable_intr = 452 enable_intr_xscale; 453 instance->func_ptr->disable_intr = 454 disable_intr_xscale; 455 instance->func_ptr->intr_ack = 456 intr_ack_xscale; 457 } 458 459 instance->baseaddress = pci_config_get32( 460 instance->pci_handle, PCI_CONF_BASE0); 461 instance->baseaddress &= 0x0fffc; 462 463 instance->dip = dip; 464 instance->vendor_id = vendor_id; 465 instance->device_id = device_id; 466 instance->subsysvid = subsysvid; 467 instance->subsysid = subsysid; 468 469 /* Initialize FMA */ 470 instance->fm_capabilities = ddi_prop_get_int( 471 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 472 "fm-capable", DDI_FM_EREPORT_CAPABLE | 473 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 474 | DDI_FM_ERRCB_CAPABLE); 475 476 megasas_fm_init(instance); 477 478 /* setup the mfi based low level driver */ 479 if (init_mfi(instance) != DDI_SUCCESS) { 480 con_log(CL_ANN, (CE_WARN, "megaraid: " 481 "could not initialize the low level driver")); 482 483 goto fail_attach; 484 } 485 486 /* 487 * Allocate the interrupt blocking cookie. 488 * It represents the information the framework 489 * needs to block interrupts. This cookie will 490 * be used by the locks shared accross our ISR. 491 * These locks must be initialized before we 492 * register our ISR. 
493 * ddi_add_intr(9F) 494 */ 495 if (ddi_get_iblock_cookie(dip, 0, 496 &instance->iblock_cookie) != DDI_SUCCESS) { 497 498 goto fail_attach; 499 } 500 501 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH, 502 &instance->soft_iblock_cookie) != DDI_SUCCESS) { 503 504 goto fail_attach; 505 } 506 507 /* 508 * Initialize the driver mutexes common to 509 * normal/high level isr 510 */ 511 if (ddi_intr_hilevel(dip, 0)) { 512 instance->isr_level = HIGH_LEVEL_INTR; 513 mutex_init(&instance->cmd_pool_mtx, 514 "cmd_pool_mtx", MUTEX_DRIVER, 515 instance->soft_iblock_cookie); 516 mutex_init(&instance->cmd_pend_mtx, 517 "cmd_pend_mtx", MUTEX_DRIVER, 518 instance->soft_iblock_cookie); 519 } else { 520 /* 521 * Initialize the driver mutexes 522 * specific to soft-isr 523 */ 524 instance->isr_level = NORMAL_LEVEL_INTR; 525 mutex_init(&instance->cmd_pool_mtx, 526 "cmd_pool_mtx", MUTEX_DRIVER, 527 instance->iblock_cookie); 528 mutex_init(&instance->cmd_pend_mtx, 529 "cmd_pend_mtx", MUTEX_DRIVER, 530 instance->iblock_cookie); 531 } 532 533 mutex_init(&instance->completed_pool_mtx, 534 "completed_pool_mtx", MUTEX_DRIVER, 535 instance->iblock_cookie); 536 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 537 MUTEX_DRIVER, instance->iblock_cookie); 538 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx", 539 MUTEX_DRIVER, instance->iblock_cookie); 540 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx", 541 MUTEX_DRIVER, instance->iblock_cookie); 542 543 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 544 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL); 545 546 INIT_LIST_HEAD(&instance->completed_pool_list); 547 548 /* Register our isr. */ 549 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr, 550 (caddr_t)instance) != DDI_SUCCESS) { 551 con_log(CL_ANN, (CE_WARN, 552 " ISR did not register")); 553 554 goto fail_attach; 555 } 556 557 added_isr_f = 1; 558 559 /* Register our soft-isr for highlevel interrupts. 
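 * When the hardware interrupt is high-level, the ISR only queues the
 * completed commands and triggers this soft interrupt, which then does
 * the actual completion processing in megasas_softintr().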
*/ 560 if (instance->isr_level == HIGH_LEVEL_INTR) { 561 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 562 &instance->soft_intr_id, NULL, NULL, 563 megasas_softintr, (caddr_t)instance) != 564 DDI_SUCCESS) { 565 con_log(CL_ANN, (CE_WARN, 566 " Software ISR did not register")); 567 568 goto fail_attach; 569 } 570 571 added_soft_isr_f = 1; 572 } 573 574 /* Allocate a transport structure */ 575 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 576 577 if (tran == NULL) { 578 con_log(CL_ANN, (CE_WARN, 579 "scsi_hba_tran_alloc failed")); 580 goto fail_attach; 581 } 582 583 tran_alloc_f = 1; 584 585 instance->tran = tran; 586 587 tran->tran_hba_private = instance; 588 tran->tran_tgt_private = NULL; 589 tran->tran_tgt_init = megasas_tran_tgt_init; 590 tran->tran_tgt_probe = scsi_hba_probe; 591 tran->tran_tgt_free = (void (*)())NULL; 592 tran->tran_init_pkt = megasas_tran_init_pkt; 593 tran->tran_start = megasas_tran_start; 594 tran->tran_abort = megasas_tran_abort; 595 tran->tran_reset = megasas_tran_reset; 596 tran->tran_bus_reset = megasas_tran_bus_reset; 597 tran->tran_getcap = megasas_tran_getcap; 598 tran->tran_setcap = megasas_tran_setcap; 599 tran->tran_destroy_pkt = megasas_tran_destroy_pkt; 600 tran->tran_dmafree = megasas_tran_dmafree; 601 tran->tran_sync_pkt = megasas_tran_sync_pkt; 602 tran->tran_reset_notify = NULL; 603 tran->tran_quiesce = megasas_tran_quiesce; 604 tran->tran_unquiesce = megasas_tran_unquiesce; 605 606 tran_dma_attr = megasas_generic_dma_attr; 607 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 608 609 /* Attach this instance of the hba */ 610 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 611 != DDI_SUCCESS) { 612 con_log(CL_ANN, (CE_WARN, 613 "scsi_hba_attach failed\n")); 614 615 goto fail_attach; 616 } 617 618 /* create devctl node for cfgadm command */ 619 if (ddi_create_minor_node(dip, "devctl", 620 S_IFCHR, INST2DEVCTL(instance_no), 621 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 622 con_log(CL_ANN, (CE_WARN, 623 "megaraid: failed to create devctl node.")); 624 625 goto fail_attach; 626 } 627 628 create_devctl_node_f = 1; 629 630 /* create scsi node for cfgadm command */ 631 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 632 INST2SCSI(instance_no), 633 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 634 DDI_FAILURE) { 635 con_log(CL_ANN, (CE_WARN, 636 "megaraid: failed to create scsi node.")); 637 638 goto fail_attach; 639 } 640 641 create_scsi_node_f = 1; 642 643 (void) sprintf(instance->iocnode, "%d:lsirdctl", 644 instance_no); 645 646 /* 647 * Create a node for applications 648 * for issuing ioctl to the driver. 649 */ 650 if (ddi_create_minor_node(dip, instance->iocnode, 651 S_IFCHR, INST2LSIRDCTL(instance_no), 652 DDI_PSEUDO, 0) == DDI_FAILURE) { 653 con_log(CL_ANN, (CE_WARN, 654 "megaraid: failed to create ioctl node.")); 655 656 goto fail_attach; 657 } 658 659 create_ioc_node_f = 1; 660 661 /* enable interrupt */ 662 instance->func_ptr->enable_intr(instance); 663 664 /* initiate AEN */ 665 if (start_mfi_aen(instance)) { 666 con_log(CL_ANN, (CE_WARN, 667 "megaraid: failed to initiate AEN.")); 668 goto fail_initiate_aen; 669 } 670 671 con_log(CL_DLEVEL1, (CE_NOTE, 672 "AEN started for instance %d.", instance_no)); 673 674 /* Finally! We are on the air. 
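 * Announce the instance and then verify that the register-map and PCI
 * config access handles are still healthy before declaring success.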
 */
		ddi_report_dev(dip);

		if (megasas_check_acc_handle(instance->regmap_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		if (megasas_check_acc_handle(instance->pci_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		break;
	case DDI_PM_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas: DDI_PM_RESUME"));
		break;
	case DDI_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas: DDI_RESUME"));
		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "megasas: invalid attach cmd=%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

fail_initiate_aen:
fail_attach:
	if (create_devctl_node_f) {
		ddi_remove_minor_node(dip, "devctl");
	}

	if (create_scsi_node_f) {
		ddi_remove_minor_node(dip, "scsi");
	}

	if (create_ioc_node_f) {
		ddi_remove_minor_node(dip, instance->iocnode);
	}

	if (tran_alloc_f) {
		scsi_hba_tran_free(tran);
	}

	if (added_soft_isr_f) {
		ddi_remove_softintr(instance->soft_intr_id);
	}

	if (added_isr_f) {
		ddi_remove_intr(dip, 0, instance->iblock_cookie);
	}

	megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

	megasas_fm_fini(instance);

	pci_config_teardown(&instance->pci_handle);

	/* free the function-pointer table allocated in DDI_ATTACH */
	kmem_free(instance->func_ptr, sizeof (struct megasas_func_ptr));

	ddi_soft_state_free(megasas_state, instance_no);

	con_log(CL_ANN, (CE_NOTE,
	    "megasas: return failure from mega_attach\n"));

	return (DDI_FAILURE);
}

/*
 * getinfo - gets device information
 * @dip:
 * @cmd:
 * @arg:
 * @resultp:
 *
 * The system calls getinfo() to obtain configuration information that only
 * the driver knows. The mapping of minor numbers to device instances is
 * entirely under the control of the driver. The system sometimes needs to ask
 * the driver which device a particular dev_t represents.
 * Given the device number, return the devinfo pointer from the scsi_device
 * structure.
 */
/*ARGSUSED*/
static int
megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
{
	int	rval;
	int	megasas_minor = getminor((dev_t)arg);

	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		instance = (struct megasas_instance *)
		    ddi_get_soft_state(megasas_state,
		    MINOR2INST(megasas_minor));

		if (instance == NULL) {
			*resultp = NULL;
			rval = DDI_FAILURE;
		} else {
			*resultp = instance->dip;
			rval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(uintptr_t)MINOR2INST(megasas_minor);
		rval = DDI_SUCCESS;
		break;
	default:
		*resultp = NULL;
		rval = DDI_FAILURE;
	}

	return (rval);
}

/*
 * detach - detaches a device from the system
 * @dip: pointer to the device's dev_info structure
 * @cmd: type of detach
 *
 * A driver's detach() entry point is called to detach an instance of a device
 * that is bound to the driver. The entry point is called with the instance of
 * the device node to be detached and with DDI_DETACH, which is specified as
 * the cmd argument to the entry point.
 * This routine is called during driver unload. We free all the allocated
 * resources and call the corresponding LLD so that it can also release all
 * its resources.
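 * For DDI_DETACH this means unregistering from SCSA, aborting the
 * outstanding AEN command, removing the interrupt handlers and then
 * releasing the DMA buffers, FMA state and soft state.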
 */
static int
megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
	    instance_no);

	if (!instance) {
		con_log(CL_ANN, (CE_WARN,
		    "megasas:%d could not get instance in detach",
		    instance_no));

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_DETACH\n"));

		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas:%d failed to detach",
			    instance_no));

			return (DDI_FAILURE);
		}

		scsi_hba_tran_free(instance->tran);

		if (abort_aen_cmd(instance, instance->aen_cmd)) {
			con_log(CL_ANN, (CE_WARN, "megasas_detach: "
			    "failed to abort previous AEN command\n"));

			return (DDI_FAILURE);
		}

		instance->func_ptr->disable_intr(instance);

		if (instance->isr_level == HIGH_LEVEL_INTR) {
			ddi_remove_softintr(instance->soft_intr_id);
		}

		ddi_remove_intr(dip, 0, instance->iblock_cookie);

		free_space_for_mfi(instance);

		megasas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);

		kmem_free(instance->func_ptr,
		    sizeof (struct megasas_func_ptr));

		ddi_soft_state_free(megasas_state, instance_no);
		break;
	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_PM_SUSPEND\n"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_SUSPEND\n"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *              common entry points - for character driver types             *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * open - gets access to a device
 * @dev:
 * @openflags:
 * @otyp:
 * @credp:
 *
 * Access to a device by one or more application programs is controlled
 * through the open() and close() entry points. The primary function of
 * open() is to verify that the open request is allowed.
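 * For this driver that means the caller must have sufficient privilege
 * (drv_priv()), must open a character node, and the instance must exist.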
915 */ 916 static int 917 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 918 { 919 int rval = 0; 920 921 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 922 923 /* Check root permissions */ 924 if (drv_priv(credp) != 0) { 925 con_log(CL_ANN, (CE_WARN, 926 "megaraid: Non-root ioctl access tried!")); 927 return (EPERM); 928 } 929 930 /* Verify we are being opened as a character device */ 931 if (otyp != OTYP_CHR) { 932 con_log(CL_ANN, (CE_WARN, 933 "megaraid: ioctl node must be a char node\n")); 934 return (EINVAL); 935 } 936 937 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev))) 938 == NULL) { 939 return (ENXIO); 940 } 941 942 if (scsi_hba_open) { 943 rval = scsi_hba_open(dev, openflags, otyp, credp); 944 } 945 946 return (rval); 947 } 948 949 /* 950 * close - gives up access to a device 951 * @dev: 952 * @openflags: 953 * @otyp: 954 * @credp: 955 * 956 * close() should perform any cleanup necessary to finish using the minor 957 * device, and prepare the device (and driver) to be opened again. 958 */ 959 static int 960 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 961 { 962 int rval = 0; 963 964 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 965 966 /* no need for locks! */ 967 968 if (scsi_hba_close) { 969 rval = scsi_hba_close(dev, openflags, otyp, credp); 970 } 971 972 return (rval); 973 } 974 975 /* 976 * ioctl - performs a range of I/O commands for character drivers 977 * @dev: 978 * @cmd: 979 * @arg: 980 * @mode: 981 * @credp: 982 * @rvalp: 983 * 984 * ioctl() routine must make sure that user data is copied into or out of the 985 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(), 986 * and ddi_copyout(), as appropriate. 987 * This is a wrapper routine to serialize access to the actual ioctl routine. 988 * ioctl() should return 0 on success, or the appropriate error number. The 989 * driver may also set the value returned to the calling process through rvalp. 
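 * This driver handles MEGASAS_IOCTL_FIRMWARE and MEGASAS_IOCTL_AEN
 * itself and passes any other command through to scsi_hba_ioctl().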
990 */ 991 static int 992 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 993 int *rvalp) 994 { 995 int rval = 0; 996 997 struct megasas_instance *instance; 998 struct megasas_ioctl ioctl; 999 struct megasas_aen aen; 1000 1001 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1002 1003 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev))); 1004 1005 if (instance == NULL) { 1006 /* invalid minor number */ 1007 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found.")); 1008 return (ENXIO); 1009 } 1010 1011 switch ((uint_t)cmd) { 1012 case MEGASAS_IOCTL_FIRMWARE: 1013 if (ddi_copyin((void *) arg, &ioctl, 1014 sizeof (struct megasas_ioctl), mode)) { 1015 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: " 1016 "ERROR IOCTL copyin")); 1017 return (EFAULT); 1018 } 1019 1020 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) { 1021 rval = handle_drv_ioctl(instance, &ioctl, mode); 1022 } else { 1023 rval = handle_mfi_ioctl(instance, &ioctl, mode); 1024 } 1025 1026 if (ddi_copyout((void *) &ioctl, (void *)arg, 1027 (sizeof (struct megasas_ioctl) - 1), mode)) { 1028 con_log(CL_ANN, (CE_WARN, 1029 "megasas_ioctl: copy_to_user failed\n")); 1030 rval = 1; 1031 } 1032 1033 break; 1034 case MEGASAS_IOCTL_AEN: 1035 if (ddi_copyin((void *) arg, &aen, 1036 sizeof (struct megasas_aen), mode)) { 1037 con_log(CL_ANN, (CE_WARN, 1038 "megasas_ioctl: ERROR AEN copyin")); 1039 return (EFAULT); 1040 } 1041 1042 rval = handle_mfi_aen(instance, &aen); 1043 1044 if (ddi_copyout((void *) &aen, (void *)arg, 1045 sizeof (struct megasas_aen), mode)) { 1046 con_log(CL_ANN, (CE_WARN, 1047 "megasas_ioctl: copy_to_user failed\n")); 1048 rval = 1; 1049 } 1050 1051 break; 1052 default: 1053 rval = scsi_hba_ioctl(dev, cmd, arg, 1054 mode, credp, rvalp); 1055 1056 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: " 1057 "scsi_hba_ioctl called, ret = %x.", rval)); 1058 } 1059 1060 return (rval); 1061 } 1062 1063 /* 1064 * ************************************************************************** * 1065 * * 1066 * common entry points - for block driver types * 1067 * * 1068 * ************************************************************************** * 1069 */ 1070 /* 1071 * reset - TBD 1072 * @dip: 1073 * @cmd: 1074 * 1075 * TBD 1076 */ 1077 /*ARGSUSED*/ 1078 static int 1079 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1080 { 1081 int instance_no; 1082 1083 struct megasas_instance *instance; 1084 1085 instance_no = ddi_get_instance(dip); 1086 instance = (struct megasas_instance *)ddi_get_soft_state 1087 (megasas_state, instance_no); 1088 1089 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1090 1091 if (!instance) { 1092 con_log(CL_ANN, (CE_WARN, 1093 "megaraid:%d could not get adapter in reset", 1094 instance_no)); 1095 return (DDI_FAILURE); 1096 } 1097 1098 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..", 1099 instance_no)); 1100 1101 flush_cache(instance); 1102 1103 return (DDI_SUCCESS); 1104 } 1105 1106 1107 /* 1108 * ************************************************************************** * 1109 * * 1110 * entry points (SCSI HBA) * 1111 * * 1112 * ************************************************************************** * 1113 */ 1114 /* 1115 * tran_tgt_init - initialize a target device instance 1116 * @hba_dip: 1117 * @tgt_dip: 1118 * @tran: 1119 * @sd: 1120 * 1121 * The tran_tgt_init() entry point enables the HBA to allocate and initialize 1122 * any per-target resources. 
tran_tgt_init() also enables the HBA to qualify 1123 * the device's address as valid and supportable for that particular HBA. 1124 * By returning DDI_FAILURE, the instance of the target driver for that device 1125 * is not probed or attached. 1126 */ 1127 /*ARGSUSED*/ 1128 static int 1129 megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1130 scsi_hba_tran_t *tran, struct scsi_device *sd) 1131 { 1132 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1133 1134 return (DDI_SUCCESS); 1135 } 1136 1137 /* 1138 * tran_init_pkt - allocate & initialize a scsi_pkt structure 1139 * @ap: 1140 * @pkt: 1141 * @bp: 1142 * @cmdlen: 1143 * @statuslen: 1144 * @tgtlen: 1145 * @flags: 1146 * @callback: 1147 * 1148 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt 1149 * structure and DMA resources for a target driver request. The 1150 * tran_init_pkt() entry point is called when the target driver calls the 1151 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point 1152 * is a request to perform one or more of three possible services: 1153 * - allocation and initialization of a scsi_pkt structure 1154 * - allocation of DMA resources for data transfer 1155 * - reallocation of DMA resources for the next portion of the data transfer 1156 */ 1157 static struct scsi_pkt * 1158 megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, 1159 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1160 int flags, int (*callback)(), caddr_t arg) 1161 { 1162 struct scsa_cmd *acmd; 1163 struct megasas_instance *instance; 1164 struct scsi_pkt *new_pkt; 1165 1166 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1167 1168 instance = ADDR2MEGA(ap); 1169 1170 /* step #1 : pkt allocation */ 1171 if (pkt == NULL) { 1172 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, 1173 tgtlen, sizeof (struct scsa_cmd), callback, arg); 1174 if (pkt == NULL) { 1175 return (NULL); 1176 } 1177 1178 acmd = PKT2CMD(pkt); 1179 1180 /* 1181 * Initialize the new pkt - we redundantly initialize 1182 * all the fields for illustrative purposes. 1183 */ 1184 acmd->cmd_pkt = pkt; 1185 acmd->cmd_flags = 0; 1186 acmd->cmd_scblen = statuslen; 1187 acmd->cmd_cdblen = cmdlen; 1188 acmd->cmd_dmahandle = NULL; 1189 acmd->cmd_ncookies = 0; 1190 acmd->cmd_cookie = 0; 1191 acmd->cmd_cookiecnt = 0; 1192 acmd->cmd_nwin = 0; 1193 1194 pkt->pkt_address = *ap; 1195 pkt->pkt_comp = (void (*)())NULL; 1196 pkt->pkt_flags = 0; 1197 pkt->pkt_time = 0; 1198 pkt->pkt_resid = 0; 1199 pkt->pkt_state = 0; 1200 pkt->pkt_statistics = 0; 1201 pkt->pkt_reason = 0; 1202 new_pkt = pkt; 1203 } else { 1204 acmd = PKT2CMD(pkt); 1205 new_pkt = NULL; 1206 } 1207 1208 /* step #2 : dma allocation/move */ 1209 if (bp && bp->b_bcount != 0) { 1210 if (acmd->cmd_dmahandle == NULL) { 1211 if (megasas_dma_alloc(instance, pkt, bp, flags, 1212 callback) == -1) { 1213 if (new_pkt) { 1214 scsi_hba_pkt_free(ap, new_pkt); 1215 } 1216 1217 return ((struct scsi_pkt *)NULL); 1218 } 1219 } else { 1220 if (megasas_dma_move(instance, pkt, bp) == -1) { 1221 return ((struct scsi_pkt *)NULL); 1222 } 1223 } 1224 } 1225 1226 return (pkt); 1227 } 1228 1229 /* 1230 * tran_start - transport a SCSI command to the addressed target 1231 * @ap: 1232 * @pkt: 1233 * 1234 * The tran_start() entry point for a SCSI HBA driver is called to transport a 1235 * SCSI command to the addressed target. 
The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point. If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 */
static int
megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);
	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason	= CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command was already completed by the build_cmd()
	 * routine. In that case cmd_done is set, nothing needs to be issued
	 * to the firmware and the appropriate reason is provided in the
	 * pkt_reason field.
	 */
	if (cmd_done) {
		if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
			scsi_hba_pkt_comp(pkt);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "megasas: Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		struct megasas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MEGASAS_TRUE;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		switch (hdr->cmd_status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason		= CMD_CMPLT;
			pkt->pkt_statistics	= 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		return_mfi_pkt(instance, cmd);
		(void) megasas_common_check(instance, cmd);

		scsi_hba_pkt_comp(pkt);

	}

	return (TRAN_ACCEPT);
}

/*
 * tran_abort - Abort any commands that are currently in transport
 * @ap:
 * @pkt:
 *
 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
 * commands that are currently in transport for a particular target. This entry
 * point is called when a target driver calls scsi_abort(). The tran_abort()
 * entry point should attempt to abort the command denoted by the pkt
 * parameter.
If the pkt parameter is NULL, tran_abort() should attempt to 1347 * abort all outstanding commands in the transport layer for the particular 1348 * target or logical unit. 1349 */ 1350 /*ARGSUSED*/ 1351 static int 1352 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1353 { 1354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1355 1356 /* aborting command not supported by H/W */ 1357 1358 return (DDI_FAILURE); 1359 } 1360 1361 /* 1362 * tran_reset - reset either the SCSI bus or target 1363 * @ap: 1364 * @level: 1365 * 1366 * The tran_reset() entry point for a SCSI HBA driver is called to reset either 1367 * the SCSI bus or a particular SCSI target device. This entry point is called 1368 * when a target driver calls scsi_reset(). The tran_reset() entry point must 1369 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the 1370 * particular target or logical unit must be reset. 1371 */ 1372 /*ARGSUSED*/ 1373 static int 1374 megasas_tran_reset(struct scsi_address *ap, int level) 1375 { 1376 struct megasas_instance *instance = ADDR2MEGA(ap); 1377 1378 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1379 1380 if (wait_for_outstanding(instance)) { 1381 return (DDI_FAILURE); 1382 } else { 1383 return (DDI_SUCCESS); 1384 } 1385 } 1386 1387 /* 1388 * tran_bus_reset - reset the SCSI bus 1389 * @dip: 1390 * @level: 1391 * 1392 * The tran_bus_reset() vector in the scsi_hba_tran structure should be 1393 * initialized during the HBA driver's attach(). The vector should point to 1394 * an HBA entry point that is to be called when a user initiates a bus reset. 1395 * Implementation is hardware specific. If the HBA driver cannot reset the 1396 * SCSI bus without affecting the targets, the driver should fail RESET_BUS 1397 * or not initialize this vector. 1398 */ 1399 /*ARGSUSED*/ 1400 static int 1401 megasas_tran_bus_reset(dev_info_t *dip, int level) 1402 { 1403 int instance_no = ddi_get_instance(dip); 1404 1405 struct megasas_instance *instance = ddi_get_soft_state(megasas_state, 1406 instance_no); 1407 1408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1409 1410 if (wait_for_outstanding(instance)) { 1411 return (DDI_FAILURE); 1412 } else { 1413 return (DDI_SUCCESS); 1414 } 1415 } 1416 1417 /* 1418 * tran_getcap - get one of a set of SCSA-defined capabilities 1419 * @ap: 1420 * @cap: 1421 * @whom: 1422 * 1423 * The target driver can request the current setting of the capability for a 1424 * particular target by setting the whom parameter to nonzero. A whom value of 1425 * zero indicates a request for the current setting of the general capability 1426 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1 1427 * for undefined capabilities or the current value of the requested capability. 
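 * This driver returns mostly fixed values; SCSI_CAP_DMA_MAX is capped at
 * megasas_max_cap_maxxfer and SCSI_CAP_INITIATOR_ID comes from the
 * instance's init_id.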
1428 */ 1429 /*ARGSUSED*/ 1430 static int 1431 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom) 1432 { 1433 int rval = 0; 1434 1435 struct megasas_instance *instance = ADDR2MEGA(ap); 1436 1437 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1438 1439 /* we do allow inquiring about capabilities for other targets */ 1440 if (cap == NULL) { 1441 return (-1); 1442 } 1443 1444 switch (scsi_hba_lookup_capstr(cap)) { 1445 case SCSI_CAP_DMA_MAX: 1446 /* Limit to 16MB max transfer */ 1447 rval = megasas_max_cap_maxxfer; 1448 break; 1449 case SCSI_CAP_MSG_OUT: 1450 rval = 1; 1451 break; 1452 case SCSI_CAP_DISCONNECT: 1453 rval = 0; 1454 break; 1455 case SCSI_CAP_SYNCHRONOUS: 1456 rval = 0; 1457 break; 1458 case SCSI_CAP_WIDE_XFER: 1459 rval = 1; 1460 break; 1461 case SCSI_CAP_TAGGED_QING: 1462 rval = 1; 1463 break; 1464 case SCSI_CAP_UNTAGGED_QING: 1465 rval = 1; 1466 break; 1467 case SCSI_CAP_PARITY: 1468 rval = 1; 1469 break; 1470 case SCSI_CAP_INITIATOR_ID: 1471 rval = instance->init_id; 1472 break; 1473 case SCSI_CAP_ARQ: 1474 rval = 1; 1475 break; 1476 case SCSI_CAP_LINKED_CMDS: 1477 rval = 0; 1478 break; 1479 case SCSI_CAP_RESET_NOTIFICATION: 1480 rval = 1; 1481 break; 1482 case SCSI_CAP_GEOMETRY: 1483 rval = -1; 1484 1485 break; 1486 default: 1487 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x", 1488 scsi_hba_lookup_capstr(cap))); 1489 rval = -1; 1490 break; 1491 } 1492 1493 return (rval); 1494 } 1495 1496 /* 1497 * tran_setcap - set one of a set of SCSA-defined capabilities 1498 * @ap: 1499 * @cap: 1500 * @value: 1501 * @whom: 1502 * 1503 * The target driver might request that the new value be set for a particular 1504 * target by setting the whom parameter to nonzero. A whom value of zero 1505 * means that request is to set the new value for the SCSI bus or for adapter 1506 * hardware in general. 1507 * The tran_setcap() should return the following values as appropriate: 1508 * - -1 for undefined capabilities 1509 * - 0 if the HBA driver cannot set the capability to the requested value 1510 * - 1 if the HBA driver is able to set the capability to the requested value 1511 */ 1512 /*ARGSUSED*/ 1513 static int 1514 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 1515 { 1516 int rval = 1; 1517 1518 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1519 1520 /* We don't allow setting capabilities for other targets */ 1521 if (cap == NULL || whom == 0) { 1522 return (-1); 1523 } 1524 1525 switch (scsi_hba_lookup_capstr(cap)) { 1526 case SCSI_CAP_DMA_MAX: 1527 case SCSI_CAP_MSG_OUT: 1528 case SCSI_CAP_PARITY: 1529 case SCSI_CAP_LINKED_CMDS: 1530 case SCSI_CAP_RESET_NOTIFICATION: 1531 case SCSI_CAP_DISCONNECT: 1532 case SCSI_CAP_SYNCHRONOUS: 1533 case SCSI_CAP_UNTAGGED_QING: 1534 case SCSI_CAP_WIDE_XFER: 1535 case SCSI_CAP_INITIATOR_ID: 1536 case SCSI_CAP_ARQ: 1537 /* 1538 * None of these are settable via 1539 * the capability interface. 1540 */ 1541 break; 1542 case SCSI_CAP_TAGGED_QING: 1543 rval = 1; 1544 break; 1545 case SCSI_CAP_SECTOR_SIZE: 1546 rval = 1; 1547 break; 1548 1549 case SCSI_CAP_TOTAL_SECTORS: 1550 rval = 1; 1551 break; 1552 default: 1553 rval = -1; 1554 break; 1555 } 1556 1557 return (rval); 1558 } 1559 1560 /* 1561 * tran_destroy_pkt - deallocate scsi_pkt structure 1562 * @ap: 1563 * @pkt: 1564 * 1565 * The tran_destroy_pkt() entry point is the HBA driver function that 1566 * deallocates scsi_pkt structures. 
The tran_destroy_pkt() entry point is
 * called when the target driver calls scsi_destroy_pkt(). The
 * tran_destroy_pkt() entry point must free any DMA resources that have been
 * allocated for the packet. An implicit DMA synchronization occurs if the
 * DMA resources are freed and any cached data remains after the completion
 * of the transfer.
 */
static void
megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * tran_dmafree - deallocates DMA resources
 * @ap:
 * @pkt:
 *
 * The tran_dmafree() entry point deallocates DMA resources that have been
 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
 * called when the target driver calls scsi_dmafree(). The tran_dmafree()
 * entry point must free only the DMA resources allocated for a scsi_pkt
 * structure, not the scsi_pkt itself. When DMA resources are freed, a DMA
 * synchronization is implicitly performed.
 */
/*ARGSUSED*/
static void
megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * tran_sync_pkt - synchronize the DMA object allocated
 * @ap:
 * @pkt:
 *
 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
 * entry point is called when the target driver calls scsi_sync_pkt(). If the
 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
 * must synchronize the CPU's view of the data. If the data transfer direction
 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
 * device's view of the data.
 */
/*ARGSUSED*/
static void
megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * The ddi_dma_sync() call below is not needed, because it is
	 * already done for each I/O in the ISR.
	 */
#if 0
	int	i;

	register struct scsa_cmd	*acmd = PKT2CMD(pkt);

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
1656 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU); 1657 } 1658 #endif 1659 } 1660 1661 /*ARGSUSED*/ 1662 static int 1663 megasas_tran_quiesce(dev_info_t *dip) 1664 { 1665 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1666 1667 return (1); 1668 } 1669 1670 /*ARGSUSED*/ 1671 static int 1672 megasas_tran_unquiesce(dev_info_t *dip) 1673 { 1674 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1675 1676 return (1); 1677 } 1678 1679 /* 1680 * megasas_isr(caddr_t) 1681 * 1682 * The Interrupt Service Routine 1683 * 1684 * Collect status for all completed commands and do callback 1685 * 1686 */ 1687 static uint_t 1688 megasas_isr(struct megasas_instance *instance) 1689 { 1690 int need_softintr; 1691 uint32_t producer; 1692 uint32_t consumer; 1693 uint32_t context; 1694 1695 struct megasas_cmd *cmd; 1696 1697 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1698 1699 ASSERT(instance); 1700 if (!instance->func_ptr->intr_ack(instance)) { 1701 return (DDI_INTR_UNCLAIMED); 1702 } 1703 1704 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 1705 0, 0, DDI_DMA_SYNC_FORCPU); 1706 1707 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 1708 != DDI_SUCCESS) { 1709 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 1710 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 1711 return (DDI_INTR_UNCLAIMED); 1712 } 1713 1714 producer = *instance->producer; 1715 consumer = *instance->consumer; 1716 1717 con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ", 1718 producer, consumer)); 1719 1720 mutex_enter(&instance->completed_pool_mtx); 1721 1722 while (consumer != producer) { 1723 context = instance->reply_queue[consumer]; 1724 cmd = instance->cmd_list[context]; 1725 mlist_add_tail(&cmd->list, &instance->completed_pool_list); 1726 1727 consumer++; 1728 if (consumer == (instance->max_fw_cmds + 1)) { 1729 consumer = 0; 1730 } 1731 } 1732 1733 mutex_exit(&instance->completed_pool_mtx); 1734 1735 *instance->consumer = consumer; 1736 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 1737 0, 0, DDI_DMA_SYNC_FORDEV); 1738 1739 if (instance->softint_running) { 1740 need_softintr = 0; 1741 } else { 1742 need_softintr = 1; 1743 } 1744 1745 if (instance->isr_level == HIGH_LEVEL_INTR) { 1746 if (need_softintr) { 1747 ddi_trigger_softintr(instance->soft_intr_id); 1748 } 1749 } else { 1750 /* 1751 * Not a high-level interrupt, therefore call the soft level 1752 * interrupt explicitly 1753 */ 1754 (void) megasas_softintr(instance); 1755 } 1756 1757 return (DDI_INTR_CLAIMED); 1758 } 1759 1760 1761 /* 1762 * ************************************************************************** * 1763 * * 1764 * libraries * 1765 * * 1766 * ************************************************************************** * 1767 */ 1768 /* 1769 * get_mfi_pkt : Get a command from the free pool 1770 */ 1771 static struct megasas_cmd * 1772 get_mfi_pkt(struct megasas_instance *instance) 1773 { 1774 mlist_t *head = &instance->cmd_pool_list; 1775 struct megasas_cmd *cmd = NULL; 1776 1777 mutex_enter(&instance->cmd_pool_mtx); 1778 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1779 1780 if (!mlist_empty(head)) { 1781 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1782 mlist_del_init(head->next); 1783 } 1784 if (cmd != NULL) 1785 cmd->pkt = NULL; 1786 mutex_exit(&instance->cmd_pool_mtx); 1787 1788 return (cmd); 1789 } 1790 1791 /* 1792 * return_mfi_pkt : Return a cmd to free command pool 1793 */ 1794 static void 1795 return_mfi_pkt(struct 
megasas_instance *instance, struct megasas_cmd *cmd) 1796 { 1797 mutex_enter(&instance->cmd_pool_mtx); 1798 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1799 1800 mlist_add(&cmd->list, &instance->cmd_pool_list); 1801 1802 mutex_exit(&instance->cmd_pool_mtx); 1803 } 1804 1805 /* 1806 * destroy_mfi_frame_pool 1807 */ 1808 static void 1809 destroy_mfi_frame_pool(struct megasas_instance *instance) 1810 { 1811 int i; 1812 uint32_t max_cmd = instance->max_fw_cmds; 1813 1814 struct megasas_cmd *cmd; 1815 1816 /* return all frames to pool */ 1817 for (i = 0; i < max_cmd; i++) { 1818 1819 cmd = instance->cmd_list[i]; 1820 1821 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) 1822 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj); 1823 1824 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 1825 } 1826 1827 } 1828 1829 /* 1830 * create_mfi_frame_pool 1831 */ 1832 static int 1833 create_mfi_frame_pool(struct megasas_instance *instance) 1834 { 1835 int i = 0; 1836 int cookie_cnt; 1837 uint16_t max_cmd; 1838 uint16_t sge_sz; 1839 uint32_t sgl_sz; 1840 uint32_t tot_frame_size; 1841 1842 struct megasas_cmd *cmd; 1843 1844 max_cmd = instance->max_fw_cmds; 1845 1846 sge_sz = sizeof (struct megasas_sge64); 1847 1848 /* calculated the number of 64byte frames required for SGL */ 1849 sgl_sz = sge_sz * instance->max_num_sge; 1850 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH; 1851 1852 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 1853 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 1854 1855 while (i < max_cmd) { 1856 cmd = instance->cmd_list[i]; 1857 1858 cmd->frame_dma_obj.size = tot_frame_size; 1859 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr; 1860 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 1861 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 1862 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 1863 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 1864 1865 1866 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj); 1867 1868 if (cookie_cnt == -1 || cookie_cnt > 1) { 1869 con_log(CL_ANN, (CE_WARN, 1870 "create_mfi_frame_pool: could not alloc.")); 1871 return (DDI_FAILURE); 1872 } 1873 1874 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 1875 1876 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 1877 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer; 1878 cmd->frame_phys_addr = 1879 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 1880 1881 cmd->sense = (uint8_t *)(((unsigned long) 1882 cmd->frame_dma_obj.buffer) + 1883 tot_frame_size - SENSE_LENGTH); 1884 cmd->sense_phys_addr = 1885 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 1886 tot_frame_size - SENSE_LENGTH; 1887 1888 if (!cmd->frame || !cmd->sense) { 1889 con_log(CL_ANN, (CE_NOTE, 1890 "megasas: pci_pool_alloc failed \n")); 1891 1892 return (-ENOMEM); 1893 } 1894 1895 cmd->frame->io.context = cmd->index; 1896 i++; 1897 1898 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 1899 cmd->frame->io.context, cmd->frame_phys_addr)); 1900 } 1901 1902 return (DDI_SUCCESS); 1903 } 1904 1905 /* 1906 * free_additional_dma_buffer 1907 */ 1908 static void 1909 free_additional_dma_buffer(struct megasas_instance *instance) 1910 { 1911 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 1912 (void) mega_free_dma_obj(instance, 1913 instance->mfi_internal_dma_obj); 1914 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 1915 } 1916 1917 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { 1918 (void) mega_free_dma_obj(instance, 1919 
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * alloc_additional_dma_buffer
 */
static int
alloc_additional_dma_buffer(struct megasas_instance *instance)
{
	uint32_t	reply_q_sz;
	uint32_t	internal_buf_size = PAGESIZE * 2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
	    != 1) {
		con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    reply_q_sz;
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate the event detail buffer."));
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct megasas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * free_space_for_mfi
 */
static void
free_space_for_mfi(struct megasas_instance *instance)
{
	int		i;
	uint32_t	max_cmd = instance->max_fw_cmds;

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	free_additional_dma_buffer(instance);

	/* first free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct megasas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct megasas_cmd *) * max_cmd);

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
2026 * alloc_space_for_mfi 2027 */ 2028 static int 2029 alloc_space_for_mfi(struct megasas_instance *instance) 2030 { 2031 int i; 2032 uint32_t max_cmd; 2033 size_t sz; 2034 2035 struct megasas_cmd *cmd; 2036 2037 max_cmd = instance->max_fw_cmds; 2038 sz = sizeof (struct megasas_cmd *) * max_cmd; 2039 2040 /* 2041 * instance->cmd_list is an array of struct megasas_cmd pointers. 2042 * Allocate the dynamic array first and then allocate individual 2043 * commands. 2044 */ 2045 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); 2046 ASSERT(instance->cmd_list); 2047 2048 for (i = 0; i < max_cmd; i++) { 2049 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd), 2050 KM_SLEEP); 2051 ASSERT(instance->cmd_list[i]); 2052 } 2053 2054 INIT_LIST_HEAD(&instance->cmd_pool_list); 2055 2056 /* add all the commands to command pool (instance->cmd_pool) */ 2057 for (i = 0; i < max_cmd; i++) { 2058 cmd = instance->cmd_list[i]; 2059 cmd->index = i; 2060 2061 mlist_add_tail(&cmd->list, &instance->cmd_pool_list); 2062 } 2063 2064 /* create a frame pool and assign one frame to each cmd */ 2065 if (create_mfi_frame_pool(instance)) { 2066 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n")); 2067 return (DDI_FAILURE); 2068 } 2069 2070 /* create a frame pool and assign one frame to each cmd */ 2071 if (alloc_additional_dma_buffer(instance)) { 2072 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n")); 2073 return (DDI_FAILURE); 2074 } 2075 2076 return (DDI_SUCCESS); 2077 } 2078 2079 /* 2080 * get_ctrl_info 2081 */ 2082 static int 2083 get_ctrl_info(struct megasas_instance *instance, 2084 struct megasas_ctrl_info *ctrl_info) 2085 { 2086 int ret = 0; 2087 2088 struct megasas_cmd *cmd; 2089 struct megasas_dcmd_frame *dcmd; 2090 struct megasas_ctrl_info *ci; 2091 2092 cmd = get_mfi_pkt(instance); 2093 2094 if (!cmd) { 2095 con_log(CL_ANN, (CE_WARN, 2096 "Failed to get a cmd for ctrl info\n")); 2097 return (DDI_FAILURE); 2098 } 2099 2100 dcmd = &cmd->frame->dcmd; 2101 2102 ci = (struct megasas_ctrl_info *)instance->internal_buf; 2103 2104 if (!ci) { 2105 con_log(CL_ANN, (CE_WARN, 2106 "Failed to alloc mem for ctrl info\n")); 2107 return_mfi_pkt(instance, cmd); 2108 return (DDI_FAILURE); 2109 } 2110 2111 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info)); 2112 2113 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */ 2114 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2115 2116 dcmd->cmd = MFI_CMD_OP_DCMD; 2117 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE; 2118 dcmd->sge_count = 1; 2119 dcmd->flags = MFI_FRAME_DIR_READ; 2120 dcmd->timeout = 0; 2121 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info); 2122 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2123 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add; 2124 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info); 2125 2126 cmd->frame_count = 1; 2127 2128 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2129 ret = 0; 2130 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info)); 2131 } else { 2132 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n")); 2133 ret = -1; 2134 } 2135 2136 return_mfi_pkt(instance, cmd); 2137 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2138 ret = -1; 2139 } 2140 2141 return (ret); 2142 } 2143 2144 /* 2145 * abort_aen_cmd 2146 */ 2147 static int 2148 abort_aen_cmd(struct megasas_instance *instance, 2149 struct megasas_cmd *cmd_to_abort) 2150 { 2151 int ret = 0; 2152 2153 struct megasas_cmd *cmd; 2154 struct megasas_abort_frame *abort_fr; 2155 
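	/*
	 * Descriptive note: the MFI abort frame carries no CDB; it names the
	 * victim command by the context value the driver stored in that
	 * command's frame (its pool index) and by the frame's physical
	 * address, which is presumably all the firmware needs to locate and
	 * complete the outstanding MR_DCMD_CTRL_EVENT_WAIT.  A typical
	 * caller-side guard (sketch, not a copy of code from this file):
	 *
	 *	if (instance->aen_cmd != NULL)
	 *		(void) abort_aen_cmd(instance, instance->aen_cmd);
	 */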
2156 cmd = get_mfi_pkt(instance); 2157 2158 if (!cmd) { 2159 con_log(CL_ANN, (CE_WARN, 2160 "Failed to get a cmd to abort the AEN\n")); 2161 return (DDI_FAILURE); 2162 } 2163 2164 abort_fr = &cmd->frame->abort; 2165 2166 /* prepare and issue the abort frame */ 2167 abort_fr->cmd = MFI_CMD_OP_ABORT; 2168 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE; 2169 abort_fr->flags = 0; 2170 abort_fr->abort_context = cmd_to_abort->index; 2171 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 2172 abort_fr->abort_mfi_phys_addr_hi = 0; 2173 2174 instance->aen_cmd->abort_aen = 1; 2175 2176 cmd->sync_cmd = MEGASAS_TRUE; 2177 cmd->frame_count = 1; 2178 2179 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2180 con_log(CL_ANN, (CE_WARN, 2181 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n")); 2182 ret = -1; 2183 } else { 2184 ret = 0; 2185 } 2186 2187 instance->aen_cmd->abort_aen = 1; 2188 instance->aen_cmd = 0; 2189 2190 return_mfi_pkt(instance, cmd); 2191 (void) megasas_common_check(instance, cmd); 2192 2193 return (ret); 2194 } 2195 2196 /* 2197 * init_mfi 2198 */ 2199 static int 2200 init_mfi(struct megasas_instance *instance) 2201 { 2202 off_t reglength; 2203 struct megasas_cmd *cmd; 2204 struct megasas_ctrl_info ctrl_info; 2205 struct megasas_init_frame *init_frame; 2206 struct megasas_init_queue_info *initq_info; 2207 2208 if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength) 2209 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) { 2210 return (DDI_FAILURE); 2211 } 2212 2213 if (reglength > DEFAULT_MFI_MEM_SZ) { 2214 reglength = DEFAULT_MFI_MEM_SZ; 2215 con_log(CL_DLEVEL1, (CE_NOTE, 2216 "mega: register length to map is 0x%lx bytes", reglength)); 2217 } 2218 2219 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO, 2220 &instance->regmap, 0, reglength, &endian_attr, 2221 &instance->regmap_handle) != DDI_SUCCESS) { 2222 con_log(CL_ANN, (CE_NOTE, 2223 "megaraid: couldn't map control registers")); 2224 2225 goto fail_mfi_reg_setup; 2226 } 2227 2228 /* we expect the FW state to be READY */ 2229 if (mfi_state_transition_to_ready(instance)) { 2230 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready")); 2231 goto fail_ready_state; 2232 } 2233 2234 /* get various operational parameters from status register */ 2235 instance->max_num_sge = 2236 (instance->func_ptr->read_fw_status_reg(instance) & 2237 0xFF0000) >> 0x10; 2238 /* 2239 * Reduce the max supported cmds by 1. This is to ensure that the 2240 * reply_q_sz (1 more than the max cmd that driver may send) 2241 * does not exceed max cmds that the FW can support 2242 */ 2243 instance->max_fw_cmds = 2244 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 2245 instance->max_fw_cmds = instance->max_fw_cmds - 1; 2246 2247 instance->max_num_sge = 2248 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ? 2249 MEGASAS_MAX_SGE_CNT : instance->max_num_sge; 2250 2251 /* create a pool of commands */ 2252 if (alloc_space_for_mfi(instance)) 2253 goto fail_alloc_fw_space; 2254 2255 /* disable interrupt for initial preparation */ 2256 instance->func_ptr->disable_intr(instance); 2257 2258 /* 2259 * Prepare an init frame. Note the init frame points to queue info 2260 * structure. Each frame has SGL allocated after first 64 bytes.
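 * (Layout sketch, restating what the code below and
 * alloc_additional_dma_buffer() set up:
 *	cmd->frame_phys_addr + 0  : the megasas_init_frame itself (64 bytes)
 *	cmd->frame_phys_addr + 64 : megasas_init_queue_info, advertised to the
 *	                            firmware via queue_info_new_phys_addr_lo
 *	mfi_internal_dma_obj      : producer index at offset 0, consumer index
 *	                            at offset 4, reply queue of
 *	                            max_fw_cmds + 1 entries at offset 8.)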
For 2261 * this frame - since we don't need any SGL - we use SGL's space as 2262 * queue info structure 2263 */ 2264 cmd = get_mfi_pkt(instance); 2265 2266 init_frame = (struct megasas_init_frame *)cmd->frame; 2267 initq_info = (struct megasas_init_queue_info *) 2268 ((unsigned long)init_frame + 64); 2269 2270 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 2271 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info)); 2272 2273 initq_info->init_flags = 0; 2274 2275 initq_info->reply_queue_entries = instance->max_fw_cmds + 1; 2276 2277 initq_info->producer_index_phys_addr_hi = 0; 2278 initq_info->producer_index_phys_addr_lo = 2279 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address; 2280 2281 initq_info->consumer_index_phys_addr_hi = 0; 2282 initq_info->consumer_index_phys_addr_lo = 2283 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4; 2284 2285 initq_info->reply_queue_start_phys_addr_hi = 0; 2286 initq_info->reply_queue_start_phys_addr_lo = 2287 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8; 2288 2289 init_frame->cmd = MFI_CMD_OP_INIT; 2290 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE; 2291 init_frame->flags = 0; 2292 init_frame->queue_info_new_phys_addr_lo = 2293 cmd->frame_phys_addr + 64; 2294 init_frame->queue_info_new_phys_addr_hi = 0; 2295 2296 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info); 2297 2298 cmd->frame_count = 1; 2299 2300 /* issue the init frame in polled mode */ 2301 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2302 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 2303 goto fail_fw_init; 2304 } 2305 2306 return_mfi_pkt(instance, cmd); 2307 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2308 goto fail_fw_init; 2309 } 2310 2311 /* gather misc FW related information */ 2312 if (!get_ctrl_info(instance, &ctrl_info)) { 2313 instance->max_sectors_per_req = ctrl_info.max_request_size; 2314 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d", 2315 ctrl_info.product_name, ctrl_info.ld_present_count)); 2316 } else { 2317 instance->max_sectors_per_req = instance->max_num_sge * 2318 PAGESIZE / 512; 2319 } 2320 2321 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2322 goto fail_fw_init; 2323 } 2324 2325 return (0); 2326 2327 fail_fw_init: 2328 fail_alloc_fw_space: 2329 2330 free_space_for_mfi(instance); 2331 2332 fail_ready_state: 2333 ddi_regs_map_free(&instance->regmap_handle); 2334 2335 fail_mfi_reg_setup: 2336 return (DDI_FAILURE); 2337 } 2338 2339 /* 2340 * mfi_state_transition_to_ready : Move the FW to READY state 2341 * 2342 * @reg_set : MFI register set 2343 */ 2344 static int 2345 mfi_state_transition_to_ready(struct megasas_instance *instance) 2346 { 2347 int i; 2348 uint8_t max_wait; 2349 uint32_t fw_ctrl; 2350 uint32_t fw_state; 2351 uint32_t cur_state; 2352 2353 fw_state = 2354 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK; 2355 con_log(CL_ANN1, (CE_NOTE, 2356 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 2357 2358 while (fw_state != MFI_STATE_READY) { 2359 con_log(CL_ANN, (CE_NOTE, 2360 "mfi_state_transition_to_ready:FW state%x", fw_state)); 2361 2362 switch (fw_state) { 2363 case MFI_STATE_FAULT: 2364 con_log(CL_ANN, (CE_NOTE, 2365 "megasas: FW in FAULT state!!")); 2366 2367 return (-ENODEV); 2368 case MFI_STATE_WAIT_HANDSHAKE: 2369 /* set the CLR bit in IMR0 */ 2370 con_log(CL_ANN, (CE_NOTE, 2371 "megasas: FW waiting for HANDSHAKE")); 2372 /* 2373 * PCI_Hot Plug: MFI F/W requires 2374 * 
(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2375 * to be set 2376 */ 2377 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 2378 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 2379 MFI_INIT_HOTPLUG, instance); 2380 2381 max_wait = 2; 2382 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2383 break; 2384 case MFI_STATE_BOOT_MESSAGE_PENDING: 2385 /* set the CLR bit in IMR0 */ 2386 con_log(CL_ANN, (CE_NOTE, 2387 "megasas: FW state boot message pending")); 2388 /* 2389 * PCI_Hot Plug: MFI F/W requires 2390 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 2391 * to be set 2392 */ 2393 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 2394 2395 max_wait = 10; 2396 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2397 break; 2398 case MFI_STATE_OPERATIONAL: 2399 /* bring it to READY state; assuming max wait 2 secs */ 2400 instance->func_ptr->disable_intr(instance); 2401 con_log(CL_ANN1, (CE_NOTE, 2402 "megasas: FW in OPERATIONAL state")); 2403 /* 2404 * PCI_Hot Plug: MFI F/W requires 2405 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT) 2406 * to be set 2407 */ 2408 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 2409 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 2410 2411 max_wait = 10; 2412 cur_state = MFI_STATE_OPERATIONAL; 2413 break; 2414 case MFI_STATE_UNDEFINED: 2415 /* this state should not last for more than 2 seconds */ 2416 con_log(CL_ANN, (CE_NOTE, "FW state undefined\n")); 2417 2418 max_wait = 2; 2419 cur_state = MFI_STATE_UNDEFINED; 2420 break; 2421 case MFI_STATE_BB_INIT: 2422 max_wait = 2; 2423 cur_state = MFI_STATE_BB_INIT; 2424 break; 2425 case MFI_STATE_FW_INIT: 2426 max_wait = 2; 2427 cur_state = MFI_STATE_FW_INIT; 2428 break; 2429 case MFI_STATE_DEVICE_SCAN: 2430 max_wait = 10; 2431 cur_state = MFI_STATE_DEVICE_SCAN; 2432 break; 2433 default: 2434 con_log(CL_ANN, (CE_NOTE, 2435 "megasas: Unknown state 0x%x\n", fw_state)); 2436 return (-ENODEV); 2437 } 2438 2439 /* the cur_state should not last for more than max_wait secs */ 2440 for (i = 0; i < (max_wait * MILLISEC); i++) { 2441 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 2442 fw_state = 2443 instance->func_ptr->read_fw_status_reg(instance) & 2444 MFI_STATE_MASK; 2445 2446 if (fw_state == cur_state) { 2447 delay(1 * drv_usectohz(MILLISEC)); 2448 } else { 2449 break; 2450 } 2451 } 2452 2453 /* return error if fw_state hasn't changed after max_wait */ 2454 if (fw_state == cur_state) { 2455 con_log(CL_ANN, (CE_NOTE, 2456 "FW state hasn't changed in %d secs\n", max_wait)); 2457 return (-ENODEV); 2458 } 2459 }; 2460 2461 fw_ctrl = RD_IB_DOORBELL(instance); 2462 2463 con_log(CL_ANN1, (CE_NOTE, 2464 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2465 2466 /* 2467 * Write 0xF to the doorbell register to do the following. 2468 * - Abort all outstanding commands (bit 0). 2469 * - Transition from OPERATIONAL to READY state (bit 1). 2470 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2471 * - Set to release FW to continue running (i.e. BIOS handshake 2472 * (bit 3). 
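 * Equivalently, the 0xF written below is just those four bits OR'd together
 * (sketch; whether the header's MFI_INIT_* masks spell exactly these bits is
 * an assumption here, so the raw value is kept in the code):
 *
 *	WR_IB_DOORBELL((1U << 0) | (1U << 1) | (1U << 2) | (1U << 3), instance);
 *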
2473 */ 2474 WR_IB_DOORBELL(0xF, instance); 2475 2476 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 2477 return (-ENODEV); 2478 } 2479 return (0); 2480 } 2481 2482 /* 2483 * get_seq_num 2484 */ 2485 static int 2486 get_seq_num(struct megasas_instance *instance, 2487 struct megasas_evt_log_info *eli) 2488 { 2489 int ret = 0; 2490 2491 dma_obj_t dcmd_dma_obj; 2492 struct megasas_cmd *cmd; 2493 struct megasas_dcmd_frame *dcmd; 2494 2495 cmd = get_mfi_pkt(instance); 2496 2497 if (!cmd) { 2498 cmn_err(CE_WARN, "megasas: failed to get a cmd\n"); 2499 return (-ENOMEM); 2500 } 2501 2502 dcmd = &cmd->frame->dcmd; 2503 2504 /* allocate the data transfer buffer */ 2505 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info); 2506 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 2507 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2508 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2509 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2510 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2511 2512 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 2513 con_log(CL_ANN, (CE_WARN, 2514 "get_seq_num: could not data transfer buffer alloc.")); 2515 return (DDI_FAILURE); 2516 } 2517 2518 (void) memset(dcmd_dma_obj.buffer, 0, 2519 sizeof (struct megasas_evt_log_info)); 2520 2521 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2522 2523 dcmd->cmd = MFI_CMD_OP_DCMD; 2524 dcmd->cmd_status = 0; 2525 dcmd->sge_count = 1; 2526 dcmd->flags = MFI_FRAME_DIR_READ; 2527 dcmd->timeout = 0; 2528 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info); 2529 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2530 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info); 2531 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 2532 2533 cmd->sync_cmd = MEGASAS_TRUE; 2534 cmd->frame_count = 1; 2535 2536 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2537 cmn_err(CE_WARN, "get_seq_num: " 2538 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n"); 2539 ret = -1; 2540 } else { 2541 /* copy the data back into callers buffer */ 2542 bcopy(dcmd_dma_obj.buffer, eli, 2543 sizeof (struct megasas_evt_log_info)); 2544 ret = 0; 2545 } 2546 2547 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 2548 ret = -1; 2549 2550 return_mfi_pkt(instance, cmd); 2551 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) { 2552 ret = -1; 2553 } 2554 return (ret); 2555 } 2556 2557 /* 2558 * start_mfi_aen 2559 */ 2560 static int 2561 start_mfi_aen(struct megasas_instance *instance) 2562 { 2563 int ret = 0; 2564 2565 struct megasas_evt_log_info eli; 2566 union megasas_evt_class_locale class_locale; 2567 2568 /* get the latest sequence number from FW */ 2569 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info)); 2570 2571 if (get_seq_num(instance, &eli)) { 2572 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n"); 2573 return (-1); 2574 } 2575 2576 /* register AEN with FW for latest sequence number plus 1 */ 2577 class_locale.members.reserved = 0; 2578 class_locale.members.locale = MR_EVT_LOCALE_ALL; 2579 class_locale.members.class = MR_EVT_CLASS_CRITICAL; 2580 2581 ret = register_mfi_aen(instance, eli.newest_seq_num + 1, 2582 class_locale.word); 2583 2584 if (ret) { 2585 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n"); 2586 return (-1); 2587 } 2588 2589 return (ret); 2590 } 2591 2592 /* 2593 * flush_cache 2594 */ 2595 static void 2596 flush_cache(struct megasas_instance *instance) 2597 { 2598 struct megasas_cmd *cmd; 2599 struct megasas_dcmd_frame 
*dcmd; 2600 2601 if (!(cmd = get_mfi_pkt(instance))) 2602 return; 2603 2604 dcmd = &cmd->frame->dcmd; 2605 2606 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 2607 2608 dcmd->cmd = MFI_CMD_OP_DCMD; 2609 dcmd->cmd_status = 0x0; 2610 dcmd->sge_count = 0; 2611 dcmd->flags = MFI_FRAME_DIR_NONE; 2612 dcmd->timeout = 0; 2613 dcmd->data_xfer_len = 0; 2614 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 2615 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 2616 2617 cmd->frame_count = 1; 2618 2619 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2620 cmn_err(CE_WARN, 2621 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n"); 2622 } 2623 con_log(CL_DLEVEL1, (CE_NOTE, "done")); 2624 return_mfi_pkt(instance, cmd); 2625 (void) megasas_common_check(instance, cmd); 2626 } 2627 2628 /* 2629 * service_mfi_aen- Completes an AEN command 2630 * @instance: Adapter soft state 2631 * @cmd: Command to be completed 2632 * 2633 */ 2634 static void 2635 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2636 { 2637 uint32_t seq_num; 2638 struct megasas_evt_detail *evt_detail = 2639 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer; 2640 2641 cmd->cmd_status = cmd->frame->io.cmd_status; 2642 2643 if (cmd->cmd_status == ENODATA) { 2644 cmd->cmd_status = 0; 2645 } 2646 2647 /* 2648 * log the MFI AEN event to the sysevent queue so that 2649 * application will get noticed 2650 */ 2651 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS", 2652 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) { 2653 int instance_no = ddi_get_instance(instance->dip); 2654 con_log(CL_ANN, (CE_WARN, 2655 "mega%d: Failed to log AEN event", instance_no)); 2656 } 2657 2658 /* get copy of seq_num and class/locale for re-registration */ 2659 seq_num = evt_detail->seq_num; 2660 seq_num++; 2661 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 2662 sizeof (struct megasas_evt_detail)); 2663 2664 cmd->frame->dcmd.cmd_status = 0x0; 2665 cmd->frame->dcmd.mbox.w[0] = seq_num; 2666 2667 instance->aen_seq_num = seq_num; 2668 2669 cmd->frame_count = 1; 2670 2671 /* Issue the aen registration frame */ 2672 instance->func_ptr->issue_cmd(cmd, instance); 2673 } 2674 2675 /* 2676 * complete_cmd_in_sync_mode - Completes an internal command 2677 * @instance: Adapter soft state 2678 * @cmd: Command to be completed 2679 * 2680 * The issue_cmd_in_sync_mode() function waits for a command to complete 2681 * after it issues a command. This function wakes up that waiting routine by 2682 * calling wake_up() on the wait queue. 
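 * In this port the wakeup is a condition-variable broadcast on
 * instance->int_cmd_cv rather than a Linux-style wake_up().  The waiting
 * side is not part of this excerpt; a plausible sketch of it (an assumption,
 * including the int_cmd_mtx name, not a copy of issue_cmd_in_sync_mode())
 * would be:
 *
 *	mutex_enter(&instance->int_cmd_mtx);
 *	while (cmd->sync_cmd == MEGASAS_TRUE)
 *		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
 *	mutex_exit(&instance->int_cmd_mtx);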
2683 */ 2684 static void 2685 complete_cmd_in_sync_mode(struct megasas_instance *instance, 2686 struct megasas_cmd *cmd) 2687 { 2688 cmd->cmd_status = cmd->frame->io.cmd_status; 2689 2690 cmd->sync_cmd = MEGASAS_FALSE; 2691 2692 if (cmd->cmd_status == ENODATA) { 2693 cmd->cmd_status = 0; 2694 } 2695 2696 cv_broadcast(&instance->int_cmd_cv); 2697 } 2698 2699 /* 2700 * megasas_softintr - The Software ISR 2701 * @param arg : HBA soft state 2702 * 2703 * called from high-level interrupt if hi-level interrupt are not there, 2704 * otherwise triggered as a soft interrupt 2705 */ 2706 static uint_t 2707 megasas_softintr(struct megasas_instance *instance) 2708 { 2709 struct scsi_pkt *pkt; 2710 struct scsa_cmd *acmd; 2711 struct megasas_cmd *cmd; 2712 struct mlist_head *pos, *next; 2713 mlist_t process_list; 2714 struct megasas_header *hdr; 2715 struct scsi_arq_status *arqstat; 2716 2717 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called")); 2718 2719 ASSERT(instance); 2720 mutex_enter(&instance->completed_pool_mtx); 2721 2722 if (mlist_empty(&instance->completed_pool_list)) { 2723 mutex_exit(&instance->completed_pool_mtx); 2724 return (DDI_INTR_UNCLAIMED); 2725 } 2726 2727 instance->softint_running = 1; 2728 2729 INIT_LIST_HEAD(&process_list); 2730 mlist_splice(&instance->completed_pool_list, &process_list); 2731 INIT_LIST_HEAD(&instance->completed_pool_list); 2732 2733 mutex_exit(&instance->completed_pool_mtx); 2734 2735 /* perform all callbacks first, before releasing the SCBs */ 2736 mlist_for_each_safe(pos, next, &process_list) { 2737 cmd = mlist_entry(pos, struct megasas_cmd, list); 2738 2739 /* syncronize the Cmd frame for the controller */ 2740 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 2741 0, 0, DDI_DMA_SYNC_FORCPU); 2742 2743 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 2744 DDI_SUCCESS) { 2745 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 2746 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 2747 return (DDI_INTR_UNCLAIMED); 2748 } 2749 2750 hdr = &cmd->frame->hdr; 2751 2752 /* remove the internal command from the process list */ 2753 mlist_del_init(&cmd->list); 2754 2755 switch (hdr->cmd) { 2756 case MFI_CMD_OP_PD_SCSI: 2757 case MFI_CMD_OP_LD_SCSI: 2758 case MFI_CMD_OP_LD_READ: 2759 case MFI_CMD_OP_LD_WRITE: 2760 /* 2761 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI 2762 * could have been issued either through an 2763 * IO path or an IOCTL path. If it was via IOCTL, 2764 * we will send it to internal completion. 2765 */ 2766 if (cmd->sync_cmd == MEGASAS_TRUE) { 2767 complete_cmd_in_sync_mode(instance, cmd); 2768 break; 2769 } 2770 2771 /* regular commands */ 2772 acmd = cmd->cmd; 2773 pkt = CMD2PKT(acmd); 2774 2775 if (acmd->cmd_flags & CFLAG_DMAVALID) { 2776 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 2777 (void) ddi_dma_sync(acmd->cmd_dmahandle, 2778 acmd->cmd_dma_offset, 2779 acmd->cmd_dma_len, 2780 DDI_DMA_SYNC_FORCPU); 2781 } 2782 } 2783 2784 pkt->pkt_reason = CMD_CMPLT; 2785 pkt->pkt_statistics = 0; 2786 pkt->pkt_state = STATE_GOT_BUS 2787 | STATE_GOT_TARGET | STATE_SENT_CMD 2788 | STATE_XFERRED_DATA | STATE_GOT_STATUS; 2789 2790 con_log(CL_ANN1, (CE_CONT, 2791 "CDB[0] = %x completed for %s: size %lx context %x", 2792 pkt->pkt_cdbp[0], ((acmd->islogical) ? 
"LD" : "PD"), 2793 acmd->cmd_dmacount, hdr->context)); 2794 2795 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { 2796 struct scsi_inquiry *inq; 2797 2798 if (acmd->cmd_dmacount != 0) { 2799 bp_mapin(acmd->cmd_buf); 2800 inq = (struct scsi_inquiry *) 2801 acmd->cmd_buf->b_un.b_addr; 2802 2803 /* don't expose physical drives to OS */ 2804 if (acmd->islogical && 2805 (hdr->cmd_status == MFI_STAT_OK)) { 2806 display_scsi_inquiry( 2807 (caddr_t)inq); 2808 } else if ((hdr->cmd_status == 2809 MFI_STAT_OK) && inq->inq_dtype == 2810 DTYPE_DIRECT) { 2811 2812 display_scsi_inquiry( 2813 (caddr_t)inq); 2814 2815 /* for physical disk */ 2816 hdr->cmd_status = 2817 MFI_STAT_DEVICE_NOT_FOUND; 2818 } 2819 } 2820 } 2821 2822 switch (hdr->cmd_status) { 2823 case MFI_STAT_OK: 2824 pkt->pkt_scbp[0] = STATUS_GOOD; 2825 break; 2826 case MFI_STAT_LD_CC_IN_PROGRESS: 2827 case MFI_STAT_LD_RECON_IN_PROGRESS: 2828 /* SJ - these are not correct way */ 2829 pkt->pkt_scbp[0] = STATUS_GOOD; 2830 break; 2831 case MFI_STAT_LD_INIT_IN_PROGRESS: 2832 con_log(CL_ANN, 2833 (CE_WARN, "Initialization in Progress")); 2834 pkt->pkt_reason = CMD_TRAN_ERR; 2835 2836 break; 2837 case MFI_STAT_SCSI_DONE_WITH_ERROR: 2838 con_log(CL_ANN1, (CE_CONT, "scsi_done error")); 2839 2840 pkt->pkt_reason = CMD_CMPLT; 2841 ((struct scsi_status *) 2842 pkt->pkt_scbp)->sts_chk = 1; 2843 2844 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { 2845 2846 con_log(CL_ANN, 2847 (CE_WARN, "TEST_UNIT_READY fail")); 2848 2849 } else { 2850 pkt->pkt_state |= STATE_ARQ_DONE; 2851 arqstat = (void *)(pkt->pkt_scbp); 2852 arqstat->sts_rqpkt_reason = CMD_CMPLT; 2853 arqstat->sts_rqpkt_resid = 0; 2854 arqstat->sts_rqpkt_state |= 2855 STATE_GOT_BUS | STATE_GOT_TARGET 2856 | STATE_SENT_CMD 2857 | STATE_XFERRED_DATA; 2858 *(uint8_t *)&arqstat->sts_rqpkt_status = 2859 STATUS_GOOD; 2860 2861 bcopy(cmd->sense, 2862 &(arqstat->sts_sensedata), 2863 acmd->cmd_scblen - 2864 offsetof(struct scsi_arq_status, 2865 sts_sensedata)); 2866 } 2867 break; 2868 case MFI_STAT_LD_OFFLINE: 2869 case MFI_STAT_DEVICE_NOT_FOUND: 2870 con_log(CL_ANN1, (CE_CONT, 2871 "device not found error")); 2872 pkt->pkt_reason = CMD_DEV_GONE; 2873 pkt->pkt_statistics = STAT_DISCON; 2874 break; 2875 case MFI_STAT_LD_LBA_OUT_OF_RANGE: 2876 pkt->pkt_state |= STATE_ARQ_DONE; 2877 pkt->pkt_reason = CMD_CMPLT; 2878 ((struct scsi_status *) 2879 pkt->pkt_scbp)->sts_chk = 1; 2880 2881 arqstat = (void *)(pkt->pkt_scbp); 2882 arqstat->sts_rqpkt_reason = CMD_CMPLT; 2883 arqstat->sts_rqpkt_resid = 0; 2884 arqstat->sts_rqpkt_state |= STATE_GOT_BUS 2885 | STATE_GOT_TARGET | STATE_SENT_CMD 2886 | STATE_XFERRED_DATA; 2887 *(uint8_t *)&arqstat->sts_rqpkt_status = 2888 STATUS_GOOD; 2889 2890 arqstat->sts_sensedata.es_valid = 1; 2891 arqstat->sts_sensedata.es_key = 2892 KEY_ILLEGAL_REQUEST; 2893 arqstat->sts_sensedata.es_class = 2894 CLASS_EXTENDED_SENSE; 2895 2896 /* 2897 * LOGICAL BLOCK ADDRESS OUT OF RANGE: 2898 * ASC: 0x21h; ASCQ: 0x00h; 2899 */ 2900 arqstat->sts_sensedata.es_add_code = 0x21; 2901 arqstat->sts_sensedata.es_qual_code = 0x00; 2902 2903 break; 2904 2905 default: 2906 con_log(CL_ANN, (CE_CONT, "Unknown status!")); 2907 pkt->pkt_reason = CMD_TRAN_ERR; 2908 2909 break; 2910 } 2911 2912 atomic_add_16(&instance->fw_outstanding, (-1)); 2913 2914 return_mfi_pkt(instance, cmd); 2915 2916 (void) megasas_common_check(instance, cmd); 2917 2918 if (acmd->cmd_dmahandle) { 2919 if (megasas_check_dma_handle( 2920 acmd->cmd_dmahandle) != DDI_SUCCESS) { 2921 ddi_fm_service_impact(instance->dip, 2922 DDI_SERVICE_UNAFFECTED); 2923 
pkt->pkt_reason = CMD_TRAN_ERR; 2924 pkt->pkt_statistics = 0; 2925 } 2926 } 2927 2928 /* Call the callback routine */ 2929 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { 2930 scsi_hba_pkt_comp(pkt); 2931 } 2932 2933 break; 2934 case MFI_CMD_OP_SMP: 2935 case MFI_CMD_OP_STP: 2936 complete_cmd_in_sync_mode(instance, cmd); 2937 break; 2938 case MFI_CMD_OP_DCMD: 2939 /* see if got an event notification */ 2940 if (cmd->frame->dcmd.opcode == 2941 MR_DCMD_CTRL_EVENT_WAIT) { 2942 if ((instance->aen_cmd == cmd) && 2943 (instance->aen_cmd->abort_aen)) { 2944 con_log(CL_ANN, (CE_WARN, 2945 "megasas_softintr: " 2946 "aborted_aen returned")); 2947 } else { 2948 service_mfi_aen(instance, cmd); 2949 2950 atomic_add_16(&instance->fw_outstanding, 2951 (-1)); 2952 } 2953 } else { 2954 complete_cmd_in_sync_mode(instance, cmd); 2955 } 2956 2957 break; 2958 case MFI_CMD_OP_ABORT: 2959 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete")); 2960 /* 2961 * MFI_CMD_OP_ABORT successfully completed 2962 * in the synchronous mode 2963 */ 2964 complete_cmd_in_sync_mode(instance, cmd); 2965 break; 2966 default: 2967 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 2968 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 2969 2970 if (cmd->pkt != NULL) { 2971 pkt = cmd->pkt; 2972 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { 2973 scsi_hba_pkt_comp(pkt); 2974 } 2975 } 2976 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!")); 2977 break; 2978 } 2979 } 2980 2981 instance->softint_running = 0; 2982 2983 return (DDI_INTR_CLAIMED); 2984 } 2985 2986 /* 2987 * mega_alloc_dma_obj 2988 * 2989 * Allocate the memory and other resources for an dma object. 2990 */ 2991 static int 2992 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj) 2993 { 2994 int i; 2995 size_t alen = 0; 2996 uint_t cookie_cnt; 2997 struct ddi_device_acc_attr tmp_endian_attr; 2998 2999 tmp_endian_attr = endian_attr; 3000 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3001 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr, 3002 DDI_DMA_SLEEP, NULL, &obj->dma_handle); 3003 if (i != DDI_SUCCESS) { 3004 3005 switch (i) { 3006 case DDI_DMA_BADATTR : 3007 con_log(CL_ANN, (CE_WARN, 3008 "Failed ddi_dma_alloc_handle- Bad atrib")); 3009 break; 3010 case DDI_DMA_NORESOURCES : 3011 con_log(CL_ANN, (CE_WARN, 3012 "Failed ddi_dma_alloc_handle- No Resources")); 3013 break; 3014 default : 3015 con_log(CL_ANN, (CE_WARN, 3016 "Failed ddi_dma_alloc_handle :unknown %d", i)); 3017 break; 3018 } 3019 3020 return (-1); 3021 } 3022 3023 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr, 3024 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 3025 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) || 3026 alen < obj->size) { 3027 3028 ddi_dma_free_handle(&obj->dma_handle); 3029 3030 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc")); 3031 3032 return (-1); 3033 } 3034 3035 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer, 3036 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 3037 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) { 3038 3039 ddi_dma_mem_free(&obj->acc_handle); 3040 ddi_dma_free_handle(&obj->dma_handle); 3041 3042 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle")); 3043 3044 return (-1); 3045 } 3046 3047 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) { 3048 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3049 return (-1); 3050 } 3051 3052 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) { 3053 
ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 3054 return (-1); 3055 } 3056 3057 return (cookie_cnt); 3058 } 3059 3060 /* 3061 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t) 3062 * 3063 * De-allocate the memory and other resources for a DMA object, which must 3064 * have been allocated by a previous call to mega_alloc_dma_obj() 3065 */ 3066 static int 3067 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj) 3068 { 3069 3070 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) { 3071 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3072 return (DDI_FAILURE); 3073 } 3074 3075 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) { 3076 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 3077 return (DDI_FAILURE); 3078 } 3079 3080 (void) ddi_dma_unbind_handle(obj.dma_handle); 3081 ddi_dma_mem_free(&obj.acc_handle); 3082 ddi_dma_free_handle(&obj.dma_handle); 3083 3084 return (DDI_SUCCESS); 3085 } 3086 3087 /* 3088 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *, 3089 * int, int (*)()) 3090 * 3091 * Allocate DMA resources for a new SCSI command 3092 */ 3093 static int 3094 megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt, 3095 struct buf *bp, int flags, int (*callback)()) 3096 { 3097 int dma_flags; 3098 int (*cb)(caddr_t); 3099 int i; 3100 3101 ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr; 3102 struct scsa_cmd *acmd = PKT2CMD(pkt); 3103 3104 acmd->cmd_buf = bp; 3105 3106 if (bp->b_flags & B_READ) { 3107 acmd->cmd_flags &= ~CFLAG_DMASEND; 3108 dma_flags = DDI_DMA_READ; 3109 } else { 3110 acmd->cmd_flags |= CFLAG_DMASEND; 3111 dma_flags = DDI_DMA_WRITE; 3112 } 3113 3114 if (flags & PKT_CONSISTENT) { 3115 acmd->cmd_flags |= CFLAG_CONSISTENT; 3116 dma_flags |= DDI_DMA_CONSISTENT; 3117 } 3118 3119 if (flags & PKT_DMA_PARTIAL) { 3120 dma_flags |= DDI_DMA_PARTIAL; 3121 } 3122 3123 dma_flags |= DDI_DMA_REDZONE; 3124 3125 cb = (callback == NULL_FUNC) ?
DDI_DMA_DONTWAIT : DDI_DMA_SLEEP; 3126 3127 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge; 3128 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 3129 3130 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr, 3131 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) { 3132 switch (i) { 3133 case DDI_DMA_BADATTR: 3134 bioerror(bp, EFAULT); 3135 return (-1); 3136 3137 case DDI_DMA_NORESOURCES: 3138 bioerror(bp, 0); 3139 return (-1); 3140 3141 default: 3142 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: " 3143 "0x%x impossible\n", i)); 3144 bioerror(bp, EFAULT); 3145 return (-1); 3146 } 3147 } 3148 3149 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags, 3150 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies); 3151 3152 switch (i) { 3153 case DDI_DMA_PARTIAL_MAP: 3154 if ((dma_flags & DDI_DMA_PARTIAL) == 0) { 3155 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 3156 "DDI_DMA_PARTIAL_MAP impossible\n")); 3157 goto no_dma_cookies; 3158 } 3159 3160 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) == 3161 DDI_FAILURE) { 3162 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n")); 3163 goto no_dma_cookies; 3164 } 3165 3166 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3167 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3168 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3169 DDI_FAILURE) { 3170 3171 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n")); 3172 goto no_dma_cookies; 3173 } 3174 3175 goto get_dma_cookies; 3176 case DDI_DMA_MAPPED: 3177 acmd->cmd_nwin = 1; 3178 acmd->cmd_dma_len = 0; 3179 acmd->cmd_dma_offset = 0; 3180 3181 get_dma_cookies: 3182 i = 0; 3183 acmd->cmd_dmacount = 0; 3184 for (;;) { 3185 acmd->cmd_dmacount += 3186 acmd->cmd_dmacookies[i++].dmac_size; 3187 3188 if (i == instance->max_num_sge || 3189 i == acmd->cmd_ncookies) 3190 break; 3191 3192 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3193 &acmd->cmd_dmacookies[i]); 3194 } 3195 3196 acmd->cmd_cookie = i; 3197 acmd->cmd_cookiecnt = i; 3198 3199 acmd->cmd_flags |= CFLAG_DMAVALID; 3200 3201 if (bp->b_bcount >= acmd->cmd_dmacount) { 3202 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3203 } else { 3204 pkt->pkt_resid = 0; 3205 } 3206 3207 return (0); 3208 case DDI_DMA_NORESOURCES: 3209 bioerror(bp, 0); 3210 break; 3211 case DDI_DMA_NOMAPPING: 3212 bioerror(bp, EFAULT); 3213 break; 3214 case DDI_DMA_TOOBIG: 3215 bioerror(bp, EINVAL); 3216 break; 3217 case DDI_DMA_INUSE: 3218 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:" 3219 " DDI_DMA_INUSE impossible\n")); 3220 break; 3221 default: 3222 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 3223 "0x%x impossible\n", i)); 3224 break; 3225 } 3226 3227 no_dma_cookies: 3228 ddi_dma_free_handle(&acmd->cmd_dmahandle); 3229 acmd->cmd_dmahandle = NULL; 3230 acmd->cmd_flags &= ~CFLAG_DMAVALID; 3231 return (-1); 3232 } 3233 3234 /* 3235 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *) 3236 * 3237 * move dma resources to next dma window 3238 * 3239 */ 3240 static int 3241 megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt, 3242 struct buf *bp) 3243 { 3244 int i = 0; 3245 3246 struct scsa_cmd *acmd = PKT2CMD(pkt); 3247 3248 /* 3249 * If there are no more cookies remaining in this window, 3250 * must move to the next window first. 
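 * (Expected calling pattern, sketched as an assumption about how
 * PKT_DMA_PARTIAL transfers are normally resubmitted rather than as code
 * from this file: the target driver re-issues the packet with a non-zero
 * pkt_resid, and the transport start path reloads the cookies for the next
 * window before rebuilding the frame's scatter-gather list.)
 *
 *	if (acmd->cmd_flags & CFLAG_DMAVALID)
 *		(void) megasas_dma_move(instance, pkt, acmd->cmd_buf);
 *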
3251 */ 3252 if (acmd->cmd_cookie == acmd->cmd_ncookies) { 3253 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) { 3254 return (0); 3255 } 3256 3257 /* at last window, cannot move */ 3258 if (++acmd->cmd_curwin >= acmd->cmd_nwin) { 3259 return (-1); 3260 } 3261 3262 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 3263 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 3264 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 3265 DDI_FAILURE) { 3266 return (-1); 3267 } 3268 3269 acmd->cmd_cookie = 0; 3270 } else { 3271 /* still more cookies in this window - get the next one */ 3272 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3273 &acmd->cmd_dmacookies[0]); 3274 } 3275 3276 /* get remaining cookies in this window, up to our maximum */ 3277 for (;;) { 3278 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size; 3279 acmd->cmd_cookie++; 3280 3281 if (i == instance->max_num_sge || 3282 acmd->cmd_cookie == acmd->cmd_ncookies) { 3283 break; 3284 } 3285 3286 ddi_dma_nextcookie(acmd->cmd_dmahandle, 3287 &acmd->cmd_dmacookies[i]); 3288 } 3289 3290 acmd->cmd_cookiecnt = i; 3291 3292 if (bp->b_bcount >= acmd->cmd_dmacount) { 3293 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 3294 } else { 3295 pkt->pkt_resid = 0; 3296 } 3297 3298 return (0); 3299 } 3300 3301 /* 3302 * build_cmd 3303 */ 3304 static struct megasas_cmd * 3305 build_cmd(struct megasas_instance *instance, struct scsi_address *ap, 3306 struct scsi_pkt *pkt, uchar_t *cmd_done) 3307 { 3308 uint16_t flags = 0; 3309 uint32_t i; 3310 uint32_t context; 3311 uint32_t sge_bytes; 3312 3313 struct megasas_cmd *cmd; 3314 struct megasas_sge64 *mfi_sgl; 3315 struct scsa_cmd *acmd = PKT2CMD(pkt); 3316 struct megasas_pthru_frame *pthru; 3317 struct megasas_io_frame *ldio; 3318 3319 /* find out if this is logical or physical drive command. */ 3320 acmd->islogical = MEGADRV_IS_LOGICAL(ap); 3321 acmd->device_id = MAP_DEVICE_ID(instance, ap); 3322 *cmd_done = 0; 3323 3324 /* get the command packet */ 3325 if (!(cmd = get_mfi_pkt(instance))) { 3326 return (NULL); 3327 } 3328 3329 cmd->pkt = pkt; 3330 cmd->cmd = acmd; 3331 3332 /* lets get the command directions */ 3333 if (acmd->cmd_flags & CFLAG_DMASEND) { 3334 flags = MFI_FRAME_DIR_WRITE; 3335 3336 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3337 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3338 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3339 DDI_DMA_SYNC_FORDEV); 3340 } 3341 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 3342 flags = MFI_FRAME_DIR_READ; 3343 3344 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 3345 (void) ddi_dma_sync(acmd->cmd_dmahandle, 3346 acmd->cmd_dma_offset, acmd->cmd_dma_len, 3347 DDI_DMA_SYNC_FORCPU); 3348 } 3349 } else { 3350 flags = MFI_FRAME_DIR_NONE; 3351 } 3352 3353 flags |= MFI_FRAME_SGL64; 3354 3355 switch (pkt->pkt_cdbp[0]) { 3356 3357 /* 3358 * case SCMD_SYNCHRONIZE_CACHE: 3359 * flush_cache(instance); 3360 * return_mfi_pkt(instance, cmd); 3361 * *cmd_done = 1; 3362 * 3363 * return (NULL); 3364 */ 3365 3366 case SCMD_READ: 3367 case SCMD_WRITE: 3368 case SCMD_READ_G1: 3369 case SCMD_WRITE_G1: 3370 if (acmd->islogical) { 3371 ldio = (struct megasas_io_frame *)cmd->frame; 3372 3373 /* 3374 * preare the Logical IO frame: 3375 * 2nd bit is zero for all read cmds 3376 */ 3377 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ? 
3378 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ; 3379 ldio->cmd_status = 0x0; 3380 ldio->scsi_status = 0x0; 3381 ldio->target_id = acmd->device_id; 3382 ldio->timeout = 0; 3383 ldio->reserved_0 = 0; 3384 ldio->pad_0 = 0; 3385 ldio->flags = flags; 3386 3387 /* Initialize sense information */ 3388 bzero(cmd->sense, SENSE_LENGTH); 3389 ldio->sense_len = SENSE_LENGTH; 3390 ldio->sense_buf_phys_addr_hi = 0; 3391 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3392 3393 ldio->start_lba_hi = 0; 3394 ldio->access_byte = (acmd->cmd_cdblen != 6) ? 3395 pkt->pkt_cdbp[1] : 0; 3396 ldio->sge_count = acmd->cmd_cookiecnt; 3397 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl; 3398 3399 context = ldio->context; 3400 3401 if (acmd->cmd_cdblen == CDB_GROUP0) { 3402 ldio->lba_count = host_to_le16( 3403 (uint16_t)(pkt->pkt_cdbp[4])); 3404 3405 ldio->start_lba_lo = host_to_le32( 3406 ((uint32_t)(pkt->pkt_cdbp[3])) | 3407 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3408 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3409 << 16)); 3410 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3411 ldio->lba_count = host_to_le16( 3412 ((uint16_t)(pkt->pkt_cdbp[8])) | 3413 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)); 3414 3415 ldio->start_lba_lo = host_to_le32( 3416 ((uint32_t)(pkt->pkt_cdbp[5])) | 3417 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3418 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3419 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3420 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3421 ldio->lba_count = host_to_le16( 3422 ((uint16_t)(pkt->pkt_cdbp[9])) | 3423 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3424 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3425 ((uint16_t)(pkt->pkt_cdbp[6]) << 24)); 3426 3427 ldio->start_lba_lo = host_to_le32( 3428 ((uint32_t)(pkt->pkt_cdbp[5])) | 3429 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3430 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3431 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3432 } else if (acmd->cmd_cdblen == CDB_GROUP4) { 3433 ldio->lba_count = host_to_le16( 3434 ((uint16_t)(pkt->pkt_cdbp[13])) | 3435 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3436 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3437 ((uint16_t)(pkt->pkt_cdbp[10]) << 24)); 3438 3439 ldio->start_lba_lo = host_to_le32( 3440 ((uint32_t)(pkt->pkt_cdbp[9])) | 3441 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3442 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3443 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); 3444 3445 ldio->start_lba_hi = host_to_le32( 3446 ((uint32_t)(pkt->pkt_cdbp[5])) | 3447 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3448 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3449 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3450 } 3451 3452 break; 3453 } 3454 /* fall through for all non-rd/wr cmds */ 3455 default: 3456 pthru = (struct megasas_pthru_frame *)cmd->frame; 3457 3458 /* prepare the DCDB frame */ 3459 pthru->cmd = (acmd->islogical) ?
3460 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI; 3461 pthru->cmd_status = 0x0; 3462 pthru->scsi_status = 0x0; 3463 pthru->target_id = acmd->device_id; 3464 pthru->lun = 0; 3465 pthru->cdb_len = acmd->cmd_cdblen; 3466 pthru->timeout = 0; 3467 pthru->flags = flags; 3468 pthru->data_xfer_len = acmd->cmd_dmacount; 3469 pthru->sge_count = acmd->cmd_cookiecnt; 3470 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl; 3471 3472 bzero(cmd->sense, SENSE_LENGTH); 3473 pthru->sense_len = SENSE_LENGTH; 3474 pthru->sense_buf_phys_addr_hi = 0; 3475 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3476 3477 context = pthru->context; 3478 3479 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen); 3480 3481 break; 3482 } 3483 #ifdef lint 3484 context = context; 3485 #endif 3486 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */ 3487 3488 /* prepare the scatter-gather list for the firmware */ 3489 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3490 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress; 3491 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size; 3492 } 3493 3494 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt; 3495 3496 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 3497 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; 3498 3499 if (cmd->frame_count >= 8) { 3500 cmd->frame_count = 8; 3501 } 3502 3503 return (cmd); 3504 } 3505 3506 /* 3507 * wait_for_outstanding - Wait for all outstanding cmds 3508 * @instance: Adapter soft state 3509 * 3510 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to 3511 * complete all its outstanding commands. Returns error if one or more IOs 3512 * are pending after this time period. 3513 */ 3514 static int 3515 wait_for_outstanding(struct megasas_instance *instance) 3516 { 3517 int i; 3518 uint32_t wait_time = 90; 3519 3520 for (i = 0; i < wait_time; i++) { 3521 if (!instance->fw_outstanding) { 3522 break; 3523 } 3524 3525 drv_usecwait(MILLISEC); /* wait for 1000 usecs */; 3526 } 3527 3528 if (instance->fw_outstanding) { 3529 return (1); 3530 } 3531 3532 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION); 3533 3534 return (0); 3535 } 3536 3537 /* 3538 * issue_mfi_pthru 3539 */ 3540 static int 3541 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3542 struct megasas_cmd *cmd, int mode) 3543 { 3544 void *ubuf; 3545 uint32_t kphys_addr = 0; 3546 uint32_t xferlen = 0; 3547 uint_t model; 3548 3549 dma_obj_t pthru_dma_obj; 3550 struct megasas_pthru_frame *kpthru; 3551 struct megasas_pthru_frame *pthru; 3552 3553 pthru = &cmd->frame->pthru; 3554 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0]; 3555 3556 model = ddi_model_convert_from(mode & FMODELS); 3557 if (model == DDI_MODEL_ILP32) { 3558 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3559 3560 xferlen = kpthru->sgl.sge32[0].length; 3561 3562 /* SJ! - ubuf needs to be virtual address. */ 3563 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3564 } else { 3565 #ifdef _ILP32 3566 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3567 xferlen = kpthru->sgl.sge32[0].length; 3568 /* SJ! - ubuf needs to be virtual address. */ 3569 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3570 #else 3571 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3572 xferlen = kpthru->sgl.sge64[0].length; 3573 /* SJ! - ubuf needs to be virtual address. 
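(That is, the sge64 phys_addr field of the user-supplied frame carries the caller's user virtual address; the DMA address the firmware actually uses is kphys_addr from pthru_dma_obj, filled in below once the data has been bounced through ddi_copyin().)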
*/ 3574 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3575 #endif 3576 } 3577 3578 if (xferlen) { 3579 /* means IOCTL requires DMA */ 3580 /* allocate the data transfer buffer */ 3581 pthru_dma_obj.size = xferlen; 3582 pthru_dma_obj.dma_attr = megasas_generic_dma_attr; 3583 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3584 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3585 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3586 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3587 3588 /* allocate kernel buffer for DMA */ 3589 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) { 3590 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3591 "could not data transfer buffer alloc.")); 3592 return (DDI_FAILURE); 3593 } 3594 3595 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3596 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3597 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer, 3598 xferlen, mode)) { 3599 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3600 "copy from user space failed\n")); 3601 return (1); 3602 } 3603 } 3604 3605 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3606 } 3607 3608 pthru->cmd = kpthru->cmd; 3609 pthru->sense_len = kpthru->sense_len; 3610 pthru->cmd_status = kpthru->cmd_status; 3611 pthru->scsi_status = kpthru->scsi_status; 3612 pthru->target_id = kpthru->target_id; 3613 pthru->lun = kpthru->lun; 3614 pthru->cdb_len = kpthru->cdb_len; 3615 pthru->sge_count = kpthru->sge_count; 3616 pthru->timeout = kpthru->timeout; 3617 pthru->data_xfer_len = kpthru->data_xfer_len; 3618 3619 pthru->sense_buf_phys_addr_hi = 0; 3620 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3621 pthru->sense_buf_phys_addr_lo = 0; 3622 3623 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len); 3624 3625 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64; 3626 pthru->sgl.sge32[0].length = xferlen; 3627 pthru->sgl.sge32[0].phys_addr = kphys_addr; 3628 3629 cmd->sync_cmd = MEGASAS_TRUE; 3630 cmd->frame_count = 1; 3631 3632 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3633 con_log(CL_ANN, (CE_WARN, 3634 "issue_mfi_pthru: fw_ioctl failed\n")); 3635 } else { 3636 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) { 3637 3638 if (ddi_copyout(pthru_dma_obj.buffer, ubuf, 3639 xferlen, mode)) { 3640 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3641 "copy to user space failed\n")); 3642 return (1); 3643 } 3644 } 3645 } 3646 3647 kpthru->cmd_status = pthru->cmd_status; 3648 kpthru->scsi_status = pthru->scsi_status; 3649 3650 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3651 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status)); 3652 3653 if (xferlen) { 3654 /* free kernel buffer */ 3655 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 3656 return (1); 3657 } 3658 3659 return (0); 3660 } 3661 3662 /* 3663 * issue_mfi_dcmd 3664 */ 3665 static int 3666 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3667 struct megasas_cmd *cmd, int mode) 3668 { 3669 void *ubuf; 3670 uint32_t kphys_addr = 0; 3671 uint32_t xferlen = 0; 3672 uint32_t model; 3673 dma_obj_t dcmd_dma_obj; 3674 struct megasas_dcmd_frame *kdcmd; 3675 struct megasas_dcmd_frame *dcmd; 3676 3677 dcmd = &cmd->frame->dcmd; 3678 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0]; 3679 3680 model = ddi_model_convert_from(mode & FMODELS); 3681 if (model == DDI_MODEL_ILP32) { 3682 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3683 3684 xferlen = kdcmd->sgl.sge32[0].length; 3685 3686 /* SJ! 
- ubuf needs to be virtual address. */ 3687 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3688 } 3689 else 3690 { 3691 #ifdef _ILP32 3692 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3693 xferlen = kdcmd->sgl.sge32[0].length; 3694 /* SJ! - ubuf needs to be virtual address. */ 3695 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3696 #else 3697 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3698 xferlen = kdcmd->sgl.sge64[0].length; 3699 /* SJ! - ubuf needs to be virtual address. */ 3700 ubuf = (void *)(ulong_t)dcmd->sgl.sge64[0].phys_addr; 3701 #endif 3702 } 3703 if (xferlen) { 3704 /* means IOCTL requires DMA */ 3705 /* allocate the data transfer buffer */ 3706 dcmd_dma_obj.size = xferlen; 3707 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 3708 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3709 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3710 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3711 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3712 3713 /* allocate kernel buffer for DMA */ 3714 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 3715 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3716 "could not data transfer buffer alloc.")); 3717 return (DDI_FAILURE); 3718 } 3719 3720 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3721 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3722 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer, 3723 xferlen, mode)) { 3724 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3725 "copy from user space failed\n")); 3726 return (1); 3727 } 3728 } 3729 3730 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3731 } 3732 3733 dcmd->cmd = kdcmd->cmd; 3734 dcmd->cmd_status = kdcmd->cmd_status; 3735 dcmd->sge_count = kdcmd->sge_count; 3736 dcmd->timeout = kdcmd->timeout; 3737 dcmd->data_xfer_len = kdcmd->data_xfer_len; 3738 dcmd->opcode = kdcmd->opcode; 3739 3740 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ); 3741 3742 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64; 3743 dcmd->sgl.sge32[0].length = xferlen; 3744 dcmd->sgl.sge32[0].phys_addr = kphys_addr; 3745 3746 cmd->sync_cmd = MEGASAS_TRUE; 3747 cmd->frame_count = 1; 3748 3749 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3750 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n")); 3751 } else { 3752 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3753 3754 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf, 3755 xferlen, mode)) { 3756 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3757 "copy to user space failed\n")); 3758 return (1); 3759 } 3760 } 3761 } 3762 3763 kdcmd->cmd_status = dcmd->cmd_status; 3764 3765 if (xferlen) { 3766 /* free kernel buffer */ 3767 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 3768 return (1); 3769 } 3770 3771 return (0); 3772 } 3773 3774 /* 3775 * issue_mfi_smp 3776 */ 3777 static int 3778 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3779 struct megasas_cmd *cmd, int mode) 3780 { 3781 void *request_ubuf; 3782 void *response_ubuf; 3783 uint32_t request_xferlen = 0; 3784 uint32_t response_xferlen = 0; 3785 uint_t model; 3786 dma_obj_t request_dma_obj; 3787 dma_obj_t response_dma_obj; 3788 struct megasas_smp_frame *ksmp; 3789 struct megasas_smp_frame *smp; 3790 struct megasas_sge32 *sge32; 3791 #ifndef _ILP32 3792 struct megasas_sge64 *sge64; 3793 #endif 3794 3795 smp = &cmd->frame->smp; 3796 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0]; 3797 3798 model = ddi_model_convert_from(mode & FMODELS); 3799 if 
(model == DDI_MODEL_ILP32) { 3800 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3801 3802 sge32 = &ksmp->sgl[0].sge32[0]; 3803 response_xferlen = sge32[0].length; 3804 request_xferlen = sge32[1].length; 3805 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3806 "response_xferlen = %x, request_xferlen = %x", 3807 response_xferlen, request_xferlen)); 3808 3809 /* SJ! - ubuf needs to be virtual address. */ 3810 3811 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3812 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3813 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3814 "response_ubuf = %p, request_ubuf = %p", 3815 response_ubuf, request_ubuf)); 3816 } else { 3817 #ifdef _ILP32 3818 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3819 3820 sge32 = &ksmp->sgl[0].sge32[0]; 3821 response_xferlen = sge32[0].length; 3822 request_xferlen = sge32[1].length; 3823 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3824 "response_xferlen = %x, request_xferlen = %x", 3825 response_xferlen, request_xferlen)); 3826 3827 /* SJ! - ubuf needs to be virtual address. */ 3828 3829 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3830 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3831 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3832 "response_ubuf = %p, request_ubuf = %p", 3833 response_ubuf, request_ubuf)); 3834 #else 3835 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 3836 3837 sge64 = &ksmp->sgl[0].sge64[0]; 3838 response_xferlen = sge64[0].length; 3839 request_xferlen = sge64[1].length; 3840 3841 /* SJ! - ubuf needs to be virtual address. */ 3842 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 3843 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 3844 #endif 3845 } 3846 if (request_xferlen) { 3847 /* means IOCTL requires DMA */ 3848 /* allocate the data transfer buffer */ 3849 request_dma_obj.size = request_xferlen; 3850 request_dma_obj.dma_attr = megasas_generic_dma_attr; 3851 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3852 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3853 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 3854 request_dma_obj.dma_attr.dma_attr_align = 1; 3855 3856 /* allocate kernel buffer for DMA */ 3857 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) { 3858 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3859 "could not data transfer buffer alloc.")); 3860 return (DDI_FAILURE); 3861 } 3862 3863 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3864 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer, 3865 request_xferlen, mode)) { 3866 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3867 "copy from user space failed\n")); 3868 return (1); 3869 } 3870 } 3871 3872 if (response_xferlen) { 3873 /* means IOCTL requires DMA */ 3874 /* allocate the data transfer buffer */ 3875 response_dma_obj.size = response_xferlen; 3876 response_dma_obj.dma_attr = megasas_generic_dma_attr; 3877 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3878 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3879 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 3880 response_dma_obj.dma_attr.dma_attr_align = 1; 3881 3882 /* allocate kernel buffer for DMA */ 3883 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) { 3884 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3885 "could not data transfer buffer alloc.")); 3886 return (DDI_FAILURE); 3887 } 3888 3889 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3890 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer, 3891 
response_xferlen, mode)) { 3892 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3893 "copy from user space failed\n")); 3894 return (1); 3895 } 3896 } 3897 3898 smp->cmd = ksmp->cmd; 3899 smp->cmd_status = ksmp->cmd_status; 3900 smp->connection_status = ksmp->connection_status; 3901 smp->sge_count = ksmp->sge_count; 3902 /* smp->context = ksmp->context; */ 3903 smp->timeout = ksmp->timeout; 3904 smp->data_xfer_len = ksmp->data_xfer_len; 3905 3906 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr, 3907 sizeof (uint64_t)); 3908 3909 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64; 3910 3911 model = ddi_model_convert_from(mode & FMODELS); 3912 if (model == DDI_MODEL_ILP32) { 3913 con_log(CL_ANN1, (CE_NOTE, 3914 "handle_drv_ioctl: DDI_MODEL_ILP32")); 3915 3916 sge32 = &smp->sgl[0].sge32[0]; 3917 sge32[0].length = response_xferlen; 3918 sge32[0].phys_addr = 3919 response_dma_obj.dma_cookie[0].dmac_address; 3920 sge32[1].length = request_xferlen; 3921 sge32[1].phys_addr = 3922 request_dma_obj.dma_cookie[0].dmac_address; 3923 } else { 3924 #ifdef _ILP32 3925 con_log(CL_ANN1, (CE_NOTE, 3926 "handle_drv_ioctl: DDI_MODEL_ILP32")); 3927 sge32 = &smp->sgl[0].sge32[0]; 3928 sge32[0].length = response_xferlen; 3929 sge32[0].phys_addr = 3930 response_dma_obj.dma_cookie[0].dmac_address; 3931 sge32[1].length = request_xferlen; 3932 sge32[1].phys_addr = 3933 request_dma_obj.dma_cookie[0].dmac_address; 3934 #else 3935 con_log(CL_ANN1, (CE_NOTE, 3936 "issue_mfi_smp: DDI_MODEL_LP64")); 3937 sge64 = &smp->sgl[0].sge64[0]; 3938 sge64[0].length = response_xferlen; 3939 sge64[0].phys_addr = 3940 response_dma_obj.dma_cookie[0].dmac_address; 3941 sge64[1].length = request_xferlen; 3942 sge64[1].phys_addr = 3943 request_dma_obj.dma_cookie[0].dmac_address; 3944 #endif 3945 } 3946 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3947 "smp->response_xferlen = %d, smp->request_xferlen = %d " 3948 "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length, 3949 smp->data_xfer_len)); 3950 3951 cmd->sync_cmd = MEGASAS_TRUE; 3952 cmd->frame_count = 1; 3953 3954 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3955 con_log(CL_ANN, (CE_WARN, 3956 "issue_mfi_smp: fw_ioctl failed\n")); 3957 } else { 3958 con_log(CL_ANN1, (CE_NOTE, 3959 "issue_mfi_smp: copy to user space\n")); 3960 3961 if (request_xferlen) { 3962 if (ddi_copyout(request_dma_obj.buffer, request_ubuf, 3963 request_xferlen, mode)) { 3964 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3965 "copy to user space failed\n")); 3966 return (1); 3967 } 3968 } 3969 3970 if (response_xferlen) { 3971 if (ddi_copyout(response_dma_obj.buffer, response_ubuf, 3972 response_xferlen, mode)) { 3973 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 3974 "copy to user space failed\n")); 3975 return (1); 3976 } 3977 } 3978 } 3979 3980 ksmp->cmd_status = smp->cmd_status; 3981 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 3982 smp->cmd_status)); 3983 3984 3985 if (request_xferlen) { 3986 /* free kernel buffer */ 3987 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS) 3988 return (1); 3989 } 3990 3991 if (response_xferlen) { 3992 /* free kernel buffer */ 3993 if (mega_free_dma_obj(instance, response_dma_obj) != 3994 DDI_SUCCESS) 3995 return (1); 3996 } 3997 3998 return (0); 3999 } 4000 4001 /* 4002 * issue_mfi_stp 4003 */ 4004 static int 4005 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4006 struct megasas_cmd *cmd, int mode) 4007 { 4008 void *fis_ubuf; 4009 void *data_ubuf; 4010 uint32_t fis_xferlen = 0; 4011 uint32_t 
/*
 * issue_mfi_stp
 */
static int
issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    struct megasas_cmd *cmd, int mode)
{
	void *fis_ubuf;
	void *data_ubuf;
	uint32_t fis_xferlen = 0;
	uint32_t data_xferlen = 0;
	uint_t model;
	dma_obj_t fis_dma_obj;
	dma_obj_t data_dma_obj;
	struct megasas_stp_frame *kstp;
	struct megasas_stp_frame *stp;

	stp = &cmd->frame->stp;
	kstp = (struct megasas_stp_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));

		fis_xferlen = kstp->sgl.sge64[0].length;
		data_xferlen = kstp->sgl.sge64[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
#endif
	}

	if (fis_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		fis_dma_obj.size = fis_xferlen;
		fis_dma_obj.dma_attr = megasas_generic_dma_attr;
		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
		fis_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
		    fis_xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}

	if (data_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
		    "data_xferlen = %x", data_ubuf, data_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		data_dma_obj.size = data_xferlen;
		data_dma_obj.dma_attr = megasas_generic_dma_attr;
		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
		data_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not allocate data transfer buffer."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(data_ubuf, (void *)data_dma_obj.buffer,
		    data_xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}
	stp->cmd = kstp->cmd;
	stp->cmd_status = kstp->cmd_status;
	stp->connection_status = kstp->connection_status;
	stp->target_id = kstp->target_id;
	stp->sge_count = kstp->sge_count;
	/* stp->context = kstp->context; */
	stp->timeout = kstp->timeout;
	stp->data_xfer_len = kstp->data_xfer_len;

	bcopy((void *)kstp->fis, (void *)stp->fis, 10);

	stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
	stp->stp_flags = kstp->stp_flags;
	stp->sgl.sge32[0].length = fis_xferlen;
	stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
	stp->sgl.sge32[1].length = data_xferlen;
	stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
	} else {

		if (fis_xferlen) {
			if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
			    fis_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}

		if (data_xferlen) {
			if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
			    data_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}
	}

	kstp->cmd_status = stp->cmd_status;

	if (fis_xferlen) {
		/* free kernel buffer */
		if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
			return (1);
	}

	if (data_xferlen) {
		/* free kernel buffer */
		if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
			return (1);
	}

	return (0);
}

/*
 * fill_up_drv_ver
 */
static void
fill_up_drv_ver(struct megasas_drv_ver *dv)
{
	(void) memset(dv, 0, sizeof (struct megasas_drv_ver));

	(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
	(void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
	(void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
	(void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
	(void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
	    strlen(MEGASAS_RELDATE));
}
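/*
 * Worked example (illustrative values only): handle_drv_ioctl() below
 * recovers the controller's PCI bus/device/function from the first word of
 * the "reg" property, which encodes them in bits <23:16>, <15:11> and
 * <10:8> respectively.  For a device at bus 2, device 5, function 1 that
 * word would be 0x00022900, giving
 *
 *	busNumber      = (0x00022900 >> 16) & 0xff  = 0x02
 *	deviceNumber   = (0x00022900 >> 11) & 0x1f  = 0x05
 *	functionNumber = (0x00022900 >> 8)  & 0x07  = 0x01
 */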
/*
 * handle_drv_ioctl
 */
static int
handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    int mode)
{
	int i;
	int rval = 0;
	int *props = NULL;
	void *ubuf;

	uint8_t *pci_conf_buf;
	uint32_t xferlen;
	uint32_t num_props;
	uint_t model;
	struct megasas_dcmd_frame *kdcmd;
	struct megasas_drv_ver dv;
	struct megasas_pci_information pi;

	kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));

		xferlen = kdcmd->sgl.sge32[0].length;

		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
		xferlen = kdcmd->sgl.sge32[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_LP64"));
		xferlen = kdcmd->sgl.sge64[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
#endif
	}
	con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
	    "dataBuf=%p size=%d bytes", ubuf, xferlen));

	switch (kdcmd->opcode) {
	case MR_DRIVER_IOCTL_DRIVER_VERSION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MR_DRIVER_IOCTL_DRIVER_VERSION"));

		fill_up_drv_ver(&dv);

		if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_DRIVER_VERSION : "
			    "copy to user space failed\n"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	case MR_DRIVER_IOCTL_PCI_INFORMATION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MR_DRIVER_IOCTL_PCI_INFORMATION"));

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
		    0, "reg", &props, &num_props)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
			    "ddi_prop_lookup_int_array failed\n"));
			rval = 1;
		} else {
			pi.busNumber = (props[0] >> 16) & 0xFF;
			pi.deviceNumber = (props[0] >> 11) & 0x1f;
			pi.functionNumber = (props[0] >> 8) & 0x7;
			ddi_prop_free((void *)props);
		}

		pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;

		for (i = 0; i < (sizeof (struct megasas_pci_information) -
		    offsetof(struct megasas_pci_information, pciHeaderInfo));
		    i++) {
			pci_conf_buf[i] =
			    pci_config_get8(instance->pci_handle, i);
		}

		if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
			    "copy to user space failed\n"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
		    "invalid driver specific IOCTL opcode = 0x%x",
		    kdcmd->opcode));
		kdcmd->cmd_status = 1;
		rval = 1;
		break;
	}

	return (rval);
}

/*
 * handle_mfi_ioctl
 */
static int
handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    int mode)
{
	int rval = 0;

	struct megasas_header *hdr;
	struct megasas_cmd *cmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "megasas: "
		    "failed to get a cmd packet\n"));
		return (1);
	}

	hdr = (struct megasas_header *)&ioctl->frame[0];

	switch (hdr->cmd) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
		rval = 1;
		break;
	}

	return_mfi_pkt(instance, cmd);
	if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = 1;

	return (rval);
}
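/*
 * Note on the ioctl pass-through flow above: each request borrows a command
 * packet with get_mfi_pkt(), dispatches it to the appropriate issue_mfi_*
 * helper based on the MFI opcode in the frame header, and returns the packet
 * with return_mfi_pkt() before the ioctl completes.  megasas_common_check()
 * is then run so that any DMA- or access-handle fault raised while the frame
 * was outstanding is reported to FMA and reflected in the ioctl's return
 * value.
 */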
/*
 * AEN
 */
static int
handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
{
	int rval = 0;

	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}
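/*
 * Worked example (hypothetical values): suppose the AEN registration already
 * outstanding with the firmware was issued with class 2 and locale 0x0001,
 * and an application now asks for class 0 with locale 0x0002.  The pending
 * class is numerically larger (less inclusive) and the new locale bit is not
 * already covered, so register_mfi_aen() below aborts the pending command
 * and re-registers with the superset: the smaller class (0) and the OR of
 * the two locales (0x0003).
 */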
static int
register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	union megasas_evt_class_locale curr_aen;
	union megasas_evt_class_locale prev_aen;

	/*
	 * If there is already an AEN pending (aen_cmd), check whether the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have.  If it is, then we don't have to do
	 * anything: whichever events the current AEN request subscribes to
	 * have already been subscribed to.
	 *
	 * If the old command is _not_ inclusive, then we have to abort that
	 * command, form a class_locale that is a superset of both the old
	 * and the current one, and re-issue it to the FW.
	 */

	curr_aen.word = class_locale_word;

	if (instance->aen_cmd) {
		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values.  If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to the FW.  They are
		 * automatically included.
		 *
		 * Locale numbers don't have such a hierarchy.  They are
		 * bitmap values.
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request.  Nothing to do.
			 */
			return (0);
		} else {
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, instance->aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort previous AEN command\n"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = class_locale_word;
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd)
		return (-ENOMEM);

	dcmd = &cmd->frame->dcmd;

	/* for (i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* Prepare DCMD for aen registration */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr =
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);

	instance->aen_seq_num = seq_num;

	/*
	 * Store a reference to the cmd used to register for AEN.  When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app.
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16(&instance->fw_outstanding, 1); */
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}

static void
display_scsi_inquiry(caddr_t scsi_inq)
{
#define	MAX_SCSI_DEVICE_CODE	14
	int i;
	char inquiry_buf[256] = {0};
	int len;
	const char *const scsi_device_types[] = {
		"Direct-Access ",
		"Sequential-Access",
		"Printer ",
		"Processor ",
		"WORM ",
		"CD-ROM ",
		"Scanner ",
		"Optical Device ",
		"Medium Changer ",
		"Communications ",
		"Unknown ",
		"Unknown ",
		"Unknown ",
		"Enclosure ",
	};

	len = 0;

	len += snprintf(inquiry_buf + len, 256 - len, " Vendor: ");
	for (i = 8; i < 16; i++) {
		len += snprintf(inquiry_buf + len, 256 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 256 - len, " Model: ");

	for (i = 16; i < 32; i++) {
		len += snprintf(inquiry_buf + len, 256 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 256 - len, " Rev: ");

	for (i = 32; i < 36; i++) {
		len += snprintf(inquiry_buf + len, 256 - len, "%c",
		    scsi_inq[i]);
	}

	len += snprintf(inquiry_buf + len, 256 - len, "\n");

	i = scsi_inq[0] & 0x1f;

	len += snprintf(inquiry_buf + len, 256 - len, " Type: %s ",
	    i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
	    "Unknown ");

	len += snprintf(inquiry_buf + len, 256 - len,
	    " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);

	if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
		len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
	} else {
		len += snprintf(inquiry_buf + len, 256 - len, "\n");
	}

	con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
}

static int
read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return ((int)RD_OB_MSG_0(instance));
}

static int
read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return ((int)RD_OB_SCRATCH_PAD_0(instance));
}

static void
issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	atomic_add_16(&instance->fw_outstanding, 1);

	/* Issue the command to the FW */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);
}

static void
issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	atomic_add_16(&instance->fw_outstanding, 1);

	/* Issue the command to the FW */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}
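/*
 * Illustrative note: the two issue_cmd variants above encode the frame
 * descriptor differently when writing the inbound queue port.  Ignoring the
 * host_to_le32() conversion, the xscale path shifts the frame physical
 * address right by three bits and ORs in (frame_count - 1), so a one-frame
 * command at physical address 0x1000 is posted as 0x200.  The ppc (1078)
 * path keeps the address intact and ORs in ((frame_count - 1) << 1) | 1,
 * so the same command is posted as 0x1001.
 */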
/*
 * issue_cmd_in_sync_mode
 */
static int
issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	if (i < (msecs - 1)) {
		return (0);
	} else {
		return (1);
	}
}

static int
issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));

	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));

	if (i < (msecs - 1)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * issue_cmd_in_poll_mode
 */
static int
issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct megasas_header *frame_hdr;

	frame_hdr = (struct megasas_header *)cmd->frame;
	frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (frame_hdr->cmd_status ==
	    MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct megasas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));

	frame_hdr = (struct megasas_header *)cmd->frame;
	frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (frame_hdr->cmd_status ==
	    MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
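/*
 * Completion detection for the two internal submission modes above: in sync
 * mode the caller sleeps on int_cmd_cv until the command's completion path
 * changes cmd->cmd_status away from ENODATA and signals the condition
 * variable.  In poll mode the reply queue is bypassed entirely; the frame is
 * stamped with MFI_CMD_STATUS_POLL_MODE (0xFF) and flagged
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE, and the caller spins in 1 ms steps
 * (drv_usecwait(MILLISEC)) until the firmware overwrites the status byte or
 * the MFI_POLL_TIMEOUT_SECS budget is exhausted.
 */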
static void
enable_intr_xscale(struct megasas_instance *instance)
{
	MFI_ENABLE_INTR(instance);
}

static void
enable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));

	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/*
	 * As the 1078DE is the same as the 1078 chip, the interrupt mask
	 * remains the same.
	 */
	/* WR_OB_INTR_MASK(~0x80000000, instance); */
	WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x\n", mask));
}

static void
disable_intr_xscale(struct megasas_instance *instance)
{
	MFI_DISABLE_INTR(instance);
}

static void
disable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

static int
intr_ack_xscale(struct megasas_instance *instance)
{
	uint32_t status;

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	WR_OB_INTR_STATUS(status, instance);

	return (DDI_INTR_CLAIMED);
}
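/*
 * Note on the ppc (1078) interrupt path handled above and below: the
 * outbound doorbell has to be cleared explicitly, and the interrupt mask
 * and status registers are read back after each write.  Those read-backs
 * are pure dummy reads whose only purpose is to flush the posted PCI write
 * out to the chip before the routine returns.
 */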
static int
intr_ack_ppc(struct megasas_instance *instance)
{
	uint32_t status;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));

	/*
	 * As the 1078DE is the same as the 1078 chip, the status field
	 * remains the same.
	 */
	if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));

	return (DDI_INTR_CLAIMED);
}

static int
megasas_common_check(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int ret = DDI_SUCCESS;

	if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
	    DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
	    DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}
	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
		ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
		if (cmd->pkt != NULL) {
			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
			cmd->pkt->pkt_statistics = 0;
		}
		ret = DDI_FAILURE;
	}

	return (ret);
}

/*ARGSUSED*/
static int
megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * As the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
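/*
 * Minimal sketch (hypothetical call site, not from the original source): how
 * the FMA helpers below are typically used.  A caller that has just used a
 * register or DMA handle checks it and, on failure, reports the service
 * impact before failing the request, e.g.
 *
 *	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
 *		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
 *		return (DDI_FAILURE);
 *	}
 *
 * megasas_common_check() above is the aggregated form of this pattern for a
 * completed command.
 */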
static void
megasas_fm_init(struct megasas_instance *instance)
{
	/* Need to change iblock to priority for new MSI intr */
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (instance->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_setup(instance->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_register(instance->dip,
			    megasas_fm_error_cb, (void *)instance);
		}
	} else {
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		megasas_generic_dma_attr.dma_attr_flags = 0;
	}
}

static void
megasas_fm_fini(struct megasas_instance *instance)
{
	/* Only unregister FMA capabilities if registered */
	if (instance->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_unregister(instance->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_teardown(instance->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(instance->dip);

		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		megasas_generic_dma_attr.dma_attr_flags = 0;
	}
}

int
megasas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);

	return (de.fme_status);
}

int
megasas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL) {
		return (DDI_FAILURE);
	}

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);

	return (de.fme_status);
}

void
megasas_fm_ereport(struct megasas_instance *instance, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
		ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
	}
}
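/*
 * Minimal usage sketch for megasas_fm_ereport() (illustrative only, not a
 * call site from the original source): a detector that has decided a device
 * access went bad would post a device-level ereport and then declare the
 * service impact, e.g.
 *
 *	megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
 *	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
 *
 * The detail string is combined with DDI_FM_DEVICE to form the ereport
 * class, so only the leaf class name needs to be passed in.
 */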