1 /* 2 * megaraid_sas.c: source for mega_sas driver 3 * 4 * MegaRAID device driver for SAS controllers 5 * Copyright (c) 2005-2008, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com> 11 * Seokmann Ju 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright notice, 17 * this list of conditions and the following disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above copyright notice, 20 * this list of conditions and the following disclaimer in the documentation 21 * and/or other materials provided with the distribution. 22 * 23 * 3. Neither the name of the author nor the names of its contributors may be 24 * used to endorse or promote products derived from this software without 25 * specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * DAMAGE. 39 */ 40 41 /* 42 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 43 * Use is subject to license terms. 
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/pci.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/signal.h>

#include "megaraid_sas.h"

/*
 * Local static data
 */
/* soft-state anchor for all controller instances; set up in _init() */
static void *megasas_state = NULL;
/* console logging level consumed by the con_log() macro */
static int debug_level_g = CL_ANN;

/*
 * These SCSA entry points are declared weak: the driver tests the symbol
 * for non-NULL before calling (see megasas_open/close/ioctl), so it still
 * loads if the framework does not provide them.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl

/*
 * Template DMA attributes; per-instance copies are taken in attach() and
 * dma_attr_sglen is overwritten with the controller's real SGE limit.
 */
static ddi_dma_attr_t megasas_generic_dma_attr = {
    DMA_ATTR_V0,                    /* dma_attr_version */
    (unsigned long long)0,          /* low DMA address range */
    (unsigned long long)0xffffffff, /* high DMA address range */
    (unsigned long long)0xffffffff, /* DMA counter register */
    8,                              /* DMA address alignment */
    0x07,                           /* DMA burstsizes */
    1,                              /* min DMA size */
    (unsigned long long)0xffffffff, /* max DMA size */
    (unsigned long long)0xffffffff, /* segment boundary */
    MEGASAS_MAX_SGE_CNT,            /* dma_attr_sglen */
    512,                            /* granularity of device */
    0                               /* bus specific DMA flags */
};

int32_t megasas_max_cap_maxxfer = 0x1000000;

/*
 * cb_ops contains base level routines
 */
static struct cb_ops megasas_cb_ops = {
    megasas_open,       /* open */
    megasas_close,      /* close */
    nodev,              /* strategy */
    nodev,              /* print */
    nodev,              /* dump */
    nodev,              /* read */
    nodev,              /* write */
    megasas_ioctl,      /* ioctl */
    nodev,              /* devmap */
    nodev,              /* mmap */
    nodev,              /* segmap */
    nochpoll,           /* poll */
    nodev,              /* cb_prop_op */
    0,                  /* streamtab */
    D_NEW | D_HOTPLUG,  /* cb_flag */
    CB_REV,             /* cb_rev */
    nodev,              /* cb_aread */
    nodev               /* cb_awrite */
};

/*
 * dev_ops contains configuration routines
 */
static struct dev_ops megasas_ops = {
    DEVO_REV,           /* rev, */
    0,                  /* refcnt */
    megasas_getinfo,    /* getinfo */
    nulldev,            /* identify */
    nulldev,            /* probe */
    megasas_attach,     /* attach */
    megasas_detach,     /* detach */
    megasas_reset,      /* reset */
    &megasas_cb_ops,    /* char/block ops */
    NULL                /* bus ops */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
    &mod_driverops,     /* module type - driver */
    MEGASAS_VERSION,
    &megasas_ops,       /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,           /* ml_rev - must be MODREV_1 */
    &modldrv,           /* ml_linkage */
    NULL                /* end of driver linkage */
};

/* register access attributes: controller registers are little-endian */
static struct ddi_device_acc_attr endian_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};


/*
 * ************************************************************************** *
 *                                                                            *
 *           common entry points - for loadable kernel modules                *
 *                                                                            *
 * ************************************************************************** *
 */

/*
 * _init - initialize a loadable module
 * @void
 *
 * The driver should perform any one-time resource allocation or data
 * initialization during driver loading in _init(). For example, the driver
 * should initialize any mutexes global to the driver in this routine.
 * The driver should not, however, use _init() to allocate or initialize
 * anything that has to do with a particular instance of the device.
 * Per-instance initialization must be done in attach().
176 */ 177 int 178 _init(void) 179 { 180 int ret; 181 182 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 183 184 ret = ddi_soft_state_init(&megasas_state, 185 sizeof (struct megasas_instance), 0); 186 187 if (ret != 0) { 188 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state")); 189 return (ret); 190 } 191 192 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 193 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba")); 194 ddi_soft_state_fini(&megasas_state); 195 return (ret); 196 } 197 198 ret = mod_install(&modlinkage); 199 200 if (ret != 0) { 201 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed")); 202 scsi_hba_fini(&modlinkage); 203 ddi_soft_state_fini(&megasas_state); 204 } 205 206 return (ret); 207 } 208 209 /* 210 * _info - returns information about a loadable module. 211 * @void 212 * 213 * _info() is called to return module information. This is a typical entry 214 * point that does predefined role. It simply calls mod_info(). 215 */ 216 int 217 _info(struct modinfo *modinfop) 218 { 219 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 220 221 return (mod_info(&modlinkage, modinfop)); 222 } 223 224 /* 225 * _fini - prepare a loadable module for unloading 226 * @void 227 * 228 * In _fini(), the driver should release any resources that were allocated in 229 * _init(). The driver must remove itself from the system module list. 
230 */ 231 int 232 _fini(void) 233 { 234 int ret; 235 236 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 237 238 if ((ret = mod_remove(&modlinkage)) != 0) 239 return (ret); 240 241 scsi_hba_fini(&modlinkage); 242 243 ddi_soft_state_fini(&megasas_state); 244 245 return (ret); 246 } 247 248 249 /* 250 * ************************************************************************** * 251 * * 252 * common entry points - for autoconfiguration * 253 * * 254 * ************************************************************************** * 255 */ 256 /* 257 * probe - called before attach for a given instance 258 * This is an optional entry for self-identifiable device. 259 * @dip: 260 * 261 * static int megasas_probe(dev_info_t *dip) 262 * { 263 * return (DDI_SUCCESS); 264 * } 265 */ 266 267 /* 268 * attach - adds a device to the system as part of initialization 269 * @dip: 270 * @cmd: 271 * 272 * The kernel calls a driver's attach() entry point to attach an instance of 273 * a device (for MegaRAID, it is instance of a controller) or to resume 274 * operation for an instance of a device that has been suspended or has been 275 * shut down by the power management framework 276 * The attach() entry point typically includes the following types of 277 * processing: 278 * - allocate a soft-state structure for the device instance (for MegaRAID, 279 * controller instance) 280 * - initialize per-instance mutexes 281 * - initialize condition variables 282 * - register the device's interrupts (for MegaRAID, controller's interrupts) 283 * - map the registers and memory of the device instance (for MegaRAID, 284 * controller instance) 285 * - create minor device nodes for the device instance (for MegaRAID, 286 * controller instance) 287 * - report that the device instance (for MegaRAID, controller instance) has 288 * attached 289 */ 290 static int 291 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 292 { 293 int instance_no; 294 int nregs; 295 uint8_t added_isr_f = 0; 
296 uint8_t added_soft_isr_f = 0; 297 uint8_t create_devctl_node_f = 0; 298 uint8_t create_scsi_node_f = 0; 299 uint8_t create_ioc_node_f = 0; 300 uint8_t tran_alloc_f = 0; 301 uint8_t irq; 302 uint16_t vendor_id; 303 uint16_t device_id; 304 uint16_t subsysvid; 305 uint16_t subsysid; 306 uint16_t command; 307 308 scsi_hba_tran_t *tran; 309 ddi_dma_attr_t tran_dma_attr = megasas_generic_dma_attr; 310 struct megasas_instance *instance; 311 312 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 313 314 instance_no = ddi_get_instance(dip); 315 316 /* 317 * Since we know that some instantiations of this device can be 318 * plugged into slave-only SBus slots, check to see whether this is 319 * one such. 320 */ 321 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 322 con_log(CL_ANN, (CE_WARN, 323 "mega%d: Device in slave-only slot, unused", instance_no)); 324 325 return (DDI_FAILURE); 326 } 327 328 switch (cmd) { 329 case DDI_ATTACH: 330 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH")); 331 /* allocate the soft state for the instance */ 332 if (ddi_soft_state_zalloc(megasas_state, instance_no) 333 != DDI_SUCCESS) { 334 con_log(CL_ANN, (CE_WARN, 335 "mega%d: Failed to allocate soft state", 336 instance_no)); 337 338 return (DDI_FAILURE); 339 } 340 341 instance = (struct megasas_instance *)ddi_get_soft_state 342 (megasas_state, instance_no); 343 344 if (instance == NULL) { 345 con_log(CL_ANN, (CE_WARN, 346 "mega%d: Bad soft state", instance_no)); 347 348 ddi_soft_state_free(megasas_state, instance_no); 349 350 return (DDI_FAILURE); 351 } 352 353 bzero((caddr_t)instance, 354 sizeof (struct megasas_instance)); 355 356 instance->func_ptr = kmem_zalloc( 357 sizeof (struct megasas_func_ptr), KM_SLEEP); 358 ASSERT(instance->func_ptr); 359 360 /* Setup the PCI configuration space handles */ 361 if (pci_config_setup(dip, &instance->pci_handle) != 362 DDI_SUCCESS) { 363 con_log(CL_ANN, (CE_WARN, 364 "mega%d: pci config setup failed ", 365 instance_no)); 366 367 
kmem_free(instance->func_ptr, 368 sizeof (struct megasas_func_ptr)); 369 ddi_soft_state_free(megasas_state, instance_no); 370 371 return (DDI_FAILURE); 372 } 373 374 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 375 con_log(CL_ANN, (CE_WARN, 376 "megaraid: failed to get registers.")); 377 378 pci_config_teardown(&instance->pci_handle); 379 kmem_free(instance->func_ptr, 380 sizeof (struct megasas_func_ptr)); 381 ddi_soft_state_free(megasas_state, instance_no); 382 383 return (DDI_FAILURE); 384 } 385 386 vendor_id = pci_config_get16(instance->pci_handle, 387 PCI_CONF_VENID); 388 device_id = pci_config_get16(instance->pci_handle, 389 PCI_CONF_DEVID); 390 391 subsysvid = pci_config_get16(instance->pci_handle, 392 PCI_CONF_SUBVENID); 393 subsysid = pci_config_get16(instance->pci_handle, 394 PCI_CONF_SUBSYSID); 395 396 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 397 (pci_config_get16(instance->pci_handle, 398 PCI_CONF_COMM) | PCI_COMM_ME)); 399 irq = pci_config_get8(instance->pci_handle, 400 PCI_CONF_ILINE); 401 #ifdef lint 402 irq = irq; 403 #endif 404 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 405 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n", 406 instance_no, vendor_id, device_id, subsysvid, 407 subsysid, pci_config_get8(instance->pci_handle, 408 PCI_CONF_ILINE), MEGASAS_VERSION)); 409 410 /* enable bus-mastering */ 411 command = pci_config_get16(instance->pci_handle, 412 PCI_CONF_COMM); 413 414 if (!(command & PCI_COMM_ME)) { 415 command |= PCI_COMM_ME; 416 417 pci_config_put16(instance->pci_handle, 418 PCI_CONF_COMM, command); 419 420 con_log(CL_ANN, (CE_CONT, "megaraid[%d]: " 421 "enable bus-mastering\n", instance_no)); 422 } else { 423 con_log(CL_DLEVEL1, (CE_CONT, "megaraid[%d]: " 424 "bus-mastering already set\n", instance_no)); 425 } 426 427 /* initialize function pointers */ 428 if ((device_id == PCI_DEVICE_ID_LSI_1078) || 429 (device_id == PCI_DEVICE_ID_LSI_1078DE)) { 430 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 431 "1078R/DE detected\n", 
instance_no)); 432 instance->func_ptr->read_fw_status_reg = 433 read_fw_status_reg_ppc; 434 instance->func_ptr->issue_cmd = issue_cmd_ppc; 435 instance->func_ptr->issue_cmd_in_sync_mode = 436 issue_cmd_in_sync_mode_ppc; 437 instance->func_ptr->issue_cmd_in_poll_mode = 438 issue_cmd_in_poll_mode_ppc; 439 instance->func_ptr->enable_intr = 440 enable_intr_ppc; 441 instance->func_ptr->disable_intr = 442 disable_intr_ppc; 443 instance->func_ptr->intr_ack = intr_ack_ppc; 444 } else { 445 con_log(CL_DLEVEL1, (CE_CONT, "megasas[%d]: " 446 "1064/8R detected\n", instance_no)); 447 instance->func_ptr->read_fw_status_reg = 448 read_fw_status_reg_xscale; 449 instance->func_ptr->issue_cmd = 450 issue_cmd_xscale; 451 instance->func_ptr->issue_cmd_in_sync_mode = 452 issue_cmd_in_sync_mode_xscale; 453 instance->func_ptr->issue_cmd_in_poll_mode = 454 issue_cmd_in_poll_mode_xscale; 455 instance->func_ptr->enable_intr = 456 enable_intr_xscale; 457 instance->func_ptr->disable_intr = 458 disable_intr_xscale; 459 instance->func_ptr->intr_ack = 460 intr_ack_xscale; 461 } 462 463 instance->baseaddress = 464 pci_config_get32(instance->pci_handle, 0x10); 465 instance->baseaddress &= 0x0fffc; 466 467 instance->dip = dip; 468 instance->vendor_id = vendor_id; 469 instance->device_id = device_id; 470 instance->subsysvid = subsysvid; 471 instance->subsysid = subsysid; 472 473 /* setup the mfi based low level driver */ 474 if (init_mfi(instance) != DDI_SUCCESS) { 475 con_log(CL_ANN, (CE_WARN, "megaraid: " 476 "could not initialize the low level driver")); 477 478 goto fail_attach; 479 } 480 481 /* 482 * Allocate the interrupt blocking cookie. 483 * It represents the information the framework 484 * needs to block interrupts. This cookie will 485 * be used by the locks shared accross our ISR. 486 * These locks must be initialized before we 487 * register our ISR. 
488 * ddi_add_intr(9F) 489 */ 490 if (ddi_get_iblock_cookie(dip, 0, 491 &instance->iblock_cookie) != DDI_SUCCESS) { 492 493 goto fail_attach; 494 } 495 496 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH, 497 &instance->soft_iblock_cookie) != DDI_SUCCESS) { 498 499 goto fail_attach; 500 } 501 502 /* 503 * Initialize the driver mutexes common to 504 * normal/high level isr 505 */ 506 if (ddi_intr_hilevel(dip, 0)) { 507 instance->isr_level = HIGH_LEVEL_INTR; 508 mutex_init(&instance->cmd_pool_mtx, 509 "cmd_pool_mtx", MUTEX_DRIVER, 510 instance->soft_iblock_cookie); 511 mutex_init(&instance->cmd_pend_mtx, 512 "cmd_pend_mtx", MUTEX_DRIVER, 513 instance->soft_iblock_cookie); 514 } else { 515 /* 516 * Initialize the driver mutexes 517 * specific to soft-isr 518 */ 519 instance->isr_level = NORMAL_LEVEL_INTR; 520 mutex_init(&instance->cmd_pool_mtx, 521 "cmd_pool_mtx", MUTEX_DRIVER, 522 instance->iblock_cookie); 523 mutex_init(&instance->cmd_pend_mtx, 524 "cmd_pend_mtx", MUTEX_DRIVER, 525 instance->iblock_cookie); 526 } 527 528 mutex_init(&instance->completed_pool_mtx, 529 "completed_pool_mtx", MUTEX_DRIVER, 530 instance->iblock_cookie); 531 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx", 532 MUTEX_DRIVER, instance->iblock_cookie); 533 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx", 534 MUTEX_DRIVER, instance->iblock_cookie); 535 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx", 536 MUTEX_DRIVER, instance->iblock_cookie); 537 538 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 539 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL); 540 541 INIT_LIST_HEAD(&instance->completed_pool_list); 542 543 /* Register our isr. */ 544 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr, 545 (caddr_t)instance) != DDI_SUCCESS) { 546 con_log(CL_ANN, (CE_WARN, 547 " ISR did not register")); 548 549 goto fail_attach; 550 } 551 552 added_isr_f = 1; 553 554 /* Register our soft-isr for highlevel interrupts. 
*/ 555 if (instance->isr_level == HIGH_LEVEL_INTR) { 556 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, 557 &instance->soft_intr_id, NULL, NULL, 558 megasas_softintr, (caddr_t)instance) != 559 DDI_SUCCESS) { 560 con_log(CL_ANN, (CE_WARN, 561 " Software ISR did not register")); 562 563 goto fail_attach; 564 } 565 566 added_soft_isr_f = 1; 567 } 568 569 /* Allocate a transport structure */ 570 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 571 572 if (tran == NULL) { 573 con_log(CL_ANN, (CE_WARN, 574 "scsi_hba_tran_alloc failed")); 575 goto fail_attach; 576 } 577 578 tran_alloc_f = 1; 579 580 instance->tran = tran; 581 582 tran->tran_hba_private = instance; 583 tran->tran_tgt_private = NULL; 584 tran->tran_tgt_init = megasas_tran_tgt_init; 585 tran->tran_tgt_probe = scsi_hba_probe; 586 tran->tran_tgt_free = (void (*)())NULL; 587 tran->tran_init_pkt = megasas_tran_init_pkt; 588 tran->tran_start = megasas_tran_start; 589 tran->tran_abort = megasas_tran_abort; 590 tran->tran_reset = megasas_tran_reset; 591 tran->tran_bus_reset = megasas_tran_bus_reset; 592 tran->tran_getcap = megasas_tran_getcap; 593 tran->tran_setcap = megasas_tran_setcap; 594 tran->tran_destroy_pkt = megasas_tran_destroy_pkt; 595 tran->tran_dmafree = megasas_tran_dmafree; 596 tran->tran_sync_pkt = megasas_tran_sync_pkt; 597 tran->tran_reset_notify = NULL; 598 tran->tran_quiesce = megasas_tran_quiesce; 599 tran->tran_unquiesce = megasas_tran_unquiesce; 600 601 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 602 603 /* Attach this instance of the hba */ 604 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 605 != DDI_SUCCESS) { 606 con_log(CL_ANN, (CE_WARN, 607 "scsi_hba_attach failed\n")); 608 609 goto fail_attach; 610 } 611 612 /* create devctl node for cfgadm command */ 613 if (ddi_create_minor_node(dip, "devctl", 614 S_IFCHR, INST2DEVCTL(instance_no), 615 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 616 con_log(CL_ANN, (CE_WARN, 617 "megaraid: failed to create devctl node.")); 618 619 goto 
fail_attach; 620 } 621 622 create_devctl_node_f = 1; 623 624 /* create scsi node for cfgadm command */ 625 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 626 INST2SCSI(instance_no), 627 DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 628 DDI_FAILURE) { 629 con_log(CL_ANN, (CE_WARN, 630 "megaraid: failed to create scsi node.")); 631 632 goto fail_attach; 633 } 634 635 create_scsi_node_f = 1; 636 637 (void) sprintf(instance->iocnode, "%d:lsirdctl", 638 instance_no); 639 640 /* 641 * Create a node for applications 642 * for issuing ioctl to the driver. 643 */ 644 if (ddi_create_minor_node(dip, instance->iocnode, 645 S_IFCHR, INST2LSIRDCTL(instance_no), 646 DDI_PSEUDO, 0) == DDI_FAILURE) { 647 con_log(CL_ANN, (CE_WARN, 648 "megaraid: failed to create ioctl node.")); 649 650 goto fail_attach; 651 } 652 653 create_ioc_node_f = 1; 654 655 /* enable interrupt */ 656 instance->func_ptr->enable_intr(instance); 657 658 /* initiate AEN */ 659 if (start_mfi_aen(instance)) { 660 con_log(CL_ANN, (CE_WARN, 661 "megaraid: failed to initiate AEN.")); 662 goto fail_initiate_aen; 663 } 664 665 con_log(CL_DLEVEL1, (CE_NOTE, 666 "AEN started for instance %d.", instance_no)); 667 668 /* Finally! We are on the air. 
*/ 669 ddi_report_dev(dip); 670 break; 671 case DDI_PM_RESUME: 672 con_log(CL_ANN, (CE_NOTE, 673 "megasas: DDI_PM_RESUME")); 674 break; 675 case DDI_RESUME: 676 con_log(CL_ANN, (CE_NOTE, 677 "megasas: DDI_RESUME")); 678 break; 679 default: 680 con_log(CL_ANN, (CE_WARN, 681 "megasas: invalid attach cmd=%x", cmd)); 682 return (DDI_FAILURE); 683 } 684 685 return (DDI_SUCCESS); 686 687 fail_initiate_aen: 688 fail_attach: 689 if (create_devctl_node_f) { 690 ddi_remove_minor_node(dip, "devctl"); 691 } 692 693 if (create_scsi_node_f) { 694 ddi_remove_minor_node(dip, "scsi"); 695 } 696 697 if (create_ioc_node_f) { 698 ddi_remove_minor_node(dip, instance->iocnode); 699 } 700 701 if (tran_alloc_f) { 702 scsi_hba_tran_free(tran); 703 } 704 705 706 if (added_soft_isr_f) { 707 ddi_remove_softintr(instance->soft_intr_id); 708 } 709 710 if (added_isr_f) { 711 ddi_remove_intr(dip, 0, instance->iblock_cookie); 712 } 713 714 pci_config_teardown(&instance->pci_handle); 715 716 ddi_soft_state_free(megasas_state, instance_no); 717 718 con_log(CL_ANN, (CE_NOTE, 719 "megasas: return failure from mega_attach\n")); 720 721 return (DDI_FAILURE); 722 } 723 724 /* 725 * getinfo - gets device information 726 * @dip: 727 * @cmd: 728 * @arg: 729 * @resultp: 730 * 731 * The system calls getinfo() to obtain configuration information that only 732 * the driver knows. The mapping of minor numbers to device instance is 733 * entirely under the control of the driver. The system sometimes needs to ask 734 * the driver which device a particular dev_t represents. 735 * Given the device number return the devinfo pointer from the scsi_device 736 * structure. 
737 */ 738 /*ARGSUSED*/ 739 static int 740 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 741 { 742 int rval; 743 int megasas_minor = getminor((dev_t)arg); 744 745 struct megasas_instance *instance; 746 747 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 748 749 switch (cmd) { 750 case DDI_INFO_DEVT2DEVINFO: 751 instance = (struct megasas_instance *) 752 ddi_get_soft_state(megasas_state, 753 MINOR2INST(megasas_minor)); 754 755 if (instance == NULL) { 756 *resultp = NULL; 757 rval = DDI_FAILURE; 758 } else { 759 *resultp = instance->dip; 760 rval = DDI_SUCCESS; 761 } 762 break; 763 case DDI_INFO_DEVT2INSTANCE: 764 *resultp = (void *)instance; 765 rval = DDI_SUCCESS; 766 break; 767 default: 768 *resultp = NULL; 769 rval = DDI_FAILURE; 770 } 771 772 return (rval); 773 } 774 775 /* 776 * detach - detaches a device from the system 777 * @dip: pointer to the device's dev_info structure 778 * @cmd: type of detach 779 * 780 * A driver's detach() entry point is called to detach an instance of a device 781 * that is bound to the driver. The entry point is called with the instance of 782 * the device node to be detached and with DDI_DETACH, which is specified as 783 * the cmd argument to the entry point. 784 * This routine is called during driver unload. We free all the allocated 785 * resources and call the corresponding LLD so that it can also release all 786 * its resources. 
 */
static int
megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int instance_no;

    struct megasas_instance *instance;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    instance_no = ddi_get_instance(dip);

    instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
        instance_no);

    if (!instance) {
        con_log(CL_ANN, (CE_WARN,
            "megasas:%d could not get instance in detach",
            instance_no));

        return (DDI_FAILURE);
    }

    con_log(CL_ANN, (CE_NOTE,
        "megasas: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
        instance->vendor_id, instance->device_id, instance->subsysvid,
        instance->subsysid));

    switch (cmd) {
    case DDI_DETACH:
        con_log(CL_ANN, (CE_NOTE,
            "megasas_detach: DDI_DETACH\n"));

        /*
         * Teardown is the reverse of attach: HBA framework first,
         * then the in-flight AEN command, interrupts, MFI memory,
         * PCI handles, and finally the soft state.
         */
        if (scsi_hba_detach(dip) != DDI_SUCCESS) {
            con_log(CL_ANN, (CE_WARN,
                "megasas:%d failed to detach",
                instance_no));

            return (DDI_FAILURE);
        }

        scsi_hba_tran_free(instance->tran);

        /*
         * NOTE(review): if this fails the HBA has already been
         * detached and tran freed, leaving the instance partially
         * torn down; also the minor nodes created in attach()
         * ("devctl", "scsi", iocnode) do not appear to be removed
         * anywhere in this path — confirm.
         */
        if (abort_aen_cmd(instance, instance->aen_cmd)) {
            con_log(CL_ANN, (CE_WARN, "megasas_detach: "
                "failed to abort prevous AEN command\n"));

            return (DDI_FAILURE);
        }

        instance->func_ptr->disable_intr(instance);

        if (instance->isr_level == HIGH_LEVEL_INTR) {
            ddi_remove_softintr(instance->soft_intr_id);
        }

        ddi_remove_intr(dip, 0, instance->iblock_cookie);

        free_space_for_mfi(instance);

        pci_config_teardown(&instance->pci_handle);

        kmem_free(instance->func_ptr,
            sizeof (struct megasas_func_ptr));

        ddi_soft_state_free(megasas_state, instance_no);
        break;
    case DDI_PM_SUSPEND:
        con_log(CL_ANN, (CE_NOTE,
            "megasas_detach: DDI_PM_SUSPEND\n"));

        break;
    case DDI_SUSPEND:
        con_log(CL_ANN, (CE_NOTE,
            "megasas_detach: DDI_SUSPEND\n"));

        break;
    default:
        con_log(CL_ANN, (CE_WARN,
            "invalid detach command:0x%x", cmd));
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *             common entry points - for character driver types               *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * open - gets access to a device
 * @dev:
 * @openflags:
 * @otyp:
 * @credp:
 *
 * Access to a device by one or more application programs is controlled
 * through the open() and close() entry points. The primary function of
 * open() is to verify that the open request is allowed.
 */
static int
megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
{
    int rval = 0;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* Check root permissions */
    if (drv_priv(credp) != 0) {
        con_log(CL_ANN, (CE_WARN,
            "megaraid: Non-root ioctl access tried!"));
        return (EPERM);
    }

    /* Verify we are being opened as a character device */
    if (otyp != OTYP_CHR) {
        con_log(CL_ANN, (CE_WARN,
            "megaraid: ioctl node must be a char node\n"));
        return (EINVAL);
    }

    if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
        == NULL) {
        return (ENXIO);
    }

    /* scsi_hba_open is a weak symbol; it may be absent at run time */
    if (scsi_hba_open) {
        rval = scsi_hba_open(dev, openflags, otyp, credp);
    }

    return (rval);
}

/*
 * close - gives up access to a device
 * @dev:
 * @openflags:
 * @otyp:
 * @credp:
 *
 * close() should perform any cleanup necessary to finish using the minor
 * device, and prepare the device (and driver) to be opened again.
 */
static int
megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
{
    int rval = 0;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /* no need for locks! */

    /* scsi_hba_close is a weak symbol; it may be absent at run time */
    if (scsi_hba_close) {
        rval = scsi_hba_close(dev, openflags, otyp, credp);
    }

    return (rval);
}

/*
 * ioctl - performs a range of I/O commands for character drivers
 * @dev:
 * @cmd:
 * @arg:
 * @mode:
 * @credp:
 * @rvalp:
 *
 * ioctl() routine must make sure that user data is copied into or out of the
 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
 * and ddi_copyout(), as appropriate.
 * This is a wrapper routine to serialize access to the actual ioctl routine.
 * ioctl() should return 0 on success, or the appropriate error number. The
 * driver may also set the value returned to the calling process through rvalp.
 */
static int
megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
    int rval = 0;

    struct megasas_instance *instance;
    struct megasas_ioctl ioctl;
    struct megasas_aen aen;

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));

    if (instance == NULL) {
        /* invalid minor number */
        con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
        return (ENXIO);
    }

    switch ((uint_t)cmd) {
    case MEGASAS_IOCTL_FIRMWARE:
        if (ddi_copyin((void *) arg, &ioctl,
            sizeof (struct megasas_ioctl), mode)) {
            con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
                "ERROR IOCTL copyin"));
            return (EFAULT);
        }

        /* driver-private ioctls are handled locally; the rest go to MFI */
        if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
            rval = handle_drv_ioctl(instance, &ioctl, mode);
        } else {
            rval = handle_mfi_ioctl(instance, &ioctl, mode);
        }

        /*
         * NOTE(review): copies sizeof - 1 bytes back out —
         * presumably to skip a trailing byte of the structure;
         * confirm against struct megasas_ioctl's layout.
         */
        if (ddi_copyout((void *) &ioctl, (void *)arg,
            (sizeof (struct megasas_ioctl) - 1), mode)) {
            con_log(CL_ANN, (CE_WARN,
                "megasas_ioctl: copy_to_user failed\n"));
            rval = 1;
        }

        break;
    case MEGASAS_IOCTL_AEN:
        if (ddi_copyin((void *) arg, &aen,
            sizeof (struct megasas_aen), mode)) {
            con_log(CL_ANN, (CE_WARN,
                "megasas_ioctl: ERROR AEN copyin"));
            return (EFAULT);
        }

        rval = handle_mfi_aen(instance, &aen);

        if (ddi_copyout((void *) &aen, (void *)arg,
            sizeof (struct megasas_aen), mode)) {
            con_log(CL_ANN, (CE_WARN,
                "megasas_ioctl: copy_to_user failed\n"));
            rval = 1;
        }

        break;
    default:
        /* unrecognized commands are forwarded to the SCSA framework */
        rval = scsi_hba_ioctl(dev, cmd, arg,
            mode, credp, rvalp);

        con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
            "scsi_hba_ioctl called, ret = %x.", rval));
    }

    return (rval);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for block driver types                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * reset - TBD
 * @dip:
 * @cmd:
 *
 * TBD
 */
/*ARGSUSED*/
static int
megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
    int instance_no;

    struct megasas_instance *instance;

    instance_no = ddi_get_instance(dip);
    instance = (struct megasas_instance *)ddi_get_soft_state
        (megasas_state, instance_no);

    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    if (!instance) {
        con_log(CL_ANN, (CE_WARN,
            "megaraid:%d could not get adapter in reset",
            instance_no));
        return (DDI_FAILURE);
    }

    /* flush controller cache so data is not lost across the reset */
    con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
        instance_no));

    flush_cache(instance);

    return (DDI_SUCCESS);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                          entry points (SCSI HBA)                           *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * tran_tgt_init - initialize a target
device instance
 * @hba_dip:
 * @tgt_dip:
 * @tran:
 * @sd:
 *
 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
 * the device's address as valid and supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that device
 * is not probed or attached.
 */
/*ARGSUSED*/
static int
megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /*
     * NOTE(review): the NOT_YET block below is dead code and would not
     * compile if enabled — it references undeclared identifiers (adp,
     * mega_state, instance_no). Kept verbatim as a sketch of intended
     * target-address validation.
     */
#ifdef NOT_YET
    int instance;
    int islogical;

    instance = ddi_get_instance(hba_dip);
    adp = (struct megasas_instance *)ddi_get_soft_state(mega_state,
        instance_no);
    if ((sd->sd_address.a_target >= (adp->max_channel * 16 + MAX_LD_64)) ||
        (sd->sd_address.a_lun != 0)) {

        return (DDI_FAILURE);
    }

    MRAID_IS_LOGICAL(sd->sd_address.a_target, islogical);

    /* Allow non-disk device commands to pass */
    if (!islogical) {
        return (DDI_SUCCESS);
    }

    /* From Target 40 - 64 there will be no devices */
    if (sd->sd_address.a_target > MAX_LOGICAL_DRIVES_40LD) {
        return (DDI_FAILURE);
    }


    /*
     * Get information about the logical drives.
     */
    if (megaraid_ld_state_instance(adp) != DDI_SUCCESS) {
        con_log(CL_ANN, (CE_WARN, "megaraid: failed query adapter"));
    }

    if (adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
        == RDRV_DELETED ||
        adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
        == RDRV_OFFLINE) {

        return (DDI_FAILURE);
    }
#endif /* NOT_YET */
    /* currently every target address is accepted */
    return (DDI_SUCCESS);
}
#if defined(USELESS) && !defined(lint)
/*
 * tran_tgt_probe - probe for the existence of a target device
 * @sd:
 * @callback:
 *
 * The tran_tgt_probe() entry point enables the HBA to customize the operation
 * of scsi_probe(), if necessary. This entry point is called only when the
 * target driver calls scsi_probe(). The HBA driver can retain the normal
 * operation of scsi_probe() by calling scsi_hba_probe() and returning its
 * return value. This entry point is not required, and if not needed, the HBA
 * driver should set the tran_tgt_ probe vector in the scsi_hba_tran structure
 * to point to scsi_hba_probe().
 */
static int
megasas_tran_tgt_probe(struct scsi_device *sd, int (*callback)())
{
    con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

    /*
     * the HBA driver should set the tran_tgt_probe vector in the
     * scsi_hba_tran structure to point to scsi_hba_probe()
     */
    return (scsi_hba_probe(sd, callback));
}
#endif /* defined (USELESS) && !defined (lint) */

/*
 * tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap:
 * @pkt:
 * @bp:
 * @cmdlen:
 * @statuslen:
 * @tgtlen:
 * @flags:
 * @callback:
 *
 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
 * structure and DMA resources for a target driver request. The
 * tran_init_pkt() entry point is called when the target driver calls the
 * SCSA function scsi_init_pkt().
 * Each call of the tran_init_pkt() entry point
 * is a request to perform one or more of three possible services:
 *  - allocation and initialization of a scsi_pkt structure
 *  - allocation of DMA resources for data transfer
 *  - reallocation of DMA resources for the next portion of the data transfer
 */
static struct scsi_pkt *
megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd	*acmd;
	struct megasas_instance	*instance;
	struct scsi_pkt	*new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MEGA(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		new_pkt = pkt;
	} else {
		/* caller supplied the pkt; nothing to free on DMA failure */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (megasas_dma_alloc(instance, pkt, bp, flags,
			    callback) == -1) {
				/* only free a pkt we allocated ourselves */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}

				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* handle already bound: advance the DMA window */
			if (megasas_dma_move(instance, pkt, bp) == -1) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * tran_start - transport a SCSI command to the addressed target
 * @ap:
 * @pkt:
 *
 * The tran_start() entry point for a SCSI HBA driver is called to transport a
 * SCSI command to the addressed target. The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point. If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 */
static int
megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);
	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mega_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* no free command packet available right now */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* interrupt-driven path: fire and complete from the ISR */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

#if defined(NOT_YET) && !defined(lint)
		/*
		 * before return, set timer - for timeout checking
		 * (for every 1 second)
		 */
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MEGASAS_1_SECOND));
#endif /* defined(NOT_YET) && !defined(lint) */
	} else {
		/* polled (FLAG_NOINTR) path: complete the command inline */
		struct megasas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MEGASAS_TRUE;

		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* translate the MFI status into SCSA pkt fields */
		switch (hdr->cmd_status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}

/*
 * tran_abort - Abort any commands that are currently in transport
 * @ap:
 * @pkt:
 *
 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
 * commands that are currently in transport for a particular target. This entry
 * point is called when a target driver calls scsi_abort(). The tran_abort()
 * entry point should attempt to abort the command denoted by the pkt
 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
 * abort all outstanding commands in the transport layer for the particular
 * target or logical unit.
1401 */ 1402 /*ARGSUSED*/ 1403 static int 1404 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1405 { 1406 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1407 1408 /* aborting command not supported by H/W */ 1409 1410 return (DDI_FAILURE); 1411 } 1412 1413 /* 1414 * tran_reset - reset either the SCSI bus or target 1415 * @ap: 1416 * @level: 1417 * 1418 * The tran_reset() entry point for a SCSI HBA driver is called to reset either 1419 * the SCSI bus or a particular SCSI target device. This entry point is called 1420 * when a target driver calls scsi_reset(). The tran_reset() entry point must 1421 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the 1422 * particular target or logical unit must be reset. 1423 */ 1424 /*ARGSUSED*/ 1425 static int 1426 megasas_tran_reset(struct scsi_address *ap, int level) 1427 { 1428 struct megasas_instance *instance = ADDR2MEGA(ap); 1429 1430 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1431 1432 if (wait_for_outstanding(instance)) { 1433 return (DDI_FAILURE); 1434 } else { 1435 return (DDI_SUCCESS); 1436 } 1437 } 1438 1439 /* 1440 * tran_bus_reset - reset the SCSI bus 1441 * @dip: 1442 * @level: 1443 * 1444 * The tran_bus_reset() vector in the scsi_hba_tran structure should be 1445 * initialized during the HBA driver's attach(). The vector should point to 1446 * an HBA entry point that is to be called when a user initiates a bus reset. 1447 * Implementation is hardware specific. If the HBA driver cannot reset the 1448 * SCSI bus without affecting the targets, the driver should fail RESET_BUS 1449 * or not initialize this vector. 
 */
/*ARGSUSED*/
static int
megasas_tran_bus_reset(dev_info_t *dip, int level)
{
	int	instance_no = ddi_get_instance(dip);

	struct megasas_instance	*instance = ddi_get_soft_state(megasas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* same policy as tran_reset: drain all outstanding commands */
	if (wait_for_outstanding(instance)) {
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*
 * tran_getcap - get one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @whom:
 *
 * The target driver can request the current setting of the capability for a
 * particular target by setting the whom parameter to nonzero. A whom value of
 * zero indicates a request for the current setting of the general capability
 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
 * for undefined capabilities or the current value of the requested capability.
 */
/*ARGSUSED*/
static int
megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int	rval = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}
#if 0
	if (ap->a_target >= (adapter->max_channel * 16 + MAX_LD_64)) {

		return (-1);
	}

	acmdp = &acmd;
#endif

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = megasas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
#if 0
		int channel;
		int target;
		int islogical;

		MRAID_GET_DEVICE_MAP(adapter, acmdp, channel,
		    target, ap, islogical);

		if (!islogical) {
			con_log(CL_ANN1, (CE_WARN, "megaraid%d: "
			    "fail geometry for phy [%d:%d]\n",
			    ddi_get_instance(adapter->dip), channel,
			    target));
			return (-1);
		}

		if (adapter->read_ldidmap)
			target -= 0x80;

		if ((adapter->ldrv_state[target] == RDRV_OFFLINE) ||
		    (adapter->ldrv_state[target] == RDRV_DELETED)) {
			return (-1);
		}

		rval = (64 << 16) | 32;

		if (adapter->ldrv_size[target] > 0x200000) {
			rval = (255 << 16) | 63;
		}

		rval = (64 << 16) | 32;	/* remove latter */
#endif
		/* geometry is not reported; SCSA will derive a default */
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * tran_setcap - set one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @value:
 * @whom:
 *
 * The target driver might request that the new value be set for a particular
 * target by setting the whom parameter to nonzero. A whom value of zero
 * means that request is to set the new value for the SCSI bus or for adapter
 * hardware in general.
1599 * The tran_setcap() should return the following values as appropriate: 1600 * - -1 for undefined capabilities 1601 * - 0 if the HBA driver cannot set the capability to the requested value 1602 * - 1 if the HBA driver is able to set the capability to the requested value 1603 */ 1604 /*ARGSUSED*/ 1605 static int 1606 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 1607 { 1608 int rval = 1; 1609 1610 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1611 1612 /* We don't allow setting capabilities for other targets */ 1613 if (cap == NULL || whom == 0) { 1614 return (-1); 1615 } 1616 1617 switch (scsi_hba_lookup_capstr(cap)) { 1618 case SCSI_CAP_DMA_MAX: 1619 case SCSI_CAP_MSG_OUT: 1620 case SCSI_CAP_PARITY: 1621 case SCSI_CAP_LINKED_CMDS: 1622 case SCSI_CAP_RESET_NOTIFICATION: 1623 case SCSI_CAP_DISCONNECT: 1624 case SCSI_CAP_SYNCHRONOUS: 1625 case SCSI_CAP_UNTAGGED_QING: 1626 case SCSI_CAP_WIDE_XFER: 1627 case SCSI_CAP_INITIATOR_ID: 1628 case SCSI_CAP_ARQ: 1629 /* 1630 * None of these are settable via 1631 * the capability interface. 1632 */ 1633 break; 1634 case SCSI_CAP_TAGGED_QING: 1635 rval = 1; 1636 break; 1637 case SCSI_CAP_SECTOR_SIZE: 1638 rval = 1; 1639 break; 1640 1641 case SCSI_CAP_TOTAL_SECTORS: 1642 rval = 1; 1643 break; 1644 default: 1645 rval = -1; 1646 break; 1647 } 1648 1649 return (rval); 1650 } 1651 1652 /* 1653 * tran_destroy_pkt - deallocate scsi_pkt structure 1654 * @ap: 1655 * @pkt: 1656 * 1657 * The tran_destroy_pkt() entry point is the HBA driver function that 1658 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is 1659 * called when the target driver calls scsi_destroy_pkt(). The 1660 * tran_destroy_pkt() entry point must free any DMA resources that have been 1661 * allocated for the packet. An implicit DMA synchronization occurs if the 1662 * DMA resources are freed and any cached data remains after the completion 1663 * of the transfer. 
1664 */ 1665 static void 1666 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1667 { 1668 struct scsa_cmd *acmd = PKT2CMD(pkt); 1669 1670 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1671 1672 if (acmd->cmd_flags & CFLAG_DMAVALID) { 1673 acmd->cmd_flags &= ~CFLAG_DMAVALID; 1674 1675 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 1676 1677 ddi_dma_free_handle(&acmd->cmd_dmahandle); 1678 1679 acmd->cmd_dmahandle = NULL; 1680 } 1681 1682 /* free the pkt */ 1683 scsi_hba_pkt_free(ap, pkt); 1684 } 1685 1686 /* 1687 * tran_dmafree - deallocates DMA resources 1688 * @ap: 1689 * @pkt: 1690 * 1691 * The tran_dmafree() entry point deallocates DMAQ resources that have been 1692 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is 1693 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must 1694 * free only DMA resources allocated for a scsi_pkt structure, not the 1695 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is 1696 * implicitly performed. 1697 */ 1698 /*ARGSUSED*/ 1699 static void 1700 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1701 { 1702 register struct scsa_cmd *acmd = PKT2CMD(pkt); 1703 1704 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1705 1706 if (acmd->cmd_flags & CFLAG_DMAVALID) { 1707 acmd->cmd_flags &= ~CFLAG_DMAVALID; 1708 1709 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 1710 1711 ddi_dma_free_handle(&acmd->cmd_dmahandle); 1712 1713 acmd->cmd_dmahandle = NULL; 1714 } 1715 } 1716 1717 /* 1718 * tran_sync_pkt - synchronize the DMA object allocated 1719 * @ap: 1720 * @pkt: 1721 * 1722 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for 1723 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt() 1724 * entry point is called when the target driver calls scsi_sync_pkt(). 
If the
 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
 * must synchronize the CPU's view of the data. If the data transfer direction
 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
 * device's view of the data.
 */
/*ARGSUSED*/
static void
megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * following 'ddi_dma_sync()' API call
	 * already called for each I/O in the ISR
	 */
#ifdef TBD
	int i;

	register struct scsa_cmd	*acmd = PKT2CMD(pkt);

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
#endif /* TBD */
}

/* quiesce is not implemented; unconditionally returns 1 */
/*ARGSUSED*/
static int
megasas_tran_quiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}

/* unquiesce is not implemented; unconditionally returns 1 */
/*ARGSUSED*/
static int
megasas_tran_unquiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}

/*
 * megasas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
megasas_isr(caddr_t arg)
{
	int		need_softintr;
	uint32_t	producer;
	uint32_t	consumer;
	uint32_t	context;

	struct megasas_cmd	*cmd;
	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	instance = (struct megasas_instance *)arg;
	if (!instance->func_ptr->intr_ack(instance)) {
		/* interrupt was not raised by this adapter */
		return (DDI_INTR_UNCLAIMED);
	}

	/* pull the reply queue indices into the CPU's view */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	producer = *instance->producer;
	consumer = *instance->consumer;

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));

	mutex_enter(&instance->completed_pool_mtx);

	/* drain every completed context onto the completed pool */
	while (consumer != producer) {
		context = instance->reply_queue[consumer];
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* the reply queue holds max_fw_cmds + 1 slots and wraps */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	*instance->consumer = consumer;
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * NOTE(review): softint_running is read outside any lock, so a
	 * softint trigger could be skipped in a narrow race window --
	 * verify the softintr handler tolerates this.
	 */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) megasas_softintr((caddr_t)instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                 libraries                                  *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 */
static struct megasas_cmd *
get_mfi_pkt(struct megasas_instance *instance)
{
	mlist_t			*head = &instance->cmd_pool_list;
	struct megasas_cmd	*cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		/*
LINTED E_BAD_PTR_CAST_ALIGN */ 1873 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1874 mlist_del_init(head->next); 1875 } 1876 1877 mutex_exit(&instance->cmd_pool_mtx); 1878 1879 return (cmd); 1880 } 1881 1882 /* 1883 * return_mfi_pkt : Return a cmd to free command pool 1884 */ 1885 static void 1886 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd) 1887 { 1888 mutex_enter(&instance->cmd_pool_mtx); 1889 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1890 1891 mlist_add(&cmd->list, &instance->cmd_pool_list); 1892 1893 mutex_exit(&instance->cmd_pool_mtx); 1894 } 1895 1896 /* 1897 * get_mfi_pkt : Get a command from the free pool 1898 */ 1899 #ifndef lint 1900 static struct megasas_cmd * 1901 pull_pend_queue(struct megasas_instance *instance) 1902 { 1903 mlist_t *head = &instance->cmd_pend_list; 1904 struct megasas_cmd *cmd = NULL; 1905 1906 mutex_enter(&instance->cmd_pend_mtx); 1907 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1908 1909 if (!mlist_empty(head)) { 1910 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1911 mlist_del_init(head->next); 1912 } 1913 1914 mutex_exit(&instance->cmd_pend_mtx); 1915 1916 return (cmd); 1917 } 1918 1919 /* 1920 * return_mfi_pkt : Return a cmd to free command pool 1921 */ 1922 static void 1923 push_pend_queue(struct megasas_instance *instance, struct megasas_cmd *cmd) 1924 { 1925 mutex_enter(&instance->cmd_pend_mtx); 1926 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1927 1928 mlist_add(&cmd->list, &instance->cmd_pend_list); 1929 1930 mutex_exit(&instance->cmd_pend_mtx); 1931 } 1932 #endif 1933 1934 /* 1935 * destroy_mfi_frame_pool 1936 */ 1937 static void 1938 destroy_mfi_frame_pool(struct megasas_instance *instance) 1939 { 1940 int i; 1941 uint32_t max_cmd = instance->max_fw_cmds; 1942 1943 struct megasas_cmd *cmd; 1944 1945 /* return all frames to pool */ 1946 for (i = 0; i < max_cmd; i++) { 1947 1948 cmd = instance->cmd_list[i]; 1949 1950 if (cmd->frame_dma_obj_status == 
DMA_OBJ_ALLOCATED) 1951 mega_free_dma_obj(cmd->frame_dma_obj); 1952 1953 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 1954 } 1955 1956 } 1957 1958 /* 1959 * create_mfi_frame_pool 1960 */ 1961 static int 1962 create_mfi_frame_pool(struct megasas_instance *instance) 1963 { 1964 int i = 0; 1965 int cookie_cnt; 1966 uint16_t max_cmd; 1967 uint16_t sge_sz; 1968 uint32_t sgl_sz; 1969 uint32_t tot_frame_size; 1970 1971 struct megasas_cmd *cmd; 1972 1973 max_cmd = instance->max_fw_cmds; 1974 1975 sge_sz = sizeof (struct megasas_sge64); 1976 1977 /* calculated the number of 64byte frames required for SGL */ 1978 sgl_sz = sge_sz * instance->max_num_sge; 1979 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH; 1980 1981 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 1982 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 1983 1984 while (i < max_cmd) { 1985 cmd = instance->cmd_list[i]; 1986 1987 cmd->frame_dma_obj.size = tot_frame_size; 1988 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr; 1989 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 1990 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 1991 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 1992 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 1993 1994 1995 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj); 1996 1997 if (cookie_cnt == -1 || cookie_cnt > 1) { 1998 con_log(CL_ANN, (CE_WARN, 1999 "create_mfi_frame_pool: could not alloc.")); 2000 return (DDI_FAILURE); 2001 } 2002 2003 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 2004 2005 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 2006 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer; 2007 cmd->frame_phys_addr = 2008 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 2009 2010 cmd->sense = (uint8_t *)(((unsigned long) 2011 cmd->frame_dma_obj.buffer) + 2012 tot_frame_size - SENSE_LENGTH); 2013 cmd->sense_phys_addr = 2014 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 2015 tot_frame_size - 
SENSE_LENGTH; 2016 2017 if (!cmd->frame || !cmd->sense) { 2018 con_log(CL_ANN, (CE_NOTE, 2019 "megasas: pci_pool_alloc failed \n")); 2020 2021 return (-ENOMEM); 2022 } 2023 2024 cmd->frame->io.context = cmd->index; 2025 i++; 2026 2027 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 2028 cmd->frame->io.context, cmd->frame_phys_addr)); 2029 } 2030 2031 return (DDI_SUCCESS); 2032 } 2033 2034 /* 2035 * free_additional_dma_buffer 2036 */ 2037 static void 2038 free_additional_dma_buffer(struct megasas_instance *instance) 2039 { 2040 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 2041 mega_free_dma_obj(instance->mfi_internal_dma_obj); 2042 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 2043 } 2044 2045 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { 2046 mega_free_dma_obj(instance->mfi_evt_detail_obj); 2047 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; 2048 } 2049 } 2050 2051 /* 2052 * alloc_additional_dma_buffer 2053 */ 2054 static int 2055 alloc_additional_dma_buffer(struct megasas_instance *instance) 2056 { 2057 uint32_t reply_q_sz; 2058 uint32_t internal_buf_size = PAGESIZE*2; 2059 2060 /* max cmds plus 1 + procudure & consumer */ 2061 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2); 2062 2063 instance->mfi_internal_dma_obj.size = internal_buf_size; 2064 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr; 2065 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2066 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2067 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; 2068 2069 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj) 2070 != 1) { 2071 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q")); 2072 return (DDI_FAILURE); 2073 } 2074 2075 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); 2076 2077 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; 2078 2079 /* LINTED E_BAD_PTR_CAST_ALIGN 
*/ 2080 instance->producer = (uint32_t *)instance->mfi_internal_dma_obj.buffer; 2081 instance->consumer = (uint32_t *)((unsigned long) 2082 instance->mfi_internal_dma_obj.buffer + 4); 2083 instance->reply_queue = (uint32_t *)((unsigned long) 2084 instance->mfi_internal_dma_obj.buffer + 8); 2085 instance->internal_buf = (caddr_t)(((unsigned long) 2086 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8); 2087 instance->internal_buf_dmac_add = 2088 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 2089 reply_q_sz; 2090 instance->internal_buf_size = internal_buf_size - 2091 (reply_q_sz + 8); 2092 2093 /* allocate evt_detail */ 2094 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail); 2095 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr; 2096 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2097 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2098 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; 2099 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1; 2100 2101 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) { 2102 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: " 2103 "could not data transfer buffer alloc.")); 2104 return (DDI_FAILURE); 2105 } 2106 2107 bzero(instance->mfi_evt_detail_obj.buffer, 2108 sizeof (struct megasas_evt_detail)); 2109 2110 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; 2111 2112 return (DDI_SUCCESS); 2113 } 2114 2115 /* 2116 * free_space_for_mfi 2117 */ 2118 static void 2119 free_space_for_mfi(struct megasas_instance *instance) 2120 { 2121 int i; 2122 uint32_t max_cmd = instance->max_fw_cmds; 2123 2124 /* already freed */ 2125 if (instance->cmd_list == NULL) { 2126 return; 2127 } 2128 2129 free_additional_dma_buffer(instance); 2130 2131 /* first free the MFI frame pool */ 2132 destroy_mfi_frame_pool(instance); 2133 2134 /* free all the commands in the cmd_list */ 2135 for (i = 0; i < 
instance->max_fw_cmds; i++) {
		kmem_free(instance->cmd_list[i],
		    sizeof (struct megasas_cmd));

		instance->cmd_list[i] = NULL;
	}

	/* free the cmd_list buffer itself */
	kmem_free(instance->cmd_list,
	    sizeof (struct megasas_cmd *) * max_cmd);

	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool_list);
}

/*
 * alloc_space_for_mfi - build the command list, frame pool and DMA buffers
 */
static int
alloc_space_for_mfi(struct megasas_instance *instance)
{
	int		i;
	uint32_t	max_cmd;
	size_t		sz;

	struct megasas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;
	sz = sizeof (struct megasas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	ASSERT(instance->cmd_list);

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
		    KM_SLEEP);
		ASSERT(instance->cmd_list[i]);
	}

	INIT_LIST_HEAD(&instance->cmd_pool_list);

	/* add all the commands to command pool (instance->cmd_pool) */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;

		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	/* create a frame pool and assign one frame to each cmd */
	if (create_mfi_frame_pool(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
		return (DDI_FAILURE);
	}

	/* allocate the reply queue and other additional DMA buffers */
	if (alloc_additional_dma_buffer(instance)) {
		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * get_ctrl_info - fetch controller properties via MR_DCMD_CTRL_GET_INFO
 * @instance:	adapter soft state
 * @ctrl_info:	caller-supplied buffer receiving the controller info
 *
 * Issues a polled DCMD whose data lands in instance->internal_buf and is
 * then copied out to @ctrl_info.
 *
 * NOTE(review): returns DDI_FAILURE on setup errors but 0/-1 after the
 * polled command; callers must treat any non-zero value as failure --
 * verify before changing either convention.
 */
static int
get_ctrl_info(struct megasas_instance *instance,
    struct megasas_ctrl_info *ctrl_info)
{
	int	ret = 0;

	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;
	struct megasas_ctrl_info	*ci;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info\n"));
		return (DDI_FAILURE);
	}

	dcmd = &cmd->frame->dcmd;

	/* the DCMD data lands in the shared internal buffer */
	ci = (struct megasas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to alloc mem for ctrl info\n"));
		return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct megasas_ctrl_info));

	/* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, 12);

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);

	cmd->frame_count = 1;

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;
		(void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
	} else {
		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
		ret = -1;
	}

	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * abort_aen_cmd - issue an MFI abort frame for the outstanding AEN command
 * @instance:	  adapter soft state
 * @cmd_to_abort: the registered AEN command to cancel
 *
 * The abort is issued synchronously; on return instance->aen_cmd is
 * cleared regardless of the outcome.
 */
static int
abort_aen_cmd(struct megasas_instance *instance,
    struct megasas_cmd *cmd_to_abort)
{
	int	ret = 0;

	struct megasas_cmd		*cmd;
	struct megasas_abort_frame	*abort_fr;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info\n"));
		return (DDI_FAILURE);
	}

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_OP_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	instance->aen_cmd->abort_aen = 1;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
		ret = -1;
	} else {
		ret = 0;
	}

	/* NOTE(review): abort_aen was already set above -- redundant? */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * init_mfi - map registers, wait for FW READY and set up the MFI layer
 * (definition continues past this review chunk)
 */
static int
init_mfi(struct megasas_instance *instance)
{
	off_t		reglength;
	struct megasas_cmd		*cmd;
	struct megasas_ctrl_info	ctrl_info;
	struct megasas_init_frame	*init_frame;
	struct megasas_init_queue_info	*initq_info;

	/* at least a 4K register window must exist */
	if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
	    != DDI_SUCCESS) || reglength < 4096) {
		return (DDI_FAILURE);
	}

	if (reglength > 8192) {
		reglength = 8192;
		con_log(CL_DLEVEL1, (CE_NOTE,
		    "mega: register length to map is 0x%lx bytes", reglength));
	}

	if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
	    &instance->regmap, 0, reglength, &endian_attr,
	    &instance->regmap_handle) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_NOTE,
		    "megaraid: couldn't map control registers"));

		goto fail_mfi_reg_setup;
	}

	/* we expect the FW state to be READY */
	if (mfi_state_transition_to_ready(instance)) {
		con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
		goto fail_ready_state;
	}

	/* get various operational parameters from status register */
	/* SGE count lives in bits 23:16 of the FW status register */
	instance->max_num_sge =
	    (instance->func_ptr->read_fw_status_reg(instance) &
	    0xFF0000) >> 0x10;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds =
	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
	instance->max_fw_cmds = instance->max_fw_cmds - 1;

	/*
	 * con_log(CL_ANN, (CE_WARN, "megaraid: "
	 *    "max_num_sge = %d max_fw_cmds = %d\n",
	 *    instance->max_num_sge, instance->max_fw_cmds));
	 */

	/* clamp the FW-reported SGE count to the driver's compile-time max */
	instance->max_num_sge =
	    (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
	    MEGASAS_MAX_SGE_CNT : instance->max_num_sge;

	/* create a pool of commands */
	if (alloc_space_for_mfi(instance))
		goto fail_alloc_fw_space;

	/* disable interrupt for initial preparation */
	instance->func_ptr->disable_intr(instance);

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = get_mfi_pkt(instance);

	init_frame = (struct megasas_init_frame *)cmd->frame;
	initq_info = (struct megasas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));

	initq_info->init_flags = 0;

	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;

	/*
	 * The internal DMA buffer layout is: producer index at offset 0,
	 * consumer index at offset 4, reply queue starting at offset 8.
	 */
	initq_info->producer_index_phys_addr_hi = 0;
	initq_info->producer_index_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;

	initq_info->consumer_index_phys_addr_hi = 0;
	initq_info->consumer_index_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;

	initq_info->reply_queue_start_phys_addr_hi = 0;
	initq_info->reply_queue_start_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;

	init_frame->cmd = MFI_CMD_OP_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags = 0;
	init_frame->queue_info_new_phys_addr_lo =
	    cmd->frame_phys_addr + 64;
	init_frame->queue_info_new_phys_addr_hi = 0;

	init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		goto fail_fw_init;
	}

	return_mfi_pkt(instance, cmd);

	/* gather misc FW related information */
	if (!get_ctrl_info(instance, &ctrl_info)) {
		instance->max_sectors_per_req = ctrl_info.max_request_size;
		con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
		    ctrl_info.product_name, ctrl_info.ld_present_count));
	} else {
		/* fall back to a value derived from the SGE limit */
		instance->max_sectors_per_req = instance->max_num_sge *
		    PAGESIZE / 512;
	}

	return (0);

fail_fw_init:
fail_alloc_fw_space:

	/*
	 * NOTE(review): on the fail_fw_init path the cmd obtained above is
	 * not returned to the pool first; free_space_for_mfi() tears the
	 * whole pool down, so this appears benign - confirm.
	 */
	free_space_for_mfi(instance);

fail_ready_state:
	ddi_regs_map_free(&instance->regmap_handle);

fail_mfi_reg_setup:
	return (DDI_FAILURE);
}

/*
 * mfi_state_transition_to_ready : Move the FW to READY state
 *
 * @reg_set : MFI register set
 *
 * Loops on the FW status register, issuing the doorbell write each
 * intermediate state requires, and waits (max_wait seconds, polled in
 * 1 ms steps) for the state to advance.  Returns 0 once READY, or
 * -ENODEV on FAULT, an unknown state, or a timeout.
 */
static int
mfi_state_transition_to_ready(struct megasas_instance *instance)
{
	int		i;
	uint8_t		max_wait;
	uint32_t	fw_ctrl;
	uint32_t	fw_state;
	uint32_t	cur_state;

	fw_state =
	    instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));

	while (fw_state != MFI_STATE_READY) {
		con_log(CL_ANN, (CE_NOTE,
		    "mfi_state_transition_to_ready:FW state%x", fw_state));

		switch (fw_state) {
		case MFI_STATE_FAULT:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW in FAULT state!!"));

			return (-ENODEV);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW waiting for HANDSHAKE"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
			    MFI_INIT_HOTPLUG, instance);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW state boot message pending"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

			max_wait = 10;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* bring it to READY state; assuming max wait 2 secs */
			instance->func_ptr->disable_intr(instance);
			con_log(CL_ANN1, (CE_NOTE,
			    "megasas: FW in OPERATIONAL state"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
			 * to be set
			 */
			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* this state should not last for more than 2 seconds */
			con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));

			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_DEVICE_SCAN:
			max_wait = 10;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		default:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: Unknown state 0x%x\n", fw_state));
			return (-ENODEV);
		}

		/* the cur_state should not last for more than max_wait secs */
		for (i = 0; i < (max_wait * 1000); i++) {
			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
			fw_state =
			    instance->func_ptr->read_fw_status_reg(instance) &
			    MFI_STATE_MASK;

			/* still in the same state: sleep ~1 ms and re-poll */
			if (fw_state == cur_state) {
				delay(1 * drv_usectohz(1000));
			} else {
				break;
			}
		}

		/* return error if fw_state hasn't changed after max_wait */
		if (fw_state == cur_state) {
			con_log(CL_ANN, (CE_NOTE,
			    "FW state hasn't changed in %d secs\n", max_wait));
			return (-ENODEV);
		}
	};

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	fw_ctrl = RD_IB_DOORBELL(instance);
#ifdef lint
	fw_ctrl = fw_ctrl;
#endif
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));

	/*
	 * Write 0xF to the doorbell register to do the following.
	 * - Abort all outstanding commands (bit 0).
	 * - Transition from OPERATIONAL to READY state (bit 1).
	 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
	 * - Set to release FW to continue running (i.e. BIOS handshake
	 *   (bit 3).
	 */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_DOORBELL(0xF, instance);

	return (0);
}

/*
 * get_seq_num - read the event-log info (latest AEN sequence number)
 * from the firmware via MR_DCMD_CTRL_EVENT_GET_INFO, synchronously.
 *
 * Returns 0 on success (eli filled in), -1 on command failure,
 * -ENOMEM / DDI_FAILURE on allocation failures.
 */
static int
get_seq_num(struct megasas_instance *instance,
    struct megasas_evt_log_info *eli)
{
	int	ret = 0;

	dma_obj_t			dcmd_dma_obj;
	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
		return (-ENOMEM);
	}

	dcmd = &cmd->frame->dcmd;

	/* allocate the data transfer buffer */
	dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
	dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "get_seq_num: could not data transfer buffer alloc."));
		/*
		 * NOTE(review): cmd is not returned via return_mfi_pkt()
		 * on this path - looks like a command-pool leak; confirm.
		 */
		return (DDI_FAILURE);
	}

	(void) memset(dcmd_dma_obj.buffer, 0,
	    sizeof (struct megasas_evt_log_info));

	(void) memset(dcmd->mbox.b, 0, 12);

	/* build a one-SGE READ DCMD for the event-log info */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
	dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		cmn_err(CE_WARN, "get_seq_num: "
		    "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
		ret = -1;
	} else {
		/* copy the data back into callers buffer */
		bcopy(dcmd_dma_obj.buffer, eli,
		    sizeof (struct megasas_evt_log_info));
		ret = 0;
	}

	mega_free_dma_obj(dcmd_dma_obj);

	return_mfi_pkt(instance, cmd);

	return (ret);
}

#ifndef lint
/*
 * get_seq_num_in_poll - polled-mode variant of get_seq_num(); same
 * DCMD and buffer handling, but completion is polled rather than
 * waited on.  Compiled out under lint.
 */
static int
get_seq_num_in_poll(struct megasas_instance *instance,
    struct megasas_evt_log_info *eli)
{
	int	ret = 0;

	dma_obj_t			dcmd_dma_obj;
	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
		return (-ENOMEM);
	}

	dcmd = &cmd->frame->dcmd;

	/* allocate the data transfer buffer */
	dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
	dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
		con_log(CL_ANN, (CE_WARN, "get_seq_num_in_poll: "
		    "could not data transfer buffer alloc."));
		/*
		 * NOTE(review): same apparent cmd leak as get_seq_num()
		 * on this failure path - confirm.
		 */
		return (DDI_FAILURE);
	}

	(void) memset(dcmd_dma_obj.buffer, 0,
	    sizeof (struct megasas_evt_log_info));

	/* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, 12);

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
	dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		cmn_err(CE_WARN, "get_seq_num_in_poll: "
		    "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
		ret = -1;
	} else {
		cmn_err(CE_WARN, "get_seq_num_in_poll:done\n");
		/* copy the data back into callers buffer */
		bcopy(dcmd_dma_obj.buffer, eli,
		    sizeof (struct megasas_evt_log_info));
		ret = 0;
	}

	mega_free_dma_obj(dcmd_dma_obj);

	return_mfi_pkt(instance, cmd);

	return (ret);
}
#endif

/*
 * start_mfi_aen - register for asynchronous event notifications,
 * starting from the firmware's latest sequence number + 1, for all
 * locales at CRITICAL class.  Returns 0 on success, -1 on failure.
 */
static int
start_mfi_aen(struct megasas_instance *instance)
{
	int	ret = 0;

	struct megasas_evt_log_info	eli;
	union megasas_evt_class_locale	class_locale;

	/* get the latest sequence number from FW */
	(void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));

	if (get_seq_num(instance, &eli)) {
		cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
		return (-1);
	}

	/* register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_CRITICAL;

	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
	    class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
		return (-1);
	}

	return (ret);
}

/*
 * flush_cache - ask the controller to flush its (and the disks')
 * write cache via MR_DCMD_CTRL_CACHE_FLUSH, in polled mode.
 */
static void
flush_cache(struct megasas_instance *instance)
{
	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;

	/* best-effort: silently give up if no command is available */
	if (!(cmd = get_mfi_pkt(instance)))
		return;

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, 12);

	/* no data transfer; flush targets are passed in mbox.b[0] */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		cmn_err(CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "done"));
	return_mfi_pkt(instance, cmd);
}

/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Logs a sysevent for the received event, then re-issues the same
 * command (with the next sequence number in mbox.w[0]) so the driver
 * keeps an AEN outstanding at all times.
 */
static void
service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	uint32_t	seq_num;
#ifdef TBD
	int		ret = 0;
	union megasas_evt_class_locale	class_locale;
#endif /* TBD */
	struct megasas_evt_detail *evt_detail =
	    (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;

	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* ENODATA from the FW simply means "no event yet"; treat as OK */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mega%d: Failed to log AEN event", instance_no));
	}

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = evt_detail->seq_num;
	seq_num++;
#ifdef TBD
	class_locale.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
	instance->aen_cmd = 0;

	return_mfi_pkt(instance, cmd);

	ret = register_mfi_aen(instance, seq_num, class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "service_mfi_aen: aen registration failed\n");
	}
#endif /* TBD */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* recycle the same frame: clear status, bump the sequence number */
	cmd->frame->dcmd.cmd_status = 0x0;
	cmd->frame->dcmd.mbox.w[0] = seq_num;

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	cmd->sync_cmd = MEGASAS_FALSE;

	/* ENODATA from the FW is treated as success here */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * megasas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Splices the completed-command list out under completed_pool_mtx,
 * then completes each command: regular SCSI packets get their status
 * translated and pkt_comp called; internal/sync commands are woken via
 * complete_cmd_in_sync_mode(); AEN completions go to service_mfi_aen().
 */
static uint_t
megasas_softintr(caddr_t arg)
{
	struct scsi_pkt		*pkt;
	struct scsa_cmd		*acmd;
	struct megasas_cmd	*cmd;
	struct mlist_head	*pos, *next;
	mlist_t			process_list;
	struct megasas_header	*hdr;
	struct megasas_instance	*instance;
	struct scsi_arq_status	*arqstat;

	con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	instance = (struct megasas_instance *)arg;
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * NOTE(review): softint_running is a plain flag set/cleared without
	 * a lock once completed_pool_mtx is dropped - confirm whether any
	 * reader depends on it being synchronized.
	 */
	instance->softint_running = 1;

	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		cmd = mlist_entry(pos, struct megasas_cmd, list);

		/* synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);
		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (hdr->cmd) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MEGASAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);
			/* con_log(CL_ANN, (CE_CONT,"pkt recived")); */

			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			/*
			 * INQUIRY post-processing: hide physical drives
			 * from the OS unless they are direct-access
			 * devices (which are then reported not-found).
			 */
			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry	*inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* map the MFI status to a SCSA packet status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				/* SJ - these are not correct way */
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* fabricate auto-request-sense data */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;

					bcopy(cmd->sense,
					    &(arqstat->sts_sensedata),
					    pkt->pkt_scblen -
					    offsetof(struct scsi_arq_status,
					    sts_sensedata));
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));
			/* pull_pend_queue(instance); */

			return_mfi_pkt(instance, cmd);
			/*
			 * con_log(CL_ANN,
			 *    (CE_CONT,"call add %lx",pkt->pkt_comp));
			 */

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			/* con_log(CL_ANN, (CE_CONT, "call complete")); */
			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (cmd->frame->dcmd.opcode ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "megasas_softintr: "
					    "aborted_aen returned"));
				} else {
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			con_log(CL_ANN, (CE_PANIC, "Cmd type unknown !!"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mega_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
 *
 * Caller fills in obj->size and obj->dma_attr before the call; on
 * success obj->dma_handle, obj->buffer, obj->acc_handle and
 * obj->dma_cookie[0] are valid.  Returns the cookie count (>= 1) on
 * success, -1 on any allocation/bind failure (resources unwound).
 */
static int
mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
{
	int		i;
	size_t		alen = 0;
	uint_t		cookie_cnt;

	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
	if (i != DDI_SUCCESS) {

		switch (i) {
			case DDI_DMA_BADATTR :
				con_log(CL_ANN, (CE_WARN,
				"Failed ddi_dma_alloc_handle- Bad atrib"));
				break;
			case DDI_DMA_NORESOURCES :
				con_log(CL_ANN, (CE_WARN,
				"Failed ddi_dma_alloc_handle- No Resources"));
				break;
			default :
				con_log(CL_ANN, (CE_WARN,
				"Failed ddi_dma_alloc_handle :unknown %d", i));
				break;
		}

		return (-1);
	}

	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &endian_attr,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
	    alen < obj->size) {

		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

		return (-1);
	}

	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

		ddi_dma_mem_free(&obj->acc_handle);
		ddi_dma_free_handle(&obj->dma_handle);

		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

		return (-1);
	}

	return (cookie_cnt);
}

/*
 * mega_free_dma_obj(dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mega_alloc_dma_obj()
 */
static void
mega_free_dma_obj(dma_obj_t obj)
{
	(void) ddi_dma_unbind_handle(obj.dma_handle);
	ddi_dma_mem_free(&obj.acc_handle);
	ddi_dma_free_handle(&obj.dma_handle);
}

/*
 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Binds bp to a DMA handle (honoring PKT_CONSISTENT/PKT_DMA_PARTIAL),
 * gathers up to instance->max_num_sge cookies into
 * acmd->cmd_dmacookies[], and sets pkt->pkt_resid.  Returns 0 on
 * success, -1 on failure (bioerror() already called on bp).
 */
static int
megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = megasas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* direction of the transfer comes from the buf flags */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* per-command handle, capped at the FW-negotiated SGE count */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (-1);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (-1);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "0x%x impossible\n", i));
			/* NOTREACHED */
			break;
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
			/* NOTREACHED */
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/* collect cookies up to the SGE limit, summing their sizes */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (0);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible\n"));
		/* NOTREACHED */
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "0x%x impossible\n", i));
		/* NOTREACHED */
		break;
	}

	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (-1);
}

/*
 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Returns 0 on success (cookies for the next window loaded and
 * pkt_resid updated), -1 when already at the last window or if
 * ddi_dma_getwin() fails.  cmd_dmacount accumulates across windows so
 * pkt_resid reflects the total mapped so far.
 */
static int
megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (0);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (-1);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (-1);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (0);
}

/*
 * build_cmd - translate a SCSA packet into an MFI command frame
 * (logical-drive IO frame or pass-through frame).  Definition
 * continues beyond this chunk.
 */
static struct megasas_cmd *
build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;

	struct megasas_cmd		*cmd;
	struct megasas_sge32		*mfi_sgl;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct megasas_pthru_frame	*pthru;
	struct megasas_io_frame		*ldio;

	/* find out if this is logical or physical drive command.  */
	acmd->islogical = MEGADRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		return (NULL);
	}

	cmd->pkt = pkt;
	cmd->cmd = acmd;

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	/* flags |= MFI_FRAME_SGL64; */

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 * 	flush_cache(instance);
	 * 	return_mfi_pkt(instance, cmd);
	 * 	*cmd_done = 1;
	 *
	 * 	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
		if (acmd->islogical) {
			ldio = (struct megasas_io_frame *)cmd->frame;

			/*
			 * prepare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3540 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ; 3541 ldio->cmd_status = 0x0; 3542 ldio->scsi_status = 0x0; 3543 ldio->target_id = acmd->device_id; 3544 ldio->timeout = 0; 3545 ldio->reserved_0 = 0; 3546 ldio->pad_0 = 0; 3547 ldio->flags = flags; 3548 3549 /* Initialize sense Information */ 3550 bzero(cmd->sense, SENSE_LENGTH); 3551 ldio->sense_len = SENSE_LENGTH; 3552 ldio->sense_buf_phys_addr_hi = 0; 3553 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3554 3555 ldio->start_lba_hi = 0; 3556 ldio->access_byte = (acmd->cmd_cdblen != 6) ? 3557 pkt->pkt_cdbp[1] : 0; 3558 ldio->sge_count = acmd->cmd_cookiecnt; 3559 mfi_sgl = (struct megasas_sge32 *)&ldio->sgl; 3560 3561 context = ldio->context; 3562 3563 if (acmd->cmd_cdblen == CDB_GROUP0) { 3564 ldio->lba_count = host_to_le16( 3565 (uint16_t)(pkt->pkt_cdbp[4])); 3566 3567 ldio->start_lba_lo = host_to_le32( 3568 ((uint32_t)(pkt->pkt_cdbp[3])) | 3569 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 3570 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 3571 << 16)); 3572 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 3573 ldio->lba_count = host_to_le16( 3574 ((uint16_t)(pkt->pkt_cdbp[8])) | 3575 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)); 3576 3577 ldio->start_lba_lo = host_to_le32( 3578 ((uint32_t)(pkt->pkt_cdbp[5])) | 3579 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3580 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3581 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3582 } else if (acmd->cmd_cdblen == CDB_GROUP2) { 3583 ldio->lba_count = host_to_le16( 3584 ((uint16_t)(pkt->pkt_cdbp[9])) | 3585 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) | 3586 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) | 3587 ((uint16_t)(pkt->pkt_cdbp[6]) << 24)); 3588 3589 ldio->start_lba_lo = host_to_le32( 3590 ((uint32_t)(pkt->pkt_cdbp[5])) | 3591 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3592 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3593 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3594 } else if (acmd->cmd_cdblen == CDB_GROUP3) { 3595 ldio->lba_count = host_to_le16( 3596 ((uint16_t)(pkt->pkt_cdbp[13])) | 3597 
((uint16_t)(pkt->pkt_cdbp[12]) << 8) | 3598 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) | 3599 ((uint16_t)(pkt->pkt_cdbp[10]) << 24)); 3600 3601 ldio->start_lba_lo = host_to_le32( 3602 ((uint32_t)(pkt->pkt_cdbp[9])) | 3603 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 3604 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 3605 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)); 3606 3607 ldio->start_lba_lo = host_to_le32( 3608 ((uint32_t)(pkt->pkt_cdbp[5])) | 3609 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 3610 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 3611 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)); 3612 } 3613 3614 break; 3615 } 3616 /* fall through For all non-rd/wr cmds */ 3617 default: 3618 pthru = (struct megasas_pthru_frame *)cmd->frame; 3619 3620 /* prepare the DCDB frame */ 3621 pthru->cmd = (acmd->islogical) ? 3622 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI; 3623 pthru->cmd_status = 0x0; 3624 pthru->scsi_status = 0x0; 3625 pthru->target_id = acmd->device_id; 3626 pthru->lun = 0; 3627 pthru->cdb_len = acmd->cmd_cdblen; 3628 pthru->timeout = 0; 3629 pthru->flags = flags; 3630 pthru->data_xfer_len = acmd->cmd_dmacount; 3631 pthru->sge_count = acmd->cmd_cookiecnt; 3632 mfi_sgl = (struct megasas_sge32 *)&pthru->sgl; 3633 3634 bzero(cmd->sense, SENSE_LENGTH); 3635 pthru->sense_len = SENSE_LENGTH; 3636 pthru->sense_buf_phys_addr_hi = 0; 3637 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 3638 3639 context = pthru->context; 3640 3641 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen); 3642 3643 break; 3644 } 3645 #ifdef lint 3646 context = context; 3647 #endif 3648 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */ 3649 3650 /* prepare the scatter-gather list for the firmware */ 3651 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 3652 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress; 3653 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size; 3654 } 3655 3656 sge_bytes = sizeof (struct megasas_sge32)*acmd->cmd_cookiecnt; 3657 3658 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) 
+ 3659 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; 3660 3661 if (cmd->frame_count >= 8) { 3662 cmd->frame_count = 8; 3663 } 3664 3665 return (cmd); 3666 } 3667 3668 /* 3669 * wait_for_outstanding - Wait for all outstanding cmds 3670 * @instance: Adapter soft state 3671 * 3672 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to 3673 * complete all its outstanding commands. Returns error if one or more IOs 3674 * are pending after this time period. 3675 */ 3676 static int 3677 wait_for_outstanding(struct megasas_instance *instance) 3678 { 3679 int i; 3680 uint32_t wait_time = 90; 3681 3682 for (i = 0; i < wait_time; i++) { 3683 if (!instance->fw_outstanding) { 3684 break; 3685 } 3686 3687 drv_usecwait(1000); /* wait for 1000 usecs */; 3688 } 3689 3690 if (instance->fw_outstanding) { 3691 return (1); 3692 } 3693 3694 return (0); 3695 } 3696 3697 /* 3698 * issue_mfi_pthru 3699 */ 3700 static int 3701 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3702 struct megasas_cmd *cmd, int mode) 3703 { 3704 void *ubuf; 3705 uint32_t kphys_addr = 0; 3706 uint32_t xferlen = 0; 3707 uint_t model; 3708 3709 dma_obj_t pthru_dma_obj; 3710 struct megasas_pthru_frame *kpthru; 3711 struct megasas_pthru_frame *pthru; 3712 3713 pthru = &cmd->frame->pthru; 3714 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0]; 3715 3716 model = ddi_model_convert_from(mode & FMODELS); 3717 if (model == DDI_MODEL_ILP32) { 3718 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3719 3720 xferlen = kpthru->sgl.sge32[0].length; 3721 3722 /* SJ! - ubuf needs to be virtual address. */ 3723 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3724 } else { 3725 #ifdef _ILP32 3726 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32")); 3727 xferlen = kpthru->sgl.sge32[0].length; 3728 /* SJ! - ubuf needs to be virtual address. 
*/ 3729 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 3730 #else 3731 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64")); 3732 xferlen = kpthru->sgl.sge64[0].length; 3733 /* SJ! - ubuf needs to be virtual address. */ 3734 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 3735 #endif 3736 } 3737 3738 if (xferlen) { 3739 /* means IOCTL requires DMA */ 3740 /* allocate the data transfer buffer */ 3741 pthru_dma_obj.size = xferlen; 3742 pthru_dma_obj.dma_attr = megasas_generic_dma_attr; 3743 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 3744 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 3745 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 3746 pthru_dma_obj.dma_attr.dma_attr_align = 1; 3747 3748 /* allocate kernel buffer for DMA */ 3749 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) { 3750 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3751 "could not data transfer buffer alloc.")); 3752 return (DDI_FAILURE); 3753 } 3754 3755 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3756 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 3757 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer, 3758 xferlen, mode)) { 3759 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3760 "copy from user space failed\n")); 3761 return (1); 3762 } 3763 } 3764 3765 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 3766 } 3767 3768 pthru->cmd = kpthru->cmd; 3769 pthru->sense_len = kpthru->sense_len; 3770 pthru->cmd_status = kpthru->cmd_status; 3771 pthru->scsi_status = kpthru->scsi_status; 3772 pthru->target_id = kpthru->target_id; 3773 pthru->lun = kpthru->lun; 3774 pthru->cdb_len = kpthru->cdb_len; 3775 pthru->sge_count = kpthru->sge_count; 3776 pthru->timeout = kpthru->timeout; 3777 pthru->data_xfer_len = kpthru->data_xfer_len; 3778 3779 pthru->sense_buf_phys_addr_hi = 0; 3780 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */ 3781 pthru->sense_buf_phys_addr_lo = 0; 3782 3783 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, 
pthru->cdb_len); 3784 3785 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64; 3786 pthru->sgl.sge32[0].length = xferlen; 3787 pthru->sgl.sge32[0].phys_addr = kphys_addr; 3788 3789 cmd->sync_cmd = MEGASAS_TRUE; 3790 cmd->frame_count = 1; 3791 3792 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3793 con_log(CL_ANN, (CE_WARN, 3794 "issue_mfi_pthru: fw_ioctl failed\n")); 3795 } else { 3796 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) { 3797 /* 3798 * con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3799 * "copy to user space\n")); 3800 */ 3801 3802 if (ddi_copyout(pthru_dma_obj.buffer, ubuf, 3803 xferlen, mode)) { 3804 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 3805 "copy to user space failed\n")); 3806 return (1); 3807 } 3808 } 3809 } 3810 3811 kpthru->cmd_status = pthru->cmd_status; 3812 kpthru->scsi_status = pthru->scsi_status; 3813 3814 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, " 3815 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status)); 3816 3817 if (xferlen) { 3818 /* free kernel buffer */ 3819 mega_free_dma_obj(pthru_dma_obj); 3820 } 3821 3822 return (0); 3823 } 3824 3825 /* 3826 * issue_mfi_dcmd 3827 */ 3828 static int 3829 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3830 struct megasas_cmd *cmd, int mode) 3831 { 3832 void *ubuf; 3833 uint32_t kphys_addr = 0; 3834 uint32_t xferlen = 0; 3835 uint32_t model; 3836 dma_obj_t dcmd_dma_obj; 3837 struct megasas_dcmd_frame *kdcmd; 3838 struct megasas_dcmd_frame *dcmd; 3839 3840 dcmd = &cmd->frame->dcmd; 3841 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0]; 3842 3843 model = ddi_model_convert_from(mode & FMODELS); 3844 if (model == DDI_MODEL_ILP32) { 3845 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3846 3847 xferlen = kdcmd->sgl.sge32[0].length; 3848 3849 /* SJ! - ubuf needs to be virtual address. 
*/ 3850 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3851 } 3852 else 3853 { 3854 #ifdef _ILP32 3855 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 3856 xferlen = kdcmd->sgl.sge32[0].length; 3857 /* SJ! - ubuf needs to be virtual address. */ 3858 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 3859 #else 3860 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64")); 3861 xferlen = kdcmd->sgl.sge64[0].length; 3862 /* SJ! - ubuf needs to be virtual address. */ 3863 ubuf = (void *)(ulong_t)dcmd->sgl.sge64[0].phys_addr; 3864 #endif 3865 } 3866 if (xferlen) { 3867 /* means IOCTL requires DMA */ 3868 /* allocate the data transfer buffer */ 3869 dcmd_dma_obj.size = xferlen; 3870 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 3871 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 3872 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 3873 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 3874 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 3875 3876 /* allocate kernel buffer for DMA */ 3877 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 3878 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3879 "could not data transfer buffer alloc.")); 3880 return (DDI_FAILURE); 3881 } 3882 3883 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 3884 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 3885 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer, 3886 xferlen, mode)) { 3887 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3888 "copy from user space failed\n")); 3889 return (1); 3890 } 3891 } 3892 3893 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 3894 } 3895 3896 dcmd->cmd = kdcmd->cmd; 3897 dcmd->cmd_status = kdcmd->cmd_status; 3898 dcmd->sge_count = kdcmd->sge_count; 3899 dcmd->timeout = kdcmd->timeout; 3900 dcmd->data_xfer_len = kdcmd->data_xfer_len; 3901 dcmd->opcode = kdcmd->opcode; 3902 3903 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, 12); 3904 3905 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64; 3906 
dcmd->sgl.sge32[0].length = xferlen; 3907 dcmd->sgl.sge32[0].phys_addr = kphys_addr; 3908 3909 cmd->sync_cmd = MEGASAS_TRUE; 3910 cmd->frame_count = 1; 3911 3912 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 3913 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n")); 3914 } else { 3915 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 3916 /* 3917 * con_log(CL_ANN, (CE_WARN,"issue_mfi_dcmd: " 3918 * copy to user space\n")); 3919 */ 3920 3921 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf, 3922 xferlen, mode)) { 3923 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: " 3924 "copy to user space failed\n")); 3925 return (1); 3926 } 3927 } 3928 } 3929 3930 kdcmd->cmd_status = dcmd->cmd_status; 3931 3932 if (xferlen) { 3933 /* free kernel buffer */ 3934 mega_free_dma_obj(dcmd_dma_obj); 3935 } 3936 3937 return (0); 3938 } 3939 3940 /* 3941 * issue_mfi_smp 3942 */ 3943 static int 3944 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 3945 struct megasas_cmd *cmd, int mode) 3946 { 3947 void *request_ubuf; 3948 void *response_ubuf; 3949 uint32_t request_xferlen = 0; 3950 uint32_t response_xferlen = 0; 3951 uint_t model; 3952 dma_obj_t request_dma_obj; 3953 dma_obj_t response_dma_obj; 3954 struct megasas_smp_frame *ksmp; 3955 struct megasas_smp_frame *smp; 3956 struct megasas_sge32 *sge32; 3957 #ifndef _ILP32 3958 struct megasas_sge64 *sge64; 3959 #endif 3960 3961 smp = &cmd->frame->smp; 3962 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0]; 3963 3964 model = ddi_model_convert_from(mode & FMODELS); 3965 if (model == DDI_MODEL_ILP32) { 3966 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3967 3968 sge32 = &ksmp->sgl[0].sge32[0]; 3969 response_xferlen = sge32[0].length; 3970 request_xferlen = sge32[1].length; 3971 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3972 "response_xferlen = %x, request_xferlen = %x", 3973 response_xferlen, request_xferlen)); 3974 3975 /* SJ! - ubuf needs to be virtual address. 
*/ 3976 3977 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3978 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3979 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3980 "response_ubuf = %p, request_ubuf = %p", 3981 response_ubuf, request_ubuf)); 3982 } else { 3983 #ifdef _ILP32 3984 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32")); 3985 3986 sge32 = &ksmp->sgl[0].sge32[0]; 3987 response_xferlen = sge32[0].length; 3988 request_xferlen = sge32[1].length; 3989 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: " 3990 "response_xferlen = %x, request_xferlen = %x", 3991 response_xferlen, request_xferlen)); 3992 3993 /* SJ! - ubuf needs to be virtual address. */ 3994 3995 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 3996 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 3997 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 3998 "response_ubuf = %p, request_ubuf = %p", 3999 response_ubuf, request_ubuf)); 4000 #else 4001 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64")); 4002 4003 sge64 = &ksmp->sgl[0].sge64[0]; 4004 response_xferlen = sge64[0].length; 4005 request_xferlen = sge64[1].length; 4006 4007 /* SJ! - ubuf needs to be virtual address. 
*/ 4008 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 4009 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 4010 #endif 4011 } 4012 if (request_xferlen) { 4013 /* means IOCTL requires DMA */ 4014 /* allocate the data transfer buffer */ 4015 request_dma_obj.size = request_xferlen; 4016 request_dma_obj.dma_attr = megasas_generic_dma_attr; 4017 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4018 request_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4019 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 4020 request_dma_obj.dma_attr.dma_attr_align = 1; 4021 4022 /* allocate kernel buffer for DMA */ 4023 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) { 4024 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4025 "could not data transfer buffer alloc.")); 4026 return (DDI_FAILURE); 4027 } 4028 4029 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4030 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer, 4031 request_xferlen, mode)) { 4032 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4033 "copy from user space failed\n")); 4034 return (1); 4035 } 4036 } 4037 4038 if (response_xferlen) { 4039 /* means IOCTL requires DMA */ 4040 /* allocate the data transfer buffer */ 4041 response_dma_obj.size = response_xferlen; 4042 response_dma_obj.dma_attr = megasas_generic_dma_attr; 4043 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4044 response_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4045 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 4046 response_dma_obj.dma_attr.dma_attr_align = 1; 4047 4048 /* allocate kernel buffer for DMA */ 4049 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) { 4050 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4051 "could not data transfer buffer alloc.")); 4052 return (DDI_FAILURE); 4053 } 4054 4055 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4056 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer, 4057 response_xferlen, mode)) { 4058 con_log(CL_ANN, 
(CE_WARN, "issue_mfi_smp: " 4059 "copy from user space failed\n")); 4060 return (1); 4061 } 4062 } 4063 4064 smp->cmd = ksmp->cmd; 4065 smp->cmd_status = ksmp->cmd_status; 4066 smp->connection_status = ksmp->connection_status; 4067 smp->sge_count = ksmp->sge_count; 4068 /* smp->context = ksmp->context; */ 4069 smp->timeout = ksmp->timeout; 4070 smp->data_xfer_len = ksmp->data_xfer_len; 4071 4072 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr, 4073 sizeof (uint64_t)); 4074 4075 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64; 4076 4077 model = ddi_model_convert_from(mode & FMODELS); 4078 if (model == DDI_MODEL_ILP32) { 4079 con_log(CL_ANN1, (CE_NOTE, 4080 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4081 4082 sge32 = &smp->sgl[0].sge32[0]; 4083 sge32[0].length = response_xferlen; 4084 sge32[0].phys_addr = 4085 response_dma_obj.dma_cookie[0].dmac_address; 4086 sge32[1].length = request_xferlen; 4087 sge32[1].phys_addr = 4088 request_dma_obj.dma_cookie[0].dmac_address; 4089 } else { 4090 #ifdef _ILP32 4091 con_log(CL_ANN1, (CE_NOTE, 4092 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4093 sge32 = &smp->sgl[0].sge32[0]; 4094 sge32[0].length = response_xferlen; 4095 sge32[0].phys_addr = 4096 response_dma_obj.dma_cookie[0].dmac_address; 4097 sge32[1].length = request_xferlen; 4098 sge32[1].phys_addr = 4099 request_dma_obj.dma_cookie[0].dmac_address; 4100 #else 4101 con_log(CL_ANN1, (CE_NOTE, 4102 "issue_mfi_smp: DDI_MODEL_LP64")); 4103 sge64 = &smp->sgl[0].sge64[0]; 4104 sge64[0].length = response_xferlen; 4105 sge64[0].phys_addr = 4106 response_dma_obj.dma_cookie[0].dmac_address; 4107 sge64[1].length = request_xferlen; 4108 sge64[1].phys_addr = 4109 request_dma_obj.dma_cookie[0].dmac_address; 4110 #endif 4111 } 4112 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: " 4113 "smp->response_xferlen = %d, smp->request_xferlen = %d " 4114 "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length, 4115 smp->data_xfer_len)); 4116 4117 cmd->sync_cmd = MEGASAS_TRUE; 4118 cmd->frame_count = 1; 
4119 4120 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4121 con_log(CL_ANN, (CE_WARN, 4122 "issue_mfi_smp: fw_ioctl failed\n")); 4123 } else { 4124 con_log(CL_ANN1, (CE_NOTE, 4125 "issue_mfi_smp: copy to user space\n")); 4126 4127 if (request_xferlen) { 4128 if (ddi_copyout(request_dma_obj.buffer, request_ubuf, 4129 request_xferlen, mode)) { 4130 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4131 "copy to user space failed\n")); 4132 return (1); 4133 } 4134 } 4135 4136 if (response_xferlen) { 4137 if (ddi_copyout(response_dma_obj.buffer, response_ubuf, 4138 response_xferlen, mode)) { 4139 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 4140 "copy to user space failed\n")); 4141 return (1); 4142 } 4143 } 4144 } 4145 4146 ksmp->cmd_status = smp->cmd_status; 4147 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 4148 smp->cmd_status)); 4149 4150 4151 if (request_xferlen) { 4152 /* free kernel buffer */ 4153 mega_free_dma_obj(request_dma_obj); 4154 } 4155 4156 if (response_xferlen) { 4157 /* free kernel buffer */ 4158 mega_free_dma_obj(response_dma_obj); 4159 } 4160 4161 return (0); 4162 } 4163 4164 /* 4165 * issue_mfi_stp 4166 */ 4167 static int 4168 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4169 struct megasas_cmd *cmd, int mode) 4170 { 4171 void *fis_ubuf; 4172 void *data_ubuf; 4173 uint32_t fis_xferlen = 0; 4174 uint32_t data_xferlen = 0; 4175 uint_t model; 4176 dma_obj_t fis_dma_obj; 4177 dma_obj_t data_dma_obj; 4178 struct megasas_stp_frame *kstp; 4179 struct megasas_stp_frame *stp; 4180 4181 stp = &cmd->frame->stp; 4182 kstp = (struct megasas_stp_frame *)&ioctl->frame[0]; 4183 4184 model = ddi_model_convert_from(mode & FMODELS); 4185 if (model == DDI_MODEL_ILP32) { 4186 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4187 4188 fis_xferlen = kstp->sgl.sge32[0].length; 4189 data_xferlen = kstp->sgl.sge32[1].length; 4190 4191 /* SJ! - ubuf needs to be virtual address. 
*/ 4192 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4193 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4194 } 4195 else 4196 { 4197 #ifdef _ILP32 4198 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32")); 4199 4200 fis_xferlen = kstp->sgl.sge32[0].length; 4201 data_xferlen = kstp->sgl.sge32[1].length; 4202 4203 /* SJ! - ubuf needs to be virtual address. */ 4204 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 4205 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 4206 #else 4207 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64")); 4208 4209 fis_xferlen = kstp->sgl.sge64[0].length; 4210 data_xferlen = kstp->sgl.sge64[1].length; 4211 4212 /* SJ! - ubuf needs to be virtual address. */ 4213 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 4214 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 4215 #endif 4216 } 4217 4218 4219 if (fis_xferlen) { 4220 #ifdef DEBUG 4221 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: " 4222 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 4223 #endif 4224 /* means IOCTL requires DMA */ 4225 /* allocate the data transfer buffer */ 4226 fis_dma_obj.size = fis_xferlen; 4227 fis_dma_obj.dma_attr = megasas_generic_dma_attr; 4228 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4229 fis_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4230 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 4231 fis_dma_obj.dma_attr.dma_attr_align = 1; 4232 4233 /* allocate kernel buffer for DMA */ 4234 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) { 4235 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4236 "could not data transfer buffer alloc.")); 4237 return (DDI_FAILURE); 4238 } 4239 4240 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4241 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer, 4242 fis_xferlen, mode)) { 4243 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4244 "copy from user space failed\n")); 4245 return (1); 4246 } 4247 } 4248 4249 if 
(data_xferlen) { 4250 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p " 4251 "data_xferlen = %x", data_ubuf, data_xferlen)); 4252 4253 /* means IOCTL requires DMA */ 4254 /* allocate the data transfer buffer */ 4255 data_dma_obj.size = data_xferlen; 4256 data_dma_obj.dma_attr = megasas_generic_dma_attr; 4257 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 4258 data_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 4259 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 4260 data_dma_obj.dma_attr.dma_attr_align = 1; 4261 4262 /* allocate kernel buffer for DMA */ 4263 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) { 4264 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4265 "could not data transfer buffer alloc.")); 4266 return (DDI_FAILURE); 4267 } 4268 4269 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 4270 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer, 4271 data_xferlen, mode)) { 4272 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4273 "copy from user space failed\n")); 4274 return (1); 4275 } 4276 } 4277 4278 stp->cmd = kstp->cmd; 4279 stp->cmd_status = kstp->cmd_status; 4280 stp->connection_status = kstp->connection_status; 4281 stp->target_id = kstp->target_id; 4282 stp->sge_count = kstp->sge_count; 4283 /* stp->context = kstp->context; */ 4284 stp->timeout = kstp->timeout; 4285 stp->data_xfer_len = kstp->data_xfer_len; 4286 4287 bcopy((void *)kstp->fis, (void *)stp->fis, 10); 4288 4289 stp->flags = kstp->flags & ~MFI_FRAME_SGL64; 4290 stp->stp_flags = kstp->stp_flags; 4291 stp->sgl.sge32[0].length = fis_xferlen; 4292 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address; 4293 stp->sgl.sge32[1].length = data_xferlen; 4294 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address; 4295 4296 cmd->sync_cmd = MEGASAS_TRUE; 4297 cmd->frame_count = 1; 4298 4299 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4300 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n")); 4301 } else { 
4302 /* 4303 * con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4304 * "copy to user space\n")); 4305 */ 4306 4307 if (fis_xferlen) { 4308 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf, 4309 fis_xferlen, mode)) { 4310 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4311 "copy to user space failed\n")); 4312 return (1); 4313 } 4314 } 4315 4316 if (data_xferlen) { 4317 if (ddi_copyout(data_dma_obj.buffer, data_ubuf, 4318 data_xferlen, mode)) { 4319 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 4320 "copy to user space failed\n")); 4321 return (1); 4322 } 4323 } 4324 } 4325 4326 kstp->cmd_status = stp->cmd_status; 4327 4328 if (fis_xferlen) { 4329 /* free kernel buffer */ 4330 mega_free_dma_obj(fis_dma_obj); 4331 } 4332 4333 if (data_xferlen) { 4334 /* free kernel buffer */ 4335 mega_free_dma_obj(data_dma_obj); 4336 } 4337 4338 return (0); 4339 } 4340 4341 /* 4342 * fill_up_drv_ver 4343 */ 4344 static void 4345 fill_up_drv_ver(struct megasas_drv_ver *dv) 4346 { 4347 (void) memset(dv, 0, sizeof (struct megasas_drv_ver)); 4348 4349 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 4350 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 4351 (void) memcpy(dv->os_ver, "Build 36", strlen("Build 36")); 4352 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas")); 4353 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION)); 4354 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE, 4355 strlen(MEGASAS_RELDATE)); 4356 } 4357 4358 /* 4359 * handle_drv_ioctl 4360 */ 4361 static int 4362 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4363 int mode) 4364 { 4365 int i; 4366 int rval = 0; 4367 int *props = NULL; 4368 void *ubuf; 4369 4370 uint8_t *pci_conf_buf; 4371 uint32_t xferlen; 4372 uint32_t num_props; 4373 uint_t model; 4374 struct megasas_dcmd_frame *kdcmd; 4375 struct megasas_drv_ver dv; 4376 struct megasas_pci_information pi; 4377 4378 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0]; 4379 4380 model 
= ddi_model_convert_from(mode & FMODELS);

	/*
	 * Pick the user buffer address and transfer length out of the
	 * first scatter/gather element.  A 32-bit caller (ILP32 model,
	 * or an ILP32 kernel) supplies sge32; only a 64-bit kernel with
	 * a 64-bit caller uses sge64.
	 */
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));

		xferlen = kdcmd->sgl.sge32[0].length;

		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
		xferlen = kdcmd->sgl.sge32[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_LP64"));
		xferlen = kdcmd->sgl.sge64[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
#endif
	}
	con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
	    "dataBuf=%p size=%d bytes", ubuf, xferlen));

	switch (kdcmd->opcode) {
	case MR_DRIVER_IOCTL_DRIVER_VERSION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MR_DRIVER_IOCTL_DRIVER_VERSION"));

		fill_up_drv_ver(&dv);

		/*
		 * NOTE(review): xferlen is caller-controlled; if it
		 * exceeds sizeof (dv) this copies past the end of dv.
		 * Confirm callers always pass the structure size.
		 */
		if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_DRIVER_VERSION : "
			    "copy to user space failed\n"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	case MR_DRIVER_IOCTL_PCI_INFORMATION:
		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
		    "MR_DRIVER_IOCTL_PCI_INFORMAITON"));

		/*
		 * Decode bus/device/function from the first word of the
		 * node's "reg" property (PCI unit-address encoding).
		 */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
		    0, "reg", &props, &num_props)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
			    "ddi_prop_look_int_array failed\n"));
			rval = 1;
		} else {

			pi.busNumber = (props[0] >> 16) & 0xFF;
			pi.deviceNumber = (props[0] >> 11) & 0x1f;
			pi.functionNumber = (props[0] >> 8) & 0x7;

			ddi_prop_free((void *)props);
		}

		/*
		 * Snapshot the PCI config header, byte by byte, into
		 * pi.pciHeaderInfo.
		 * NOTE(review): this and the copyout below still run
		 * even when the "reg" lookup above failed (only rval is
		 * set) -- confirm that is intentional.
		 */
		pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;

		for (i = 0; i < (sizeof (struct megasas_pci_information) -
		    offsetof(struct megasas_pci_information, pciHeaderInfo));
		    i++) {
			pci_conf_buf[i] =
			    pci_config_get8(instance->pci_handle, i);
		}

		if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
			    "copy to user space failed\n"));
			kdcmd->cmd_status = 1;
			rval = 1;
		} else {
			kdcmd->cmd_status = 0;
		}
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
		    "invalid driver specific IOCTL opcode = 0x%x",
		    kdcmd->opcode));
		kdcmd->cmd_status = 1;
		rval = 1;
		break;
	}

	return (rval);
}

/*
 * handle_mfi_ioctl
 *
 * Dispatch a pass-through MFI ioctl frame to the issue routine that
 * matches the opcode in the frame's common header.  A command packet
 * is borrowed from the free pool for the duration of the request and
 * is always returned, whatever the outcome.  Returns 0 on success,
 * 1 on failure.
 */
static int
handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    int mode)
{
	int rval = 0;

	struct megasas_header *hdr;
	struct megasas_cmd *cmd;

	cmd = get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "megasas: "
		    "failed to get a cmd packet\n"));
		return (1);
	}

	/* every MFI frame begins with a common header holding the opcode */
	hdr = (struct megasas_header *)&ioctl->frame[0];

	switch (hdr->cmd) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
		rval = 1;
		break;
	}


	return_mfi_pkt(instance, cmd);

	return (rval);
}


/*
 * AEN
 */
static int
handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
{
	int rval = 0;

	/*
	 * (Re)register with the firmware for asynchronous event
	 * notification, starting at the instance's current event
	 * sequence number with the class/locale filter supplied by the
	 * caller.  The result is reflected back into aen->cmd_status.
	 */
	rval = register_mfi_aen(instance, instance->aen_seq_num,
	    aen->class_locale_word);

	aen->cmd_status = (uint8_t)rval;

	return (rval);
}

/*
 * register_mfi_aen
 *
 * Issue (or re-issue) the MR_DCMD_CTRL_EVENT_WAIT DCMD asking the
 * firmware to report asynchronous events of the given class/locale,
 * starting at seq_num.  If an AEN command is already outstanding and
 * its filter already covers the new request, nothing is done;
 * otherwise the old command is aborted and a merged filter is
 * registered.  Returns 0 on success, -ENOMEM when no command packet
 * is available, or the abort failure code.
 */
static int
register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	union megasas_evt_class_locale curr_aen;
	union megasas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = class_locale_word;

	if (instance->aen_cmd) {
		/* mbox.w[1] of the pending DCMD holds its class/locale */
		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* merge: union of locale bitmaps ... */
			curr_aen.members.locale |= prev_aen.members.locale;

			/* ... and the more inclusive (smaller) class */
			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, instance->aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command\n"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = class_locale_word;
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd)
		return (-ENOMEM);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < 12; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, 12);

	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* Prepare DCMD for aen registration */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	/* FW DMAs the event details into this preallocated buffer */
	dcmd->sgl.sge32[0].phys_addr =
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);

	instance->aen_seq_num = seq_num;

	/*
	 * Store reference to the cmd used to register for AEN.
When an 4636 * application wants us to register for AEN, we have to abort this 4637 * cmd and re-register with a new EVENT LOCALE supplied by that app 4638 */ 4639 instance->aen_cmd = cmd; 4640 4641 cmd->frame_count = 1; 4642 4643 /* Issue the aen registration frame */ 4644 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4645 instance->func_ptr->issue_cmd(cmd, instance); 4646 4647 return (0); 4648 } 4649 4650 #ifndef lint 4651 /*ARGSUSED*/ 4652 static void 4653 megasas_minphys(struct buf *bp) 4654 { 4655 con_log(CL_ANN1, (CE_CONT, ("minphys CALLED\n"))); 4656 } 4657 #endif 4658 4659 static void 4660 display_scsi_inquiry(caddr_t scsi_inq) 4661 { 4662 #define MAX_SCSI_DEVICE_CODE 14 4663 int i; 4664 char inquiry_buf[256] = {0}; 4665 int len; 4666 const char *const scsi_device_types[] = { 4667 "Direct-Access ", 4668 "Sequential-Access", 4669 "Printer ", 4670 "Processor ", 4671 "WORM ", 4672 "CD-ROM ", 4673 "Scanner ", 4674 "Optical Device ", 4675 "Medium Changer ", 4676 "Communications ", 4677 "Unknown ", 4678 "Unknown ", 4679 "Unknown ", 4680 "Enclosure ", 4681 }; 4682 4683 len = 0; 4684 4685 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4686 for (i = 8; i < 16; i++) { 4687 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4688 scsi_inq[i]); 4689 } 4690 4691 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4692 4693 for (i = 16; i < 32; i++) { 4694 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4695 scsi_inq[i]); 4696 } 4697 4698 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4699 4700 for (i = 32; i < 36; i++) { 4701 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4702 scsi_inq[i]); 4703 } 4704 4705 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4706 4707 4708 i = scsi_inq[0] & 0x1f; 4709 4710 4711 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4712 i < MAX_SCSI_DEVICE_CODE ? 
scsi_device_types[i] : 4713 "Unknown "); 4714 4715 4716 len += snprintf(inquiry_buf + len, 265 - len, 4717 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4718 4719 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4720 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4721 } else { 4722 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4723 } 4724 4725 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4726 } 4727 4728 #if defined(NOT_YET) && !defined(lint) 4729 /* 4730 * lint pointed out a bug that pkt may be used before being set 4731 */ 4732 static void 4733 io_timeout_checker(void *arg) 4734 { 4735 unsigned int cookie; 4736 struct scsi_pkt *pkt; 4737 struct megasas_instance *instance = arg; 4738 4739 cookie = ddi_enter_critical(); 4740 4741 /* decrease the timeout value per each packet */ 4742 4743 if (pkt->pkt_time == 0) { 4744 /* this means that the scsi command has timed out */ 4745 /* pull out the packet from the list */ 4746 /* call callback in the scsi_pkt structure */ 4747 } 4748 4749 ddi_exit_critical(cookie); 4750 4751 /* schedule next timeout check */ 4752 instance->timeout_id = timeout(io_timeout_checker, (void *)instance, 4753 drv_usectohz(MEGASAS_1_SECOND)); 4754 } 4755 #endif /* defined(NOT_YET) && !defined(lint) */ 4756 4757 static int 4758 read_fw_status_reg_xscale(struct megasas_instance *instance) 4759 { 4760 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4761 return ((int)RD_OB_MSG_0(instance)); 4762 4763 } 4764 4765 static int 4766 read_fw_status_reg_ppc(struct megasas_instance *instance) 4767 { 4768 /* con_log(CL_ANN, (CE_WARN, "read_fw_status_reg_ppc: called\n")); */ 4769 4770 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4771 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4772 } 4773 4774 static void 4775 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance) 4776 { 4777 atomic_add_16(&instance->fw_outstanding, 1); 4778 /* push_pend_queue(instance, cmd); */ 4779 4780 /* Issue the command to the FW */ 4781 /* LINTED 
E_BAD_PTR_CAST_ALIGN */
	/*
	 * xscale queue-port encoding: frame address shifted right by 3
	 * with (frame_count - 1) in the low bits -- TODO(review)
	 * confirm against the 1064 register specification.
	 */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);
}

/*
 * issue_cmd_ppc - post a command frame on the ppc (1078) inbound queue
 * port (fire and forget; completion arrives via interrupt).
 */
static void
issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	/* con_log(CL_ANN, (CE_WARN, "issue_cmd_ppc: called\n")); */

	atomic_add_16(&instance->fw_outstanding, 1);

	/* Issue the command to the FW */
	/*
	 * ppc queue-port encoding: frame address with bit 0 set and
	 * (frame_count - 1) shifted into bit 1 and up -- TODO(review)
	 * confirm against the 1078 register specification.
	 */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}

/*
 * issue_cmd_in_sync_mode
 *
 * Post a frame and block until the interrupt path marks the command
 * complete (cmd_status changes from ENODATA), signalled through
 * int_cmd_cv.  Returns 0 on completion, 1 otherwise.
 */
static int
issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 10000;

	/* ENODATA acts as the "still in flight" sentinel */
	cmd->cmd_status = ENODATA;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	/*
	 * NOTE(review): cv_wait() has no timeout, so "msecs" bounds the
	 * number of wakeups, not elapsed time -- confirm this is the
	 * intended timeout semantic.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}

static int
issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 10000;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));

	/* ENODATA acts as the "still in flight" sentinel */
	cmd->cmd_status = ENODATA;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	/* see the NOTE in the xscale variant: msecs bounds cv wakeups */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * issue_cmd_in_poll_mode
 *
 * Post a frame and busy-wait (1 ms at a time) for the firmware to
 * overwrite the frame's cmd_status field; used before interrupts are
 * enabled.  Returns DDI_SUCCESS, or DDI_FAILURE on poll timeout.
 */
static int
issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 1000;

	struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame;

	/* 0xFF marks the frame "not yet completed by FW" */
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	/* wait for cmd_status to change */
	for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) {
		drv_usecwait(1000);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == 0xff) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 1000;

	struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));

	/* 0xFF marks the frame "not yet completed by FW" */
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* issue the frame using inbound queue port */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change */
	for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) {
		drv_usecwait(1000);	/* wait for 1000 usecs */
	}

	if (frame_hdr->cmd_status == 0xff) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * enable_intr_xscale - unmask controller interrupts through the common
 * MFI_ENABLE_INTR macro.
 */
static void
enable_intr_xscale(struct megasas_instance *instance)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	MFI_ENABLE_INTR(instance);
}

/*
 * enable_intr_ppc - clear any latched doorbell bits, then unmask only
 * the 1078 reply-message interrupt.
 */
static void
enable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));

	/* discard any interrupt state latched while we were masked */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance);

	/*
	 * As 1078DE is same as 1078 chip, the interrupt mask
	 * remains the same.
	 */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
	/* WR_OB_INTR_MASK(~0x80000000, instance); */

	/* dummy read to force PCI flush */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x\n", mask));
}

/*
 * disable_intr_xscale - mask controller interrupts through the common
 * MFI_DISABLE_INTR macro.
 */
static void
disable_intr_xscale(struct megasas_instance *instance)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	MFI_DISABLE_INTR(instance);
}

/*
 * disable_intr_ppc - mask all outbound interrupts on the 1078.
 */
static void
disable_intr_ppc(struct megasas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_MASK(0xFFFFFFFF, instance);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}

/*
 * intr_ack_xscale - claim and acknowledge an xscale interrupt.
 * Returns DDI_INTR_UNCLAIMED when the outbound status register shows
 * no interrupt of ours; otherwise acknowledges it by writing the
 * status value back and returns DDI_INTR_CLAIMED.
 */
static int
intr_ack_xscale(struct megasas_instance *instance)
{
	uint32_t status;

	/* check if it is our interrupt */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_INTR_STATUS(status, instance);

	return (DDI_INTR_CLAIMED);
}

/*
 * intr_ack_ppc - claim and acknowledge a 1078 interrupt via the
 * outbound doorbell-clear register, followed by a dummy status read
 * to flush the posted write.
 */
static int
intr_ack_ppc(struct megasas_instance *instance)
{
	uint32_t status;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));

	/* check if it is our interrupt */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));

	/*
	 * As 1078DE is same as 1078 chip, the status field
	 * remains the same.
	 */
	if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* clear the interrupt by writing back the same value */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));

	return (DDI_INTR_CLAIMED);
}