1 /* 2 * megaraid_sas.c: source for mega_sas driver 3 * 4 * MegaRAID device driver for SAS controllers 5 * Copyright (c) 2005-2008, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com> 11 * Seokmann Ju 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright notice, 17 * this list of conditions and the following disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above copyright notice, 20 * this list of conditions and the following disclaimer in the documentation 21 * and/or other materials provided with the distribution. 22 * 23 * 3. Neither the name of the author nor the names of its contributors may be 24 * used to endorse or promote products derived from this software without 25 * specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 38 * DAMAGE. 39 */ 40 41 /* 42 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 43 * Use is subject to license terms. 
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/pci.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/signal.h>

#include "megaraid_sas.h"

/*
 * Local static data
 */
/* Opaque anchor for per-controller soft state (see ddi_soft_state(9F)). */
static void	*megasas_state = NULL;
/* Console-logging verbosity consumed by the con_log() macro. */
static int	debug_level_g = CL_ANN;

/*
 * These SCSA entry points are declared weak; the character entry points
 * below test them for NULL before calling, so the driver still loads when
 * the framework does not provide them.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl

/*
 * Template DMA attributes.  attach() takes a per-instance copy and
 * overrides dma_attr_sgllen with the controller's max_num_sge.
 */
static ddi_dma_attr_t megasas_generic_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	(unsigned long long)0,		/* low DMA address range */
	(unsigned long long)0xffffffff,	/* high DMA address range */
	(unsigned long long)0xffffffff,	/* DMA counter register */
	8,				/* DMA address alignment */
	0x07,				/* DMA burstsizes */
	1,				/* min DMA size */
	(unsigned long long)0xffffffff,	/* max DMA size */
	(unsigned long long)0xffffffff,	/* segment boundary */
	MEGASAS_MAX_SGE_CNT,		/* dma_attr_sglen */
	512,				/* granularity of device */
	0				/* bus specific DMA flags */
};

int32_t megasas_max_cap_maxxfer = 0x1000000;

/*
 * cb_ops contains base level routines
 */
static struct cb_ops megasas_cb_ops = {
	megasas_open,		/* open */
	megasas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	megasas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * dev_ops contains configuration routines
 */
static struct dev_ops megasas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	megasas_getinfo,	/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	megasas_attach,		/* attach */
	megasas_detach,		/* detach */
	megasas_reset,		/* reset */
	&megasas_cb_ops,	/* char/block ops */
	NULL			/* bus ops */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MEGASAS_VERSION,
	&megasas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/* Controller structures are little-endian; accesses must not be reordered. */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};


/*
 * ************************************************************************** *
 *                                                                            *
 *              common entry points - for loadable kernel modules             *
 *                                                                            *
 * ************************************************************************** *
 */

/*
 * _init - initialize a loadable module
 * @void
 *
 * The driver should perform any one-time resource allocation or data
 * initialization during driver loading in _init(). For example, the driver
 * should initialize any mutexes global to the driver in this routine.
 * The driver should not, however, use _init() to allocate or initialize
 * anything that has to do with a particular instance of the device.
 * Per-instance initialization must be done in attach().
176 */ 177 int 178 _init(void) 179 { 180 int ret; 181 182 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 183 184 ret = ddi_soft_state_init(&megasas_state, 185 sizeof (struct megasas_instance), 0); 186 187 if (ret != 0) { 188 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state")); 189 return (ret); 190 } 191 192 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 193 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba")); 194 ddi_soft_state_fini(&megasas_state); 195 return (ret); 196 } 197 198 ret = mod_install(&modlinkage); 199 200 if (ret != 0) { 201 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed")); 202 scsi_hba_fini(&modlinkage); 203 ddi_soft_state_fini(&megasas_state); 204 } 205 206 return (ret); 207 } 208 209 /* 210 * _info - returns information about a loadable module. 211 * @void 212 * 213 * _info() is called to return module information. This is a typical entry 214 * point that does predefined role. It simply calls mod_info(). 215 */ 216 int 217 _info(struct modinfo *modinfop) 218 { 219 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 220 221 return (mod_info(&modlinkage, modinfop)); 222 } 223 224 /* 225 * _fini - prepare a loadable module for unloading 226 * @void 227 * 228 * In _fini(), the driver should release any resources that were allocated in 229 * _init(). The driver must remove itself from the system module list. 
 */
int
_fini(void)
{
	int ret;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* mod_remove() fails while the driver is still held; keep resources. */
	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&megasas_state);

	return (ret);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for autoconfiguration                  *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * probe - called before attach for a given instance
 *	This is an optional entry for self-identifiable device.
 * @dip:
 *
 * static int megasas_probe(dev_info_t *dip)
 * {
 *	return (DDI_SUCCESS);
 * }
 */

/*
 * attach - adds a device to the system as part of initialization
 * @dip:
 * @cmd:
 *
 * The kernel calls a driver's attach() entry point to attach an instance of
 * a device (for MegaRAID, it is instance of a controller) or to resume
 * operation for an instance of a device that has been suspended or has been
 * shut down by the power management framework
 * The attach() entry point typically includes the following types of
 * processing:
 * - allocate a soft-state structure for the device instance (for MegaRAID,
 *   controller instance)
 * - initialize per-instance mutexes
 * - initialize condition variables
 * - register the device's interrupts (for MegaRAID, controller's interrupts)
 * - map the registers and memory of the device instance (for MegaRAID,
 *   controller instance)
 * - create minor device nodes for the device instance (for MegaRAID,
 *   controller instance)
 * - report that the device instance (for MegaRAID, controller instance) has
 *   attached
 */
static int
megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance_no;
	int		nregs;
	/* Per-step "done" flags consumed by the fail_attach unwind path. */
	uint8_t		added_isr_f = 0;
	uint8_t		added_soft_isr_f = 0;
	uint8_t		create_devctl_node_f = 0;
	uint8_t		create_scsi_node_f = 0;
	uint8_t		create_ioc_node_f = 0;
	uint8_t		tran_alloc_f = 0;
	uint8_t		irq;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subsysvid;
	uint16_t	subsysid;
	uint16_t	command;

	scsi_hba_tran_t		*tran;
	/* Per-instance copy: dma_attr_sgllen is patched below. */
	ddi_dma_attr_t		tran_dma_attr = megasas_generic_dma_attr;
	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance_no = ddi_get_instance(dip);

	/*
	 * Since we know that some instantiations of this device can be
	 * plugged into slave-only SBus slots, check to see whether this is
	 * one such.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN,
		    "mega%d: Device in slave-only slot, unused", instance_no));

		return (DDI_FAILURE);
	}

	switch (cmd) {
		case DDI_ATTACH:
			con_log(CL_ANN, (CE_NOTE, "megasas: DDI_ATTACH"));
			/* allocate the soft state for the instance */
			if (ddi_soft_state_zalloc(megasas_state, instance_no)
			    != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "mega%d: Failed to allocate soft state",
				    instance_no));

				return (DDI_FAILURE);
			}

			instance = (struct megasas_instance *)ddi_get_soft_state
			    (megasas_state, instance_no);

			if (instance == NULL) {
				con_log(CL_ANN, (CE_WARN,
				    "mega%d: Bad soft state", instance_no));

				ddi_soft_state_free(megasas_state, instance_no);

				return (DDI_FAILURE);
			}

			/* Redundant with zalloc above, but harmless. */
			bzero((caddr_t)instance,
			    sizeof (struct megasas_instance));

			instance->func_ptr = kmem_zalloc(
			    sizeof (struct megasas_func_ptr), KM_SLEEP);
			ASSERT(instance->func_ptr);

			/* Setup the PCI configuration space handles */
			if (pci_config_setup(dip, &instance->pci_handle) !=
			    DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "mega%d: pci config setup failed ",
				    instance_no));

				kmem_free(instance->func_ptr,
				    sizeof (struct megasas_func_ptr));
				ddi_soft_state_free(megasas_state, instance_no);

				return (DDI_FAILURE);
			}

			if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "megaraid: failed to get registers."));

				pci_config_teardown(&instance->pci_handle);
				kmem_free(instance->func_ptr,
				    sizeof (struct megasas_func_ptr));
				ddi_soft_state_free(megasas_state, instance_no);

				return (DDI_FAILURE);
			}

			/* Identify the exact controller model. */
			vendor_id = pci_config_get16(instance->pci_handle,
			    PCI_CONF_VENID);
			device_id = pci_config_get16(instance->pci_handle,
			    PCI_CONF_DEVID);

			subsysvid = pci_config_get16(instance->pci_handle,
			    PCI_CONF_SUBVENID);
			subsysid = pci_config_get16(instance->pci_handle,
			    PCI_CONF_SUBSYSID);

			/* Set bus-master enable in the PCI command register. */
			pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
			    (pci_config_get16(instance->pci_handle,
			    PCI_CONF_COMM) | PCI_COMM_ME));
			irq = pci_config_get8(instance->pci_handle,
			    PCI_CONF_ILINE);
#ifdef lint
			irq = irq;
#endif
			con_log(CL_ANN, (CE_CONT, "megasas[%d]: "
			    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
			    instance_no, vendor_id, device_id, subsysvid,
			    subsysid, pci_config_get8(instance->pci_handle,
			    PCI_CONF_ILINE), MEGASAS_VERSION));

			/* enable bus-mastering */
			command = pci_config_get16(instance->pci_handle,
			    PCI_CONF_COMM);

			if (!(command & PCI_COMM_ME)) {
				command |= PCI_COMM_ME;

				pci_config_put16(instance->pci_handle,
				    PCI_CONF_COMM, command);

				con_log(CL_ANN, (CE_CONT, "megaraid[%d]: "
				    "enable bus-mastering\n", instance_no));
			} else {
				con_log(CL_DLEVEL1, (CE_CONT, "megaraid[%d]: "
				    "bus-mastering already set\n",
				    instance_no));
			}

			/*
			 * initialize function pointers: the 1078 parts use
			 * the PPC register interface, everything else the
			 * XScale interface.
			 */
			if (device_id == PCI_DEVICE_ID_LSI_1078) {
				con_log(CL_ANN, (CE_CONT, "megasas[%d]: "
				    "1078R detected\n", instance_no));
				instance->func_ptr->read_fw_status_reg =
				    read_fw_status_reg_ppc;
				instance->func_ptr->issue_cmd = issue_cmd_ppc;
				instance->func_ptr->issue_cmd_in_sync_mode =
				    issue_cmd_in_sync_mode_ppc;
				instance->func_ptr->issue_cmd_in_poll_mode =
				    issue_cmd_in_poll_mode_ppc;
				instance->func_ptr->enable_intr =
				    enable_intr_ppc;
				instance->func_ptr->disable_intr =
				    disable_intr_ppc;
				instance->func_ptr->intr_ack = intr_ack_ppc;
			} else {
				con_log(CL_ANN, (CE_CONT, "megasas[%d]: "
				    "1064/8R detected\n", instance_no));
				instance->func_ptr->read_fw_status_reg =
				    read_fw_status_reg_xscale;
				instance->func_ptr->issue_cmd =
				    issue_cmd_xscale;
				instance->func_ptr->issue_cmd_in_sync_mode =
				    issue_cmd_in_sync_mode_xscale;
				instance->func_ptr->issue_cmd_in_poll_mode =
				    issue_cmd_in_poll_mode_xscale;
				instance->func_ptr->enable_intr =
				    enable_intr_xscale;
				instance->func_ptr->disable_intr =
				    disable_intr_xscale;
				instance->func_ptr->intr_ack =
				    intr_ack_xscale;
			}

			instance->baseaddress =
			    pci_config_get32(instance->pci_handle, 0x10);
			/*
			 * NOTE(review): masks BAR0 with 0x0fffc, keeping only
			 * 16 address bits and stripping the low flag bits --
			 * presumably an I/O-space base; confirm the mask.
			 */
			instance->baseaddress &= 0x0fffc;

			instance->dip		= dip;
			instance->vendor_id	= vendor_id;
			instance->device_id	= device_id;
			instance->subsysvid	= subsysvid;
			instance->subsysid	= subsysid;

			/* setup the mfi based low level driver */
			if (init_mfi(instance) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN, "megaraid: "
				    "could not initialize the low level "
				    "driver"));

				goto fail_attach;
			}

			/*
			 * Allocate the interrupt blocking cookie.
			 * It represents the information the framework
			 * needs to block interrupts. This cookie will
			 * be used by the locks shared accross our ISR.
			 * These locks must be initialized before we
			 * register our ISR.
			 * ddi_add_intr(9F)
			 */
			if (ddi_get_iblock_cookie(dip, 0,
			    &instance->iblock_cookie) != DDI_SUCCESS) {

				goto fail_attach;
			}

			if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
			    &instance->soft_iblock_cookie) != DDI_SUCCESS) {

				goto fail_attach;
			}

			/*
			 * Initialize the driver mutexes common to
			 * normal/high level isr
			 */
			if (ddi_intr_hilevel(dip, 0)) {
				instance->isr_level = HIGH_LEVEL_INTR;
				mutex_init(&instance->cmd_pool_mtx,
				    "cmd_pool_mtx", MUTEX_DRIVER,
				    instance->soft_iblock_cookie);
				mutex_init(&instance->cmd_pend_mtx,
				    "cmd_pend_mtx", MUTEX_DRIVER,
				    instance->soft_iblock_cookie);
			} else {
				/*
				 * Initialize the driver mutexes
				 * specific to soft-isr
				 */
				instance->isr_level = NORMAL_LEVEL_INTR;
				mutex_init(&instance->cmd_pool_mtx,
				    "cmd_pool_mtx", MUTEX_DRIVER,
				    instance->iblock_cookie);
				mutex_init(&instance->cmd_pend_mtx,
				    "cmd_pend_mtx", MUTEX_DRIVER,
				    instance->iblock_cookie);
			}

			mutex_init(&instance->completed_pool_mtx,
			    "completed_pool_mtx", MUTEX_DRIVER,
			    instance->iblock_cookie);
			mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
			    MUTEX_DRIVER, instance->iblock_cookie);
			mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
			    MUTEX_DRIVER, instance->iblock_cookie);
			mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
			    MUTEX_DRIVER, instance->iblock_cookie);

			cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
			cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);

			INIT_LIST_HEAD(&instance->completed_pool_list);

			/* Register our isr. */
			if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
			    (caddr_t)instance) != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    " ISR did not register"));

				goto fail_attach;
			}

			added_isr_f = 1;

			/* Register our soft-isr for highlevel interrupts. */
			if (instance->isr_level == HIGH_LEVEL_INTR) {
				if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
				    &instance->soft_intr_id, NULL, NULL,
				    megasas_softintr, (caddr_t)instance) !=
				    DDI_SUCCESS) {
					con_log(CL_ANN, (CE_WARN,
					    " Software ISR did not register"));

					goto fail_attach;
				}

				added_soft_isr_f = 1;
			}

			/* Allocate a transport structure */
			tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

			if (tran == NULL) {
				con_log(CL_ANN, (CE_WARN,
				    "scsi_hba_tran_alloc failed"));
				goto fail_attach;
			}

			tran_alloc_f = 1;

			instance->tran = tran;

			tran->tran_hba_private	= instance;
			tran->tran_tgt_private	= NULL;
			tran->tran_tgt_init	= megasas_tran_tgt_init;
			tran->tran_tgt_probe	= scsi_hba_probe;
			tran->tran_tgt_free	= (void (*)())NULL;
			tran->tran_init_pkt	= megasas_tran_init_pkt;
			tran->tran_start	= megasas_tran_start;
			tran->tran_abort	= megasas_tran_abort;
			tran->tran_reset	= megasas_tran_reset;
			tran->tran_bus_reset	= megasas_tran_bus_reset;
			tran->tran_getcap	= megasas_tran_getcap;
			tran->tran_setcap	= megasas_tran_setcap;
			tran->tran_destroy_pkt	= megasas_tran_destroy_pkt;
			tran->tran_dmafree	= megasas_tran_dmafree;
			tran->tran_sync_pkt	= megasas_tran_sync_pkt;
			tran->tran_reset_notify	= NULL;
			tran->tran_quiesce	= megasas_tran_quiesce;
			tran->tran_unquiesce	= megasas_tran_unquiesce;

			/* Limit S/G lists to what the firmware reported. */
			tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;

			/* Attach this instance of the hba */
			if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
			    != DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    "scsi_hba_attach failed\n"));

				goto fail_attach;
			}

			/* create devctl node for cfgadm command */
			if (ddi_create_minor_node(dip, "devctl",
			    S_IFCHR, INST2DEVCTL(instance_no),
			    DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
				con_log(CL_ANN, (CE_WARN,
				    "megaraid: failed to create devctl node."));

				goto fail_attach;
			}

			create_devctl_node_f = 1;

			/* create scsi node for cfgadm command */
			if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
			    INST2SCSI(instance_no),
			    DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
			    DDI_FAILURE) {
				con_log(CL_ANN, (CE_WARN,
				    "megaraid: failed to create scsi node."));

				goto fail_attach;
			}

			create_scsi_node_f = 1;

			(void) sprintf(instance->iocnode, "%d:lsirdctl",
			    instance_no);

			/*
			 * Create a node for applications
			 * for issuing ioctl to the driver.
			 */
			if (ddi_create_minor_node(dip, instance->iocnode,
			    S_IFCHR, INST2LSIRDCTL(instance_no),
			    DDI_PSEUDO, 0) == DDI_FAILURE) {
				con_log(CL_ANN, (CE_WARN,
				    "megaraid: failed to create ioctl node."));

				goto fail_attach;
			}

			create_ioc_node_f = 1;

			/* enable interrupt */
			instance->func_ptr->enable_intr(instance);

			/* initiate AEN */
			if (start_mfi_aen(instance)) {
				con_log(CL_ANN, (CE_WARN,
				    "megaraid: failed to initiate AEN."));
				goto fail_initiate_aen;
			}

			con_log(CL_ANN, (CE_NOTE,
			    "AEN started for instance %d.", instance_no));

			/* Finally! We are on the air. */
			ddi_report_dev(dip);
			break;
		case DDI_PM_RESUME:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: DDI_PM_RESUME"));
			break;
		case DDI_RESUME:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: DDI_RESUME"));
			break;
		default:
			con_log(CL_ANN, (CE_WARN,
			    "megasas: invalid attach cmd=%x", cmd));
			return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

fail_initiate_aen:
fail_attach:
	/*
	 * NOTE(review): this unwind path never calls free_space_for_mfi()
	 * for resources allocated by init_mfi(), never kmem_free()s
	 * instance->func_ptr, and does not destroy the mutexes/cvs
	 * initialized above -- compare with megasas_detach(), which releases
	 * all of these.  These look like leaks on a late attach failure;
	 * TODO confirm against init_mfi()/free_space_for_mfi().
	 */
	if (create_devctl_node_f) {
		ddi_remove_minor_node(dip, "devctl");
	}

	if (create_scsi_node_f) {
		ddi_remove_minor_node(dip, "scsi");
	}

	if (create_ioc_node_f) {
		ddi_remove_minor_node(dip, instance->iocnode);
	}

	if (tran_alloc_f) {
		scsi_hba_tran_free(tran);
	}


	if (added_soft_isr_f) {
		ddi_remove_softintr(instance->soft_intr_id);
	}

	if (added_isr_f) {
		ddi_remove_intr(dip, 0, instance->iblock_cookie);
	}

	pci_config_teardown(&instance->pci_handle);

	ddi_soft_state_free(megasas_state, instance_no);

	con_log(CL_ANN, (CE_NOTE,
	    "megasas: return failure from mega_attach\n"));

	return (DDI_FAILURE);
}

/*
 * getinfo - gets device information
 * @dip:
 * @cmd:
 * @arg:
 * @resultp:
 *
 * The system calls getinfo() to obtain configuration information that only
 * the driver knows. The mapping of minor numbers to device instance is
 * entirely under the control of the driver. The system sometimes needs to ask
 * the driver which device a particular dev_t represents.
 * Given the device number return the devinfo pointer from the scsi_device
 * structure.
736 */ 737 /*ARGSUSED*/ 738 static int 739 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 740 { 741 int rval; 742 int megasas_minor = getminor((dev_t)arg); 743 744 struct megasas_instance *instance; 745 746 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 747 748 switch (cmd) { 749 case DDI_INFO_DEVT2DEVINFO: 750 instance = (struct megasas_instance *) 751 ddi_get_soft_state(megasas_state, 752 MINOR2INST(megasas_minor)); 753 754 if (instance == NULL) { 755 *resultp = NULL; 756 rval = DDI_FAILURE; 757 } else { 758 *resultp = instance->dip; 759 rval = DDI_SUCCESS; 760 } 761 break; 762 case DDI_INFO_DEVT2INSTANCE: 763 *resultp = (void *)instance; 764 rval = DDI_SUCCESS; 765 break; 766 default: 767 *resultp = NULL; 768 rval = DDI_FAILURE; 769 } 770 771 return (rval); 772 } 773 774 /* 775 * detach - detaches a device from the system 776 * @dip: pointer to the device's dev_info structure 777 * @cmd: type of detach 778 * 779 * A driver's detach() entry point is called to detach an instance of a device 780 * that is bound to the driver. The entry point is called with the instance of 781 * the device node to be detached and with DDI_DETACH, which is specified as 782 * the cmd argument to the entry point. 783 * This routine is called during driver unload. We free all the allocated 784 * resources and call the corresponding LLD so that it can also release all 785 * its resources. 
786 */ 787 static int 788 megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 789 { 790 int instance_no; 791 792 struct megasas_instance *instance; 793 794 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 795 796 instance_no = ddi_get_instance(dip); 797 798 instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state, 799 instance_no); 800 801 if (!instance) { 802 con_log(CL_ANN, (CE_WARN, 803 "megasas:%d could not get instance in detach", 804 instance_no)); 805 806 return (DDI_FAILURE); 807 } 808 809 con_log(CL_ANN, (CE_NOTE, 810 "megasas: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n", 811 instance->vendor_id, instance->device_id, instance->subsysvid, 812 instance->subsysid)); 813 814 switch (cmd) { 815 case DDI_DETACH: 816 con_log(CL_ANN, (CE_NOTE, 817 "megasas_detach: DDI_DETACH\n")); 818 819 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 820 con_log(CL_ANN, (CE_WARN, 821 "megasas:%d failed to detach", 822 instance_no)); 823 824 return (DDI_FAILURE); 825 } 826 827 scsi_hba_tran_free(instance->tran); 828 829 if (abort_aen_cmd(instance, instance->aen_cmd)) { 830 con_log(CL_ANN, (CE_WARN, "megasas_detach: " 831 "failed to abort prevous AEN command\n")); 832 833 return (DDI_FAILURE); 834 } 835 836 instance->func_ptr->disable_intr(instance); 837 838 if (instance->isr_level == HIGH_LEVEL_INTR) { 839 ddi_remove_softintr(instance->soft_intr_id); 840 } 841 842 ddi_remove_intr(dip, 0, instance->iblock_cookie); 843 844 free_space_for_mfi(instance); 845 846 pci_config_teardown(&instance->pci_handle); 847 848 kmem_free(instance->func_ptr, 849 sizeof (struct megasas_func_ptr)); 850 851 ddi_soft_state_free(megasas_state, instance_no); 852 break; 853 case DDI_PM_SUSPEND: 854 con_log(CL_ANN, (CE_NOTE, 855 "megasas_detach: DDI_PM_SUSPEND\n")); 856 857 break; 858 case DDI_SUSPEND: 859 con_log(CL_ANN, (CE_NOTE, 860 "megasas_detach: DDI_SUSPEND\n")); 861 862 break; 863 default: 864 con_log(CL_ANN, (CE_WARN, 865 "invalid detach command:0x%x", cmd)); 866 return 
(DDI_FAILURE); 867 } 868 869 return (DDI_SUCCESS); 870 } 871 872 873 /* 874 * ************************************************************************** * 875 * * 876 * common entry points - for character driver types * 877 * * 878 * ************************************************************************** * 879 */ 880 /* 881 * open - gets access to a device 882 * @dev: 883 * @openflags: 884 * @otyp: 885 * @credp: 886 * 887 * Access to a device by one or more application programs is controlled 888 * through the open() and close() entry points. The primary function of 889 * open() is to verify that the open request is allowed. 890 */ 891 static int 892 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 893 { 894 int rval = 0; 895 896 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 897 898 /* Check root permissions */ 899 if (drv_priv(credp) != 0) { 900 con_log(CL_ANN, (CE_WARN, 901 "megaraid: Non-root ioctl access tried!")); 902 return (EPERM); 903 } 904 905 /* Verify we are being opened as a character device */ 906 if (otyp != OTYP_CHR) { 907 con_log(CL_ANN, (CE_WARN, 908 "megaraid: ioctl node must be a char node\n")); 909 return (EINVAL); 910 } 911 912 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev))) 913 == NULL) { 914 return (ENXIO); 915 } 916 917 if (scsi_hba_open) { 918 rval = scsi_hba_open(dev, openflags, otyp, credp); 919 } 920 921 return (rval); 922 } 923 924 /* 925 * close - gives up access to a device 926 * @dev: 927 * @openflags: 928 * @otyp: 929 * @credp: 930 * 931 * close() should perform any cleanup necessary to finish using the minor 932 * device, and prepare the device (and driver) to be opened again. 933 */ 934 static int 935 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 936 { 937 int rval = 0; 938 939 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 940 941 /* no need for locks! 
*/ 942 943 if (scsi_hba_close) { 944 rval = scsi_hba_close(dev, openflags, otyp, credp); 945 } 946 947 return (rval); 948 } 949 950 /* 951 * ioctl - performs a range of I/O commands for character drivers 952 * @dev: 953 * @cmd: 954 * @arg: 955 * @mode: 956 * @credp: 957 * @rvalp: 958 * 959 * ioctl() routine must make sure that user data is copied into or out of the 960 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(), 961 * and ddi_copyout(), as appropriate. 962 * This is a wrapper routine to serialize access to the actual ioctl routine. 963 * ioctl() should return 0 on success, or the appropriate error number. The 964 * driver may also set the value returned to the calling process through rvalp. 965 */ 966 static int 967 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 968 int *rvalp) 969 { 970 int rval = 0; 971 972 struct megasas_instance *instance; 973 struct megasas_ioctl ioctl; 974 struct megasas_aen aen; 975 976 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 977 978 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev))); 979 980 if (instance == NULL) { 981 /* invalid minor number */ 982 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found.")); 983 return (ENXIO); 984 } 985 986 switch ((uint_t)cmd) { 987 case MEGASAS_IOCTL_FIRMWARE: 988 if (ddi_copyin((void *) arg, &ioctl, 989 sizeof (struct megasas_ioctl), mode)) { 990 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: " 991 "ERROR IOCTL copyin")); 992 return (EFAULT); 993 } 994 995 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) { 996 rval = handle_drv_ioctl(instance, &ioctl, mode); 997 } else { 998 rval = handle_mfi_ioctl(instance, &ioctl, mode); 999 } 1000 1001 if (ddi_copyout((void *) &ioctl, (void *)arg, 1002 (sizeof (struct megasas_ioctl) - 1), mode)) { 1003 con_log(CL_ANN, (CE_WARN, 1004 "megasas_ioctl: copy_to_user failed\n")); 1005 rval = 1; 1006 } 1007 1008 break; 1009 case MEGASAS_IOCTL_AEN: 1010 if (ddi_copyin((void *) 
arg, &aen, 1011 sizeof (struct megasas_aen), mode)) { 1012 con_log(CL_ANN, (CE_WARN, 1013 "megasas_ioctl: ERROR AEN copyin")); 1014 return (EFAULT); 1015 } 1016 1017 rval = handle_mfi_aen(instance, &aen); 1018 1019 if (ddi_copyout((void *) &aen, (void *)arg, 1020 sizeof (struct megasas_aen), mode)) { 1021 con_log(CL_ANN, (CE_WARN, 1022 "megasas_ioctl: copy_to_user failed\n")); 1023 rval = 1; 1024 } 1025 1026 break; 1027 default: 1028 if (scsi_hba_ioctl) { 1029 rval = scsi_hba_ioctl(dev, cmd, arg, 1030 mode, credp, rvalp); 1031 1032 con_log(CL_ANN, (CE_NOTE, "megasas_ioctl: " 1033 "scsi_hba_ioctl called, ret = %x.", rval)); 1034 } else { 1035 rval = ENOTTY; 1036 1037 con_log(CL_ANN, (CE_WARN, 1038 "megasas_ioctl: scsi_hba_ioctl is NULL.")); 1039 } 1040 1041 rval = EINVAL; 1042 con_log(CL_ANN, (CE_WARN, 1043 "megasas_ioctl: ERROR invalid cmd = 0x%x", cmd)); 1044 } 1045 1046 return (rval); 1047 } 1048 1049 /* 1050 * ************************************************************************** * 1051 * * 1052 * common entry points - for block driver types * 1053 * * 1054 * ************************************************************************** * 1055 */ 1056 /* 1057 * reset - TBD 1058 * @dip: 1059 * @cmd: 1060 * 1061 * TBD 1062 */ 1063 /*ARGSUSED*/ 1064 static int 1065 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1066 { 1067 int instance_no; 1068 1069 struct megasas_instance *instance; 1070 1071 instance_no = ddi_get_instance(dip); 1072 instance = (struct megasas_instance *)ddi_get_soft_state 1073 (megasas_state, instance_no); 1074 1075 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1076 1077 if (!instance) { 1078 con_log(CL_ANN, (CE_WARN, 1079 "megaraid:%d could not get adapter in reset", 1080 instance_no)); 1081 return (DDI_FAILURE); 1082 } 1083 1084 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..", 1085 instance_no)); 1086 1087 flush_cache(instance); 1088 1089 return (DDI_SUCCESS); 1090 } 1091 1092 1093 /* 1094 * 
 ************************************************************************** *
 *                                                                            *
 *                         entry points (SCSI HBA)                            *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * tran_tgt_init - initialize a target device instance
 * @hba_dip:
 * @tgt_dip:
 * @tran:
 * @sd:
 *
 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
 * the device's address as valid and supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that device
 * is not probed or attached.
 */
/*ARGSUSED*/
static int
megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
		scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef NOT_YET
	/*
	 * NOTE(review): this disabled block does not compile as written --
	 * it references 'adp', 'mega_state' and 'instance_no', none of which
	 * are declared in this function, and the local declarations follow
	 * the con_log() statement.  Fix before ever enabling NOT_YET.
	 */
	int	instance;
	int	islogical;

	instance = ddi_get_instance(hba_dip);
	adp = (struct megasas_instance *)ddi_get_soft_state(mega_state,
	    instance_no);
	if ((sd->sd_address.a_target >= (adp->max_channel * 16 + MAX_LD_64)) ||
	    (sd->sd_address.a_lun != 0)) {

		return (DDI_FAILURE);
	}

	MRAID_IS_LOGICAL(sd->sd_address.a_target, islogical);

	/* Allow non-disk device commands to pass */
	if (!islogical) {
		return (DDI_SUCCESS);
	}

	/* From Target 40 - 64 there will be no devices */
	if (sd->sd_address.a_target > MAX_LOGICAL_DRIVES_40LD) {
		return (DDI_FAILURE);
	}


	/*
	 * Get information about the logical drives.
	 */
	if (megaraid_ld_state_instance(adp) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "megaraid: failed query adapter"));
	}

	if (adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
	    == RDRV_DELETED ||
	    adp->ldrv_state[adp->device_ids[0][sd->sd_address.a_target]]
	    == RDRV_OFFLINE) {

		return (DDI_FAILURE);
	}
#endif /* NOT_YET */
	/* Currently every target is accepted; qualification is NOT_YET. */
	return (DDI_SUCCESS);
}
#if defined(USELESS) && !defined(lint)
/*
 * tran_tgt_probe - probe for the existence of a target device
 * @sd:
 * @callback:
 *
 * The tran_tgt_probe() entry point enables the HBA to customize the operation
 * of scsi_probe(), if necessary. This entry point is called only when the
 * target driver calls scsi_probe(). The HBA driver can retain the normal
 * operation of scsi_probe() by calling scsi_hba_probe() and returning its
 * return value. This entry point is not required, and if not needed, the HBA
 * driver should set the tran_tgt_ probe vector in the scsi_hba_tran structure
 * to point to scsi_hba_probe().
 */
static int
megasas_tran_tgt_probe(struct scsi_device *sd, int (*callback)())
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * the HBA driver should set the tran_tgt_probe vector in the
	 * scsi_hba_tran structure to point to scsi_hba_probe()
	 */
	return (scsi_hba_probe(sd, callback));
}
#endif /* defined (USELESS) && !defined (lint) */

/*
 * tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap:
 * @pkt:
 * @bp:
 * @cmdlen:
 * @statuslen:
 * @tgtlen:
 * @flags:
 * @callback:
 *
 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
 * structure and DMA resources for a target driver request. The
 * tran_init_pkt() entry point is called when the target driver calls the
 * SCSA function scsi_init_pkt().
Each call of the tran_init_pkt() entry point 1205 * is a request to perform one or more of three possible services: 1206 * - allocation and initialization of a scsi_pkt structure 1207 * - allocation of DMA resources for data transfer 1208 * - reallocation of DMA resources for the next portion of the data transfer 1209 */ 1210 static struct scsi_pkt * 1211 megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, 1212 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1213 int flags, int (*callback)(), caddr_t arg) 1214 { 1215 struct scsa_cmd *acmd; 1216 struct megasas_instance *instance; 1217 struct scsi_pkt *new_pkt; 1218 1219 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1220 1221 instance = ADDR2MEGA(ap); 1222 1223 /* step #1 : pkt allocation */ 1224 if (pkt == NULL) { 1225 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, 1226 tgtlen, sizeof (struct scsa_cmd), callback, arg); 1227 if (pkt == NULL) { 1228 return (NULL); 1229 } 1230 1231 acmd = PKT2CMD(pkt); 1232 1233 /* 1234 * Initialize the new pkt - we redundantly initialize 1235 * all the fields for illustrative purposes. 
1236 */ 1237 acmd->cmd_pkt = pkt; 1238 acmd->cmd_flags = 0; 1239 acmd->cmd_scblen = statuslen; 1240 acmd->cmd_cdblen = cmdlen; 1241 acmd->cmd_dmahandle = NULL; 1242 acmd->cmd_ncookies = 0; 1243 acmd->cmd_cookie = 0; 1244 acmd->cmd_cookiecnt = 0; 1245 acmd->cmd_nwin = 0; 1246 1247 pkt->pkt_address = *ap; 1248 pkt->pkt_comp = (void (*)())NULL; 1249 pkt->pkt_flags = 0; 1250 pkt->pkt_time = 0; 1251 pkt->pkt_resid = 0; 1252 pkt->pkt_state = 0; 1253 pkt->pkt_statistics = 0; 1254 pkt->pkt_reason = 0; 1255 new_pkt = pkt; 1256 } else { 1257 acmd = PKT2CMD(pkt); 1258 new_pkt = NULL; 1259 } 1260 1261 /* step #2 : dma allocation/move */ 1262 if (bp && bp->b_bcount != 0) { 1263 if (acmd->cmd_dmahandle == NULL) { 1264 if (megasas_dma_alloc(instance, pkt, bp, flags, 1265 callback) == -1) { 1266 if (new_pkt) { 1267 scsi_hba_pkt_free(ap, new_pkt); 1268 } 1269 1270 return ((struct scsi_pkt *)NULL); 1271 } 1272 } else { 1273 if (megasas_dma_move(instance, pkt, bp) == -1) { 1274 return ((struct scsi_pkt *)NULL); 1275 } 1276 } 1277 } 1278 1279 return (pkt); 1280 } 1281 1282 /* 1283 * tran_start - transport a SCSI command to the addressed target 1284 * @ap: 1285 * @pkt: 1286 * 1287 * The tran_start() entry point for a SCSI HBA driver is called to transport a 1288 * SCSI command to the addressed target. The SCSI command is described 1289 * entirely within the scsi_pkt structure, which the target driver allocated 1290 * through the HBA driver's tran_init_pkt() entry point. If the command 1291 * involves a data transfer, DMA resources must also have been allocated for 1292 * the scsi_pkt structure. 
1293 * 1294 * Return Values : 1295 * TRAN_BUSY - request queue is full, no more free scbs 1296 * TRAN_ACCEPT - pkt has been submitted to the instance 1297 */ 1298 static int 1299 megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt) 1300 { 1301 uchar_t cmd_done = 0; 1302 1303 struct megasas_instance *instance = ADDR2MEGA(ap); 1304 struct megasas_cmd *cmd; 1305 1306 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x", 1307 __func__, __LINE__, pkt->pkt_cdbp[0])); 1308 1309 pkt->pkt_reason = CMD_CMPLT; 1310 1311 cmd = build_cmd(instance, ap, pkt, &cmd_done); 1312 1313 /* 1314 * Check if the command is already completed by the mega_build_cmd() 1315 * routine. In which case the busy_flag would be clear and scb will be 1316 * NULL and appropriate reason provided in pkt_reason field 1317 */ 1318 if (cmd_done) { 1319 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) { 1320 (*pkt->pkt_comp)(pkt); 1321 } 1322 1323 return (TRAN_ACCEPT); 1324 } 1325 1326 if (cmd == NULL) { 1327 return (TRAN_BUSY); 1328 } 1329 1330 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { 1331 if (instance->fw_outstanding > instance->max_fw_cmds) { 1332 con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy")); 1333 return_mfi_pkt(instance, cmd); 1334 return (TRAN_BUSY); 1335 } 1336 1337 /* Syncronize the Cmd frame for the controller */ 1338 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, 1339 DDI_DMA_SYNC_FORDEV); 1340 1341 instance->func_ptr->issue_cmd(cmd, instance); 1342 1343 #if defined(NOT_YET) && !defined(lint) 1344 /* 1345 * before return, set timer - for timeout checking 1346 * (for every 1 second) 1347 */ 1348 instance->timeout_id = timeout(io_timeout_checker, 1349 (void *) instance, drv_usectohz(MEGASAS_1_SECOND)); 1350 #endif /* defined(NOT_YET) && !defined(lint) */ 1351 } else { 1352 struct megasas_header *hdr = &cmd->frame->hdr; 1353 1354 cmd->sync_cmd = MEGASAS_TRUE; 1355 1356 instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd); 1357 1358 
pkt->pkt_reason = CMD_CMPLT; 1359 pkt->pkt_statistics = 0; 1360 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS; 1361 1362 switch (hdr->cmd_status) { 1363 case MFI_STAT_OK: 1364 pkt->pkt_scbp[0] = STATUS_GOOD; 1365 break; 1366 1367 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1368 1369 pkt->pkt_reason = CMD_INCOMPLETE; 1370 pkt->pkt_statistics = STAT_DISCON; 1371 1372 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; 1373 break; 1374 1375 case MFI_STAT_DEVICE_NOT_FOUND: 1376 pkt->pkt_reason = CMD_DEV_GONE; 1377 pkt->pkt_statistics = STAT_DISCON; 1378 break; 1379 1380 default: 1381 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1; 1382 } 1383 1384 return_mfi_pkt(instance, cmd); 1385 1386 if (pkt->pkt_comp) { 1387 (*pkt->pkt_comp)(pkt); 1388 } 1389 1390 } 1391 1392 return (TRAN_ACCEPT); 1393 } 1394 1395 /* 1396 * tran_abort - Abort any commands that are currently in transport 1397 * @ap: 1398 * @pkt: 1399 * 1400 * The tran_abort() entry point for a SCSI HBA driver is called to abort any 1401 * commands that are currently in transport for a particular target. This entry 1402 * point is called when a target driver calls scsi_abort(). The tran_abort() 1403 * entry point should attempt to abort the command denoted by the pkt 1404 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to 1405 * abort all outstandidng commands in the transport layer for the particular 1406 * target or logical unit. 1407 */ 1408 /*ARGSUSED*/ 1409 static int 1410 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1411 { 1412 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1413 1414 /* aborting command not supported by H/W */ 1415 1416 return (DDI_FAILURE); 1417 } 1418 1419 /* 1420 * tran_reset - reset either the SCSI bus or target 1421 * @ap: 1422 * @level: 1423 * 1424 * The tran_reset() entry point for a SCSI HBA driver is called to reset either 1425 * the SCSI bus or a particular SCSI target device. 
This entry point is called 1426 * when a target driver calls scsi_reset(). The tran_reset() entry point must 1427 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the 1428 * particular target or logical unit must be reset. 1429 */ 1430 /*ARGSUSED*/ 1431 static int 1432 megasas_tran_reset(struct scsi_address *ap, int level) 1433 { 1434 struct megasas_instance *instance = ADDR2MEGA(ap); 1435 1436 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1437 1438 if (wait_for_outstanding(instance)) { 1439 return (DDI_FAILURE); 1440 } else { 1441 return (DDI_SUCCESS); 1442 } 1443 } 1444 1445 /* 1446 * tran_bus_reset - reset the SCSI bus 1447 * @dip: 1448 * @level: 1449 * 1450 * The tran_bus_reset() vector in the scsi_hba_tran structure should be 1451 * initialized during the HBA driver's attach(). The vector should point to 1452 * an HBA entry point that is to be called when a user initiates a bus reset. 1453 * Implementation is hardware specific. If the HBA driver cannot reset the 1454 * SCSI bus without affecting the targets, the driver should fail RESET_BUS 1455 * or not initialize this vector. 1456 */ 1457 /*ARGSUSED*/ 1458 static int 1459 megasas_tran_bus_reset(dev_info_t *dip, int level) 1460 { 1461 int instance_no = ddi_get_instance(dip); 1462 1463 struct megasas_instance *instance = ddi_get_soft_state(megasas_state, 1464 instance_no); 1465 1466 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1467 1468 if (wait_for_outstanding(instance)) { 1469 return (DDI_FAILURE); 1470 } else { 1471 return (DDI_SUCCESS); 1472 } 1473 } 1474 1475 /* 1476 * tran_getcap - get one of a set of SCSA-defined capabilities 1477 * @ap: 1478 * @cap: 1479 * @whom: 1480 * 1481 * The target driver can request the current setting of the capability for a 1482 * particular target by setting the whom parameter to nonzero. 
A whom value of 1483 * zero indicates a request for the current setting of the general capability 1484 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1 1485 * for undefined capabilities or the current value of the requested capability. 1486 */ 1487 /*ARGSUSED*/ 1488 static int 1489 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom) 1490 { 1491 int rval = 0; 1492 1493 struct megasas_instance *instance = ADDR2MEGA(ap); 1494 1495 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1496 1497 /* we do allow inquiring about capabilities for other targets */ 1498 if (cap == NULL) { 1499 return (-1); 1500 } 1501 #if 0 1502 if (ap->a_target >= (adapter->max_channel * 16 + MAX_LD_64)) { 1503 1504 return (-1); 1505 } 1506 1507 acmdp = &acmd; 1508 #endif 1509 1510 switch (scsi_hba_lookup_capstr(cap)) { 1511 case SCSI_CAP_DMA_MAX: 1512 /* Limit to 16MB max transfer */ 1513 rval = megasas_max_cap_maxxfer; 1514 break; 1515 case SCSI_CAP_MSG_OUT: 1516 rval = 1; 1517 break; 1518 case SCSI_CAP_DISCONNECT: 1519 rval = 0; 1520 break; 1521 case SCSI_CAP_SYNCHRONOUS: 1522 rval = 0; 1523 break; 1524 case SCSI_CAP_WIDE_XFER: 1525 rval = 1; 1526 break; 1527 case SCSI_CAP_TAGGED_QING: 1528 rval = 1; 1529 break; 1530 case SCSI_CAP_UNTAGGED_QING: 1531 rval = 1; 1532 break; 1533 case SCSI_CAP_PARITY: 1534 rval = 1; 1535 break; 1536 case SCSI_CAP_INITIATOR_ID: 1537 rval = instance->init_id; 1538 break; 1539 case SCSI_CAP_ARQ: 1540 rval = 1; 1541 break; 1542 case SCSI_CAP_LINKED_CMDS: 1543 rval = 0; 1544 break; 1545 case SCSI_CAP_RESET_NOTIFICATION: 1546 rval = 1; 1547 break; 1548 case SCSI_CAP_GEOMETRY: 1549 #if 0 1550 int channel; 1551 int target; 1552 int islogical; 1553 1554 MRAID_GET_DEVICE_MAP(adapter, acmdp, channel, 1555 target, ap, islogical); 1556 1557 if (!islogical) { 1558 con_log(CL_ANN1, (CE_WARN, "megaraid%d: " 1559 "fail geometry for phy [%d:%d]\n", 1560 ddi_get_instance(adapter->dip), channel, 1561 target)); 1562 return 
(-1); 1563 } 1564 1565 if (adapter->read_ldidmap) 1566 target -= 0x80; 1567 1568 if ((adapter->ldrv_state[target] == RDRV_OFFLINE) || 1569 (adapter->ldrv_state[target] == RDRV_DELETED)) { 1570 return (-1); 1571 } 1572 1573 rval = (64 << 16) | 32; 1574 1575 if (adapter->ldrv_size[target] > 0x200000) { 1576 rval = (255 << 16) | 63; 1577 } 1578 1579 rval = (64 << 16) | 32; /* remove latter */ 1580 #endif 1581 rval = -1; 1582 1583 break; 1584 default: 1585 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x", 1586 scsi_hba_lookup_capstr(cap))); 1587 rval = -1; 1588 break; 1589 } 1590 1591 return (rval); 1592 } 1593 1594 /* 1595 * tran_setcap - set one of a set of SCSA-defined capabilities 1596 * @ap: 1597 * @cap: 1598 * @value: 1599 * @whom: 1600 * 1601 * The target driver might request that the new value be set for a particular 1602 * target by setting the whom parameter to nonzero. A whom value of zero 1603 * means that request is to set the new value for the SCSI bus or for adapter 1604 * hardware in general. 
1605 * The tran_setcap() should return the following values as appropriate: 1606 * - -1 for undefined capabilities 1607 * - 0 if the HBA driver cannot set the capability to the requested value 1608 * - 1 if the HBA driver is able to set the capability to the requested value 1609 */ 1610 /*ARGSUSED*/ 1611 static int 1612 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 1613 { 1614 int rval = 1; 1615 1616 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1617 1618 /* We don't allow setting capabilities for other targets */ 1619 if (cap == NULL || whom == 0) { 1620 return (-1); 1621 } 1622 1623 switch (scsi_hba_lookup_capstr(cap)) { 1624 case SCSI_CAP_DMA_MAX: 1625 case SCSI_CAP_MSG_OUT: 1626 case SCSI_CAP_PARITY: 1627 case SCSI_CAP_LINKED_CMDS: 1628 case SCSI_CAP_RESET_NOTIFICATION: 1629 case SCSI_CAP_DISCONNECT: 1630 case SCSI_CAP_SYNCHRONOUS: 1631 case SCSI_CAP_UNTAGGED_QING: 1632 case SCSI_CAP_WIDE_XFER: 1633 case SCSI_CAP_INITIATOR_ID: 1634 case SCSI_CAP_ARQ: 1635 /* 1636 * None of these are settable via 1637 * the capability interface. 1638 */ 1639 break; 1640 case SCSI_CAP_TAGGED_QING: 1641 rval = 1; 1642 break; 1643 case SCSI_CAP_SECTOR_SIZE: 1644 rval = 1; 1645 break; 1646 1647 case SCSI_CAP_TOTAL_SECTORS: 1648 rval = 1; 1649 break; 1650 default: 1651 rval = -1; 1652 break; 1653 } 1654 1655 return (rval); 1656 } 1657 1658 /* 1659 * tran_destroy_pkt - deallocate scsi_pkt structure 1660 * @ap: 1661 * @pkt: 1662 * 1663 * The tran_destroy_pkt() entry point is the HBA driver function that 1664 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is 1665 * called when the target driver calls scsi_destroy_pkt(). The 1666 * tran_destroy_pkt() entry point must free any DMA resources that have been 1667 * allocated for the packet. An implicit DMA synchronization occurs if the 1668 * DMA resources are freed and any cached data remains after the completion 1669 * of the transfer. 
1670 */ 1671 static void 1672 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1673 { 1674 struct scsa_cmd *acmd = PKT2CMD(pkt); 1675 1676 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1677 1678 if (acmd->cmd_flags & CFLAG_DMAVALID) { 1679 acmd->cmd_flags &= ~CFLAG_DMAVALID; 1680 1681 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 1682 1683 ddi_dma_free_handle(&acmd->cmd_dmahandle); 1684 1685 acmd->cmd_dmahandle = NULL; 1686 } 1687 1688 /* free the pkt */ 1689 scsi_hba_pkt_free(ap, pkt); 1690 } 1691 1692 /* 1693 * tran_dmafree - deallocates DMA resources 1694 * @ap: 1695 * @pkt: 1696 * 1697 * The tran_dmafree() entry point deallocates DMAQ resources that have been 1698 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is 1699 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must 1700 * free only DMA resources allocated for a scsi_pkt structure, not the 1701 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is 1702 * implicitly performed. 1703 */ 1704 /*ARGSUSED*/ 1705 static void 1706 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 1707 { 1708 register struct scsa_cmd *acmd = PKT2CMD(pkt); 1709 1710 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1711 1712 if (acmd->cmd_flags & CFLAG_DMAVALID) { 1713 acmd->cmd_flags &= ~CFLAG_DMAVALID; 1714 1715 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 1716 1717 ddi_dma_free_handle(&acmd->cmd_dmahandle); 1718 1719 acmd->cmd_dmahandle = NULL; 1720 } 1721 } 1722 1723 /* 1724 * tran_sync_pkt - synchronize the DMA object allocated 1725 * @ap: 1726 * @pkt: 1727 * 1728 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for 1729 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt() 1730 * entry point is called when the target driver calls scsi_sync_pkt(). 
If the 1731 * data transfer direction is a DMA read from device to memory, tran_sync_pkt() 1732 * must synchronize the CPU's view of the data. If the data transfer direction 1733 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the 1734 * device's view of the data. 1735 */ 1736 /*ARGSUSED*/ 1737 static void 1738 megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 1739 { 1740 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1741 1742 /* 1743 * following 'ddi_dma_sync()' API call 1744 * already called for each I/O in the ISR 1745 */ 1746 #ifdef TBD 1747 int i; 1748 1749 register struct scsa_cmd *acmd = PKT2CMD(pkt); 1750 1751 if (acmd->cmd_flags & CFLAG_DMAVALID) { 1752 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset, 1753 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ? 1754 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU); 1755 } 1756 #endif /* TBD */ 1757 } 1758 1759 /*ARGSUSED*/ 1760 static int 1761 megasas_tran_quiesce(dev_info_t *dip) 1762 { 1763 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1764 1765 return (1); 1766 } 1767 1768 /*ARGSUSED*/ 1769 static int 1770 megasas_tran_unquiesce(dev_info_t *dip) 1771 { 1772 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1773 1774 return (1); 1775 } 1776 1777 /* 1778 * megasas_isr(caddr_t) 1779 * 1780 * The Interrupt Service Routine 1781 * 1782 * Collect status for all completed commands and do callback 1783 * 1784 */ 1785 static uint_t 1786 megasas_isr(caddr_t arg) 1787 { 1788 int need_softintr; 1789 uint32_t producer; 1790 uint32_t consumer; 1791 uint32_t context; 1792 1793 struct megasas_cmd *cmd; 1794 struct megasas_instance *instance; 1795 1796 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1797 1798 /* LINTED E_BAD_PTR_CAST_ALIGN */ 1799 instance = (struct megasas_instance *)arg; 1800 if (!instance->func_ptr->intr_ack(instance)) { 1801 return (DDI_INTR_UNCLAIMED); 1802 } 1803 1804 (void) 
ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 1805 0, 0, DDI_DMA_SYNC_FORCPU); 1806 1807 producer = *instance->producer; 1808 consumer = *instance->consumer; 1809 1810 con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ", 1811 producer, consumer)); 1812 1813 mutex_enter(&instance->completed_pool_mtx); 1814 1815 while (consumer != producer) { 1816 context = instance->reply_queue[consumer]; 1817 /* 1818 * con_log(CL_ANN, (CE_WARN, 1819 * " context returned %x ",context)); 1820 */ 1821 cmd = instance->cmd_list[context]; 1822 mlist_add_tail(&cmd->list, &instance->completed_pool_list); 1823 1824 consumer++; 1825 if (consumer == (instance->max_fw_cmds + 1)) { 1826 consumer = 0; 1827 } 1828 } 1829 1830 mutex_exit(&instance->completed_pool_mtx); 1831 1832 *instance->consumer = producer; 1833 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 1834 0, 0, DDI_DMA_SYNC_FORDEV); 1835 1836 if (instance->softint_running) { 1837 need_softintr = 0; 1838 } else { 1839 need_softintr = 1; 1840 } 1841 1842 if (instance->isr_level == HIGH_LEVEL_INTR) { 1843 if (need_softintr) { 1844 ddi_trigger_softintr(instance->soft_intr_id); 1845 } 1846 } else { 1847 /* 1848 * Not a high-level interrupt, therefore call the soft level 1849 * interrupt explicitly 1850 */ 1851 (void) megasas_softintr((caddr_t)instance); 1852 } 1853 1854 return (DDI_INTR_CLAIMED); 1855 } 1856 1857 1858 /* 1859 * ************************************************************************** * 1860 * * 1861 * libraries * 1862 * * 1863 * ************************************************************************** * 1864 */ 1865 /* 1866 * get_mfi_pkt : Get a command from the free pool 1867 */ 1868 static struct megasas_cmd * 1869 get_mfi_pkt(struct megasas_instance *instance) 1870 { 1871 mlist_t *head = &instance->cmd_pool_list; 1872 struct megasas_cmd *cmd = NULL; 1873 1874 mutex_enter(&instance->cmd_pool_mtx); 1875 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1876 1877 if (!mlist_empty(head)) { 1878 /* 
LINTED E_BAD_PTR_CAST_ALIGN */ 1879 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1880 mlist_del_init(head->next); 1881 } 1882 1883 mutex_exit(&instance->cmd_pool_mtx); 1884 1885 return (cmd); 1886 } 1887 1888 /* 1889 * return_mfi_pkt : Return a cmd to free command pool 1890 */ 1891 static void 1892 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd) 1893 { 1894 mutex_enter(&instance->cmd_pool_mtx); 1895 ASSERT(mutex_owned(&instance->cmd_pool_mtx)); 1896 1897 mlist_add(&cmd->list, &instance->cmd_pool_list); 1898 1899 mutex_exit(&instance->cmd_pool_mtx); 1900 } 1901 1902 /* 1903 * get_mfi_pkt : Get a command from the free pool 1904 */ 1905 #ifndef lint 1906 static struct megasas_cmd * 1907 pull_pend_queue(struct megasas_instance *instance) 1908 { 1909 mlist_t *head = &instance->cmd_pend_list; 1910 struct megasas_cmd *cmd = NULL; 1911 1912 mutex_enter(&instance->cmd_pend_mtx); 1913 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1914 1915 if (!mlist_empty(head)) { 1916 cmd = mlist_entry(head->next, struct megasas_cmd, list); 1917 mlist_del_init(head->next); 1918 } 1919 1920 mutex_exit(&instance->cmd_pend_mtx); 1921 1922 return (cmd); 1923 } 1924 1925 /* 1926 * return_mfi_pkt : Return a cmd to free command pool 1927 */ 1928 static void 1929 push_pend_queue(struct megasas_instance *instance, struct megasas_cmd *cmd) 1930 { 1931 mutex_enter(&instance->cmd_pend_mtx); 1932 ASSERT(mutex_owned(&instance->cmd_pend_mtx)); 1933 1934 mlist_add(&cmd->list, &instance->cmd_pend_list); 1935 1936 mutex_exit(&instance->cmd_pend_mtx); 1937 } 1938 #endif 1939 1940 /* 1941 * destroy_mfi_frame_pool 1942 */ 1943 static void 1944 destroy_mfi_frame_pool(struct megasas_instance *instance) 1945 { 1946 int i; 1947 uint32_t max_cmd = instance->max_fw_cmds; 1948 1949 struct megasas_cmd *cmd; 1950 1951 /* return all frames to pool */ 1952 for (i = 0; i < max_cmd; i++) { 1953 1954 cmd = instance->cmd_list[i]; 1955 1956 if (cmd->frame_dma_obj_status == 
DMA_OBJ_ALLOCATED) 1957 mega_free_dma_obj(cmd->frame_dma_obj); 1958 1959 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 1960 } 1961 1962 } 1963 1964 /* 1965 * create_mfi_frame_pool 1966 */ 1967 static int 1968 create_mfi_frame_pool(struct megasas_instance *instance) 1969 { 1970 int i = 0; 1971 int cookie_cnt; 1972 uint16_t max_cmd; 1973 uint16_t sge_sz; 1974 uint32_t sgl_sz; 1975 uint32_t tot_frame_size; 1976 1977 struct megasas_cmd *cmd; 1978 1979 max_cmd = instance->max_fw_cmds; 1980 1981 sge_sz = sizeof (struct megasas_sge64); 1982 1983 /* calculated the number of 64byte frames required for SGL */ 1984 sgl_sz = sge_sz * instance->max_num_sge; 1985 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + NUM_SENSE_KEYS; 1986 1987 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 1988 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 1989 1990 while (i < max_cmd) { 1991 cmd = instance->cmd_list[i]; 1992 1993 cmd->frame_dma_obj.size = tot_frame_size; 1994 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr; 1995 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 1996 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 1997 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 1998 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 1999 2000 2001 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj); 2002 2003 if (cookie_cnt == -1 || cookie_cnt > 1) { 2004 con_log(CL_ANN, (CE_WARN, 2005 "create_mfi_frame_pool: could not alloc.")); 2006 return (DDI_FAILURE); 2007 } 2008 2009 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 2010 2011 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 2012 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer; 2013 cmd->frame_phys_addr = 2014 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 2015 2016 cmd->sense = (uint8_t *)(((unsigned long) 2017 cmd->frame_dma_obj.buffer) + 2018 tot_frame_size - NUM_SENSE_KEYS); 2019 cmd->sense_phys_addr = 2020 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 2021 tot_frame_size 
- NUM_SENSE_KEYS; 2022 2023 if (!cmd->frame || !cmd->sense) { 2024 con_log(CL_ANN, (CE_NOTE, 2025 "megasas: pci_pool_alloc failed \n")); 2026 2027 return (-ENOMEM); 2028 } 2029 2030 cmd->frame->io.context = cmd->index; 2031 i++; 2032 2033 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 2034 cmd->frame->io.context, cmd->frame_phys_addr)); 2035 } 2036 2037 return (DDI_SUCCESS); 2038 } 2039 2040 /* 2041 * free_additional_dma_buffer 2042 */ 2043 static void 2044 free_additional_dma_buffer(struct megasas_instance *instance) 2045 { 2046 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 2047 mega_free_dma_obj(instance->mfi_internal_dma_obj); 2048 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 2049 } 2050 2051 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { 2052 mega_free_dma_obj(instance->mfi_evt_detail_obj); 2053 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; 2054 } 2055 } 2056 2057 /* 2058 * alloc_additional_dma_buffer 2059 */ 2060 static int 2061 alloc_additional_dma_buffer(struct megasas_instance *instance) 2062 { 2063 uint32_t reply_q_sz; 2064 uint32_t internal_buf_size = PAGESIZE*2; 2065 2066 /* max cmds plus 1 + procudure & consumer */ 2067 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2); 2068 2069 instance->mfi_internal_dma_obj.size = internal_buf_size; 2070 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr; 2071 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2072 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2073 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; 2074 2075 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj) 2076 != 1) { 2077 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q")); 2078 return (DDI_FAILURE); 2079 } 2080 2081 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); 2082 2083 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; 2084 2085 /* LINTED 
E_BAD_PTR_CAST_ALIGN */ 2086 instance->producer = (uint32_t *)instance->mfi_internal_dma_obj.buffer; 2087 instance->consumer = (uint32_t *)((unsigned long) 2088 instance->mfi_internal_dma_obj.buffer + 4); 2089 instance->reply_queue = (uint32_t *)((unsigned long) 2090 instance->mfi_internal_dma_obj.buffer + 8); 2091 instance->internal_buf = (caddr_t)(((unsigned long) 2092 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8); 2093 instance->internal_buf_dmac_add = 2094 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 2095 reply_q_sz; 2096 instance->internal_buf_size = internal_buf_size - 2097 (reply_q_sz + 8); 2098 2099 /* allocate evt_detail */ 2100 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail); 2101 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr; 2102 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2103 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2104 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; 2105 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1; 2106 2107 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) { 2108 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: " 2109 "could not data transfer buffer alloc.")); 2110 return (DDI_FAILURE); 2111 } 2112 2113 bzero(instance->mfi_evt_detail_obj.buffer, 2114 sizeof (struct megasas_evt_detail)); 2115 2116 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; 2117 2118 return (DDI_SUCCESS); 2119 } 2120 2121 /* 2122 * free_space_for_mfi 2123 */ 2124 static void 2125 free_space_for_mfi(struct megasas_instance *instance) 2126 { 2127 int i; 2128 uint32_t max_cmd = instance->max_fw_cmds; 2129 2130 /* already freed */ 2131 if (instance->cmd_list == NULL) { 2132 return; 2133 } 2134 2135 free_additional_dma_buffer(instance); 2136 2137 /* first free the MFI frame pool */ 2138 destroy_mfi_frame_pool(instance); 2139 2140 /* free all the commands in the cmd_list */ 2141 for (i = 
0; i < instance->max_fw_cmds; i++) { 2142 kmem_free(instance->cmd_list[i], 2143 sizeof (struct megasas_cmd)); 2144 2145 instance->cmd_list[i] = NULL; 2146 } 2147 2148 /* free the cmd_list buffer itself */ 2149 kmem_free(instance->cmd_list, 2150 sizeof (struct megasas_cmd *) * max_cmd); 2151 2152 instance->cmd_list = NULL; 2153 2154 INIT_LIST_HEAD(&instance->cmd_pool_list); 2155 } 2156 2157 /* 2158 * alloc_space_for_mfi 2159 */ 2160 static int 2161 alloc_space_for_mfi(struct megasas_instance *instance) 2162 { 2163 int i; 2164 uint32_t max_cmd; 2165 size_t sz; 2166 2167 struct megasas_cmd *cmd; 2168 2169 max_cmd = instance->max_fw_cmds; 2170 sz = sizeof (struct megasas_cmd *) * max_cmd; 2171 2172 /* 2173 * instance->cmd_list is an array of struct megasas_cmd pointers. 2174 * Allocate the dynamic array first and then allocate individual 2175 * commands. 2176 */ 2177 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); 2178 ASSERT(instance->cmd_list); 2179 2180 for (i = 0; i < max_cmd; i++) { 2181 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd), 2182 KM_SLEEP); 2183 ASSERT(instance->cmd_list[i]); 2184 } 2185 2186 INIT_LIST_HEAD(&instance->cmd_pool_list); 2187 2188 /* add all the commands to command pool (instance->cmd_pool) */ 2189 for (i = 0; i < max_cmd; i++) { 2190 cmd = instance->cmd_list[i]; 2191 cmd->index = i; 2192 2193 mlist_add_tail(&cmd->list, &instance->cmd_pool_list); 2194 } 2195 2196 /* create a frame pool and assign one frame to each cmd */ 2197 if (create_mfi_frame_pool(instance)) { 2198 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n")); 2199 return (DDI_FAILURE); 2200 } 2201 2202 /* create a frame pool and assign one frame to each cmd */ 2203 if (alloc_additional_dma_buffer(instance)) { 2204 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n")); 2205 return (DDI_FAILURE); 2206 } 2207 2208 return (DDI_SUCCESS); 2209 } 2210 2211 /* 2212 * get_ctrl_info 2213 */ 2214 static int 2215 get_ctrl_info(struct megasas_instance 
*instance, 2216 struct megasas_ctrl_info *ctrl_info) 2217 { 2218 int ret = 0; 2219 2220 struct megasas_cmd *cmd; 2221 struct megasas_dcmd_frame *dcmd; 2222 struct megasas_ctrl_info *ci; 2223 2224 cmd = get_mfi_pkt(instance); 2225 2226 if (!cmd) { 2227 con_log(CL_ANN, (CE_WARN, 2228 "Failed to get a cmd for ctrl info\n")); 2229 return (DDI_FAILURE); 2230 } 2231 2232 dcmd = &cmd->frame->dcmd; 2233 2234 ci = (struct megasas_ctrl_info *)instance->internal_buf; 2235 2236 if (!ci) { 2237 con_log(CL_ANN, (CE_WARN, 2238 "Failed to alloc mem for ctrl info\n")); 2239 return_mfi_pkt(instance, cmd); 2240 return (DDI_FAILURE); 2241 } 2242 2243 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info)); 2244 2245 /* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */ 2246 (void) memset(dcmd->mbox.b, 0, 12); 2247 2248 dcmd->cmd = MFI_CMD_OP_DCMD; 2249 dcmd->cmd_status = 0xFF; 2250 dcmd->sge_count = 1; 2251 dcmd->flags = MFI_FRAME_DIR_READ; 2252 dcmd->timeout = 0; 2253 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info); 2254 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2255 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add; 2256 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info); 2257 2258 cmd->frame_count = 1; 2259 2260 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2261 ret = 0; 2262 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info)); 2263 } else { 2264 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n")); 2265 ret = -1; 2266 } 2267 2268 return_mfi_pkt(instance, cmd); 2269 2270 return (ret); 2271 } 2272 2273 /* 2274 * abort_aen_cmd 2275 */ 2276 static int 2277 abort_aen_cmd(struct megasas_instance *instance, 2278 struct megasas_cmd *cmd_to_abort) 2279 { 2280 int ret = 0; 2281 2282 struct megasas_cmd *cmd; 2283 struct megasas_abort_frame *abort_fr; 2284 2285 cmd = get_mfi_pkt(instance); 2286 2287 if (!cmd) { 2288 con_log(CL_ANN, (CE_WARN, 2289 "Failed to get a cmd for ctrl info\n")); 2290 return (DDI_FAILURE); 2291 } 
	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_OP_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	/* flag the AEN command so its completion is treated as an abort */
	instance->aen_cmd->abort_aen = 1;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
		ret = -1;
	} else {
		ret = 0;
	}

	/*
	 * NOTE(review): abort_aen was already set to 1 before the abort was
	 * issued; this second identical store looks redundant -- confirm
	 * whether it was meant to clear the flag instead.
	 */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	return_mfi_pkt(instance, cmd);

	return (ret);
}

/*
 * init_mfi - initialize the MegaRAID Firmware Interface layer.
 *
 * Maps the controller register set, drives the firmware to the READY
 * state, sizes the command pool from the firmware status register,
 * allocates the command/frame pools, and issues the MFI INIT frame
 * (whose SGL space doubles as the init queue info structure).  Finally
 * queries the controller info to size max_sectors_per_req.
 *
 * Returns 0 on success, DDI_FAILURE otherwise; on failure all resources
 * acquired so far are released via the fail_* labels.
 */
static int
init_mfi(struct megasas_instance *instance)
{
	off_t reglength;
	struct megasas_cmd *cmd;
	struct megasas_ctrl_info ctrl_info;
	struct megasas_init_frame *init_frame;
	struct megasas_init_queue_info *initq_info;

	if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
	    != DDI_SUCCESS) || reglength < 4096) {
		return (DDI_FAILURE);
	}

	/* cap the mapping at 8K; that is all the driver touches */
	if (reglength > 8192) {
		reglength = 8192;
		con_log(CL_ANN, (CE_NOTE,
		    "mega: register length to map is 0x%lx bytes", reglength));
	}

	if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
	    &instance->regmap, 0, reglength, &endian_attr,
	    &instance->regmap_handle) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_NOTE,
		    "megaraid: couldn't map control registers"));

		goto fail_mfi_reg_setup;
	}

	/* we expect the FW state to be READY */
	if (mfi_state_transition_to_ready(instance)) {
		con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
		goto fail_ready_state;
	}

	/* get various operational parameters from status register */
	instance->max_num_sge =
	    (instance->func_ptr->read_fw_status_reg(instance) &
	    0xFF0000) >> 0x10;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds =
	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
	instance->max_fw_cmds = instance->max_fw_cmds - 1;

	/*
	 * con_log(CL_ANN, (CE_WARN, "megaraid: "
	 * "max_num_sge = %d max_fw_cmds = %d\n",
	 * instance->max_num_sge, instance->max_fw_cmds));
	 */

	instance->max_num_sge =
	    (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
	    MEGASAS_MAX_SGE_CNT : instance->max_num_sge;

	/* create a pool of commands */
	if (alloc_space_for_mfi(instance))
		goto fail_alloc_fw_space;

	/* disable interrupt for initial preparation */
	instance->func_ptr->disable_intr(instance);

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = get_mfi_pkt(instance);

	init_frame = (struct megasas_init_frame *)cmd->frame;
	initq_info = (struct megasas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));

	initq_info->init_flags = 0;

	/* reply queue holds one more entry than outstanding commands */
	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;

	/*
	 * The internal DMA area is laid out as: producer index (4 bytes),
	 * consumer index (4 bytes), then the reply queue itself.
	 */
	initq_info->producer_index_phys_addr_hi = 0;
	initq_info->producer_index_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;

	initq_info->consumer_index_phys_addr_hi = 0;
	initq_info->consumer_index_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;

	initq_info->reply_queue_start_phys_addr_hi = 0;
	initq_info->reply_queue_start_phys_addr_lo =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;

	init_frame->cmd = MFI_CMD_OP_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags = 0;
	init_frame->queue_info_new_phys_addr_lo =
	    cmd->frame_phys_addr + 64;
	init_frame->queue_info_new_phys_addr_hi = 0;

	init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		goto fail_fw_init;
	}

	return_mfi_pkt(instance, cmd);

	/* gather misc FW related information */
	if (!get_ctrl_info(instance, &ctrl_info)) {
		instance->max_sectors_per_req = ctrl_info.max_request_size;
		con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
		    ctrl_info.product_name, ctrl_info.ld_present_count));
	} else {
		/* fall back to a transfer size derived from the SGE limit */
		instance->max_sectors_per_req = instance->max_num_sge *
		    PAGESIZE / 512;
	}

	return (0);

fail_fw_init:
fail_alloc_fw_space:

	free_space_for_mfi(instance);

fail_ready_state:
	ddi_regs_map_free(&instance->regmap_handle);

fail_mfi_reg_setup:
	return (DDI_FAILURE);
}

/*
 * mfi_state_transition_to_ready : Move the FW to READY state
 *
 * @reg_set : MFI register set
 *
 * Loops until the firmware reports MFI_STATE_READY, nudging it out of
 * each intermediate state via doorbell writes and waiting up to a
 * per-state maximum (max_wait seconds, polled in 1ms steps).
 * Returns 0 on success, -ENODEV on fault/timeout/unknown state.
 */
static int
mfi_state_transition_to_ready(struct megasas_instance *instance)
{
	int i;
	uint8_t max_wait;
	uint32_t fw_ctrl;
	uint32_t fw_state;
	uint32_t cur_state;

	fw_state =
	    instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));

	while (fw_state != MFI_STATE_READY) {
		con_log(CL_ANN, (CE_NOTE,
		    "mfi_state_transition_to_ready:FW state%x", fw_state));

		switch (fw_state) {
		case MFI_STATE_FAULT:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW in FAULT state!!"));

			return (-ENODEV);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW waiting for HANDSHAKE"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
			    MFI_INIT_HOTPLUG, instance);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW state boot message pending"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

			max_wait = 10;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* bring it to READY state; assuming max wait 2 secs */
			instance->func_ptr->disable_intr(instance);
			con_log(CL_ANN1, (CE_NOTE,
			    "megasas: FW in OPERATIONAL state"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
			 * to be set
			 */
			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* this state should not last for more than 2 seconds */
			con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));

			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_DEVICE_SCAN:
			max_wait = 10;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		default:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: Unknown state 0x%x\n", fw_state));
			return (-ENODEV);
		}

		/* the cur_state should not last for more than max_wait secs */
		for (i = 0; i < (max_wait * 1000); i++) {
			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
			fw_state =
			    instance->func_ptr->read_fw_status_reg(instance) &
			    MFI_STATE_MASK;

			if (fw_state == cur_state) {
				delay(1 * drv_usectohz(1000));
			} else {
				break;
			}
		}

		/* return error if fw_state hasn't changed after max_wait */
		if (fw_state == cur_state) {
			con_log(CL_ANN, (CE_NOTE,
			    "FW state hasn't changed in %d secs\n", max_wait));
			return (-ENODEV);
		}
	};

	/* LINTED
E_BAD_PTR_CAST_ALIGN */ 2592 fw_ctrl = RD_IB_DOORBELL(instance); 2593 #ifdef lint 2594 fw_ctrl = fw_ctrl; 2595 #endif 2596 con_log(CL_ANN1, (CE_NOTE, 2597 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 2598 2599 /* 2600 * Write 0xF to the doorbell register to do the following. 2601 * - Abort all outstanding commands (bit 0). 2602 * - Transition from OPERATIONAL to READY state (bit 1). 2603 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 2604 * - Set to release FW to continue running (i.e. BIOS handshake 2605 * (bit 3). 2606 */ 2607 /* LINTED E_BAD_PTR_CAST_ALIGN */ 2608 WR_IB_DOORBELL(0xF, instance); 2609 2610 return (0); 2611 } 2612 2613 /* 2614 * get_seq_num 2615 */ 2616 static int 2617 get_seq_num(struct megasas_instance *instance, 2618 struct megasas_evt_log_info *eli) 2619 { 2620 int ret = 0; 2621 2622 dma_obj_t dcmd_dma_obj; 2623 struct megasas_cmd *cmd; 2624 struct megasas_dcmd_frame *dcmd; 2625 2626 cmd = get_mfi_pkt(instance); 2627 2628 if (!cmd) { 2629 cmn_err(CE_WARN, "megasas: failed to get a cmd\n"); 2630 return (-ENOMEM); 2631 } 2632 2633 dcmd = &cmd->frame->dcmd; 2634 2635 /* allocate the data transfer buffer */ 2636 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info); 2637 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 2638 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2639 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2640 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2641 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2642 2643 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 2644 con_log(CL_ANN, (CE_WARN, 2645 "get_seq_num: could not data transfer buffer alloc.")); 2646 return (DDI_FAILURE); 2647 } 2648 2649 (void) memset(dcmd_dma_obj.buffer, 0, 2650 sizeof (struct megasas_evt_log_info)); 2651 2652 (void) memset(dcmd->mbox.b, 0, 12); 2653 2654 dcmd->cmd = MFI_CMD_OP_DCMD; 2655 dcmd->cmd_status = 0; 2656 dcmd->sge_count = 1; 2657 dcmd->flags = MFI_FRAME_DIR_READ; 2658 dcmd->timeout = 0; 2659 
dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info); 2660 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2661 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info); 2662 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 2663 2664 cmd->sync_cmd = MEGASAS_TRUE; 2665 cmd->frame_count = 1; 2666 2667 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 2668 cmn_err(CE_WARN, "get_seq_num: " 2669 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n"); 2670 ret = -1; 2671 } else { 2672 /* copy the data back into callers buffer */ 2673 bcopy(dcmd_dma_obj.buffer, eli, 2674 sizeof (struct megasas_evt_log_info)); 2675 ret = 0; 2676 } 2677 2678 mega_free_dma_obj(dcmd_dma_obj); 2679 2680 return_mfi_pkt(instance, cmd); 2681 2682 return (ret); 2683 } 2684 2685 #ifndef lint 2686 static int 2687 get_seq_num_in_poll(struct megasas_instance *instance, 2688 struct megasas_evt_log_info *eli) 2689 { 2690 int ret = 0; 2691 2692 dma_obj_t dcmd_dma_obj; 2693 struct megasas_cmd *cmd; 2694 struct megasas_dcmd_frame *dcmd; 2695 2696 cmd = get_mfi_pkt(instance); 2697 2698 if (!cmd) { 2699 cmn_err(CE_WARN, "megasas: failed to get a cmd\n"); 2700 return (-ENOMEM); 2701 } 2702 2703 dcmd = &cmd->frame->dcmd; 2704 2705 /* allocate the data transfer buffer */ 2706 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info); 2707 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr; 2708 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff; 2709 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff; 2710 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 2711 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 2712 2713 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) { 2714 con_log(CL_ANN, (CE_WARN, "get_seq_num_in_poll: " 2715 "could not data transfer buffer alloc.")); 2716 return (DDI_FAILURE); 2717 } 2718 2719 (void) memset(dcmd_dma_obj.buffer, 0, 2720 sizeof (struct megasas_evt_log_info)); 2721 2722 /* for( i = 0; i < 12; i++ ) dcmd->mbox.b[i] = 0; */ 2723 (void) 
memset(dcmd->mbox.b, 0, 12); 2724 2725 dcmd->cmd = MFI_CMD_OP_DCMD; 2726 dcmd->cmd_status = 0; 2727 dcmd->sge_count = 1; 2728 dcmd->flags = MFI_FRAME_DIR_READ; 2729 dcmd->timeout = 0; 2730 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info); 2731 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 2732 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info); 2733 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 2734 2735 cmd->frame_count = 1; 2736 2737 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 2738 cmn_err(CE_WARN, "get_seq_num_in_poll: " 2739 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n"); 2740 ret = -1; 2741 } else { 2742 cmn_err(CE_WARN, "get_seq_num_in_poll:done\n"); 2743 /* copy the data back into callers buffer */ 2744 bcopy(dcmd_dma_obj.buffer, eli, 2745 sizeof (struct megasas_evt_log_info)); 2746 ret = 0; 2747 } 2748 2749 mega_free_dma_obj(dcmd_dma_obj); 2750 2751 return_mfi_pkt(instance, cmd); 2752 2753 return (ret); 2754 } 2755 #endif 2756 2757 /* 2758 * start_mfi_aen 2759 */ 2760 static int 2761 start_mfi_aen(struct megasas_instance *instance) 2762 { 2763 int ret = 0; 2764 2765 struct megasas_evt_log_info eli; 2766 union megasas_evt_class_locale class_locale; 2767 2768 /* get the latest sequence number from FW */ 2769 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info)); 2770 2771 if (get_seq_num(instance, &eli)) { 2772 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n"); 2773 return (-1); 2774 } 2775 2776 /* register AEN with FW for latest sequence number plus 1 */ 2777 class_locale.members.reserved = 0; 2778 class_locale.members.locale = MR_EVT_LOCALE_ALL; 2779 class_locale.members.class = MR_EVT_CLASS_CRITICAL; 2780 2781 ret = register_mfi_aen(instance, eli.newest_seq_num + 1, 2782 class_locale.word); 2783 2784 if (ret) { 2785 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n"); 2786 return (-1); 2787 } 2788 2789 return (ret); 2790 } 2791 2792 /* 2793 * flush_cache 2794 */ 
/*
 * flush_cache - ask the firmware to flush its controller and disk caches.
 *
 * Issues MR_DCMD_CTRL_CACHE_FLUSH in polled mode.  Best-effort: failures
 * are logged but not reported to the caller (void return).
 */
static void
flush_cache(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	/* best-effort: silently give up if no command packet is free */
	if (!(cmd = get_mfi_pkt(instance)))
		return;

	dcmd = &cmd->frame->dcmd;

	/* clear the 12-byte DCMD mailbox */
	(void) memset(dcmd->mbox.b, 0, 12);

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		cmn_err(CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
	}
	con_log(CL_ANN, (CE_NOTE, "done"));
	return_mfi_pkt(instance, cmd);
}

/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Logs the event to the sysevent queue and re-arms the AEN command with
 * the next sequence number by re-issuing the same frame to the firmware.
 */
static void
service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	uint32_t seq_num;
#ifdef TBD
	int ret = 0;
	union megasas_evt_class_locale class_locale;
#endif /* TBD */
	struct megasas_evt_detail *evt_detail =
	    (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;

	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* ENODATA status is treated as success here */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mega%d: Failed to log AEN event", instance_no));
	}

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = evt_detail->seq_num;
	seq_num++;
#ifdef TBD
	class_locale.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
	instance->aen_cmd = 0;

	return_mfi_pkt(instance, cmd);

	ret = register_mfi_aen(instance, seq_num, class_locale.word);

	if (ret) {
		cmn_err(CE_WARN, "service_mfi_aen: aen registration failed\n");
	}
#endif /* TBD */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* rearm the same AEN frame with the next sequence number */
	cmd->frame->dcmd.cmd_status = 0x0;
	cmd->frame->dcmd.mbox.w[0] = seq_num;

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}

/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command.  This function wakes that waiting routine by
 * broadcasting on instance->int_cmd_cv.
 */
static void
complete_cmd_in_sync_mode(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	/* pick up the completion status the firmware wrote into the frame */
	cmd->cmd_status = cmd->frame->io.cmd_status;

	cmd->sync_cmd = MEGASAS_FALSE;

	/* ENODATA status is treated as success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/* wake every thread blocked in issue_cmd_in_sync_mode() */
	cv_broadcast(&instance->int_cmd_cv);
}

/*
 * megasas_softintr - The Software ISR
 * @param arg	: HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Drains instance->completed_pool_list onto a private list (under
 * completed_pool_mtx), then completes each command according to its MFI
 * opcode: regular SCSI/LD I/O is translated to scsi_pkt status and its
 * completion callback invoked; internal (sync) commands wake their
 * waiters; AEN DCMDs are re-armed via service_mfi_aen().
 */
static uint_t
megasas_softintr(caddr_t arg)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct megasas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct megasas_header *hdr;
	struct megasas_instance *instance;

	con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	instance = (struct megasas_instance *)arg;
	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_UNCLAIMED);
	}

	instance->softint_running = 1;

	/* steal the whole completed list so the mutex is held briefly */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		cmd = mlist_entry(pos, struct megasas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);
		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (hdr->cmd) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MEGASAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);
			/* con_log(CL_ANN, (CE_CONT,"pkt recived")); */

			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN1, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* map the MFI completion status to SCSA status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_INIT_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				/* SJ - these are not correct way */
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
				if (pkt->pkt_cdbp[0] != SCMD_TEST_UNIT_READY) {
					pkt->pkt_reason = CMD_INCOMPLETE;
					pkt->pkt_statistics = STAT_DISCON;
					((struct scsi_status *)
					    pkt->pkt_scbp)->sts_chk = 1;
				} else {
					pkt->pkt_reason = CMD_DEV_GONE;
					pkt->pkt_statistics = STAT_DISCON;
				}
				break;
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN1, (CE_CONT,
				    "device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			default:
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_busy = 1;
				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));
			/* pull_pend_queue(instance); */

			return_mfi_pkt(instance, cmd);
			/*
			 * con_log(CL_ANN,
			 * (CE_CONT,"call add %lx",pkt->pkt_comp));
			 */

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {
				(*pkt->pkt_comp)(pkt);
			}

			/* con_log(CL_ANN, (CE_CONT, "call complete")); */
			break;
		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (cmd->frame->dcmd.opcode ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "megasas_softintr: "
					    "aborted_aen returned"));
				} else {
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;
		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;
		default:
			con_log(CL_ANN, (CE_PANIC, "Cmd type unknown !!"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}

/*
 * mega_alloc_dma_obj
 *
 * Allocate the memory and other resources for an dma object.
3125 */ 3126 static int 3127 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj) 3128 { 3129 int i; 3130 size_t alen = 0; 3131 uint_t cookie_cnt; 3132 3133 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr, 3134 DDI_DMA_SLEEP, NULL, &obj->dma_handle); 3135 if (i != DDI_SUCCESS) { 3136 3137 switch (i) { 3138 case DDI_DMA_BADATTR : 3139 con_log(CL_ANN, (CE_WARN, 3140 "Failed ddi_dma_alloc_handle- Bad atrib")); 3141 break; 3142 case DDI_DMA_NORESOURCES : 3143 con_log(CL_ANN, (CE_WARN, 3144 "Failed ddi_dma_alloc_handle- No Resources")); 3145 break; 3146 default : 3147 con_log(CL_ANN, (CE_WARN, 3148 "Failed ddi_dma_alloc_handle :unknown %d", i)); 3149 break; 3150 } 3151 3152 return (-1); 3153 } 3154 3155 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &endian_attr, 3156 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 3157 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) || 3158 alen < obj->size) { 3159 3160 ddi_dma_free_handle(&obj->dma_handle); 3161 3162 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc")); 3163 3164 return (-1); 3165 } 3166 3167 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer, 3168 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 3169 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) { 3170 3171 ddi_dma_mem_free(&obj->acc_handle); 3172 ddi_dma_free_handle(&obj->dma_handle); 3173 3174 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle")); 3175 3176 return (-1); 3177 } 3178 3179 return (cookie_cnt); 3180 } 3181 3182 /* 3183 * mega_free_dma_obj(dma_obj_t) 3184 * 3185 * De-allocate the memory and other resources for an dma object, which must 3186 * have been alloated by a previous call to mega_alloc_dma_obj() 3187 */ 3188 static void 3189 mega_free_dma_obj(dma_obj_t obj) 3190 { 3191 (void) ddi_dma_unbind_handle(obj.dma_handle); 3192 ddi_dma_mem_free(&obj.acc_handle); 3193 ddi_dma_free_handle(&obj.dma_handle); 3194 } 3195 3196 /* 3197 * megasas_dma_alloc(instance_t *, 
struct scsi_pkt *, struct buf *,
 *		int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Returns 0 on success (cookies stored in the scsa_cmd and pkt_resid set),
 * -1 on failure (bioerror() already called on bp).
 */
static int
megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = megasas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* direction of the transfer follows the buf's B_READ flag */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* cap the S/G list at what the firmware instance supports */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (-1);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (-1);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "0x%x impossible\n", i));
			/* NOTREACHED */
			break;
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* transfer spans multiple DMA windows; start with window 0 */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC,
			    "ddi_dma_numwin failed\n"));
			/* NOTREACHED */
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
			/* NOTREACHED */
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* whole transfer fits in one window */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/* harvest cookies up to the adapter's S/G limit */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (0);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible\n"));
		/* NOTREACHED */
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "0x%x impossible\n", i));
		/* NOTREACHED */
		break;
	}

	/* bind failed: release the handle and mark the command DMA-invalid */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (-1);
}

/*
 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
megasas_dma_move(struct megasas_instance *instance, struct
    scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single-window transfer is already fully consumed */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (0);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (-1);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (-1);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (0);
}

/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI frame (LD read/write frame for
 * logical-drive I/O, pass-through frame for everything else).  Returns the
 * prepared megasas_cmd, or NULL when no command packet is available or when
 * the CDB was consumed inline (in which case *cmd_done is set to 1).
 */
static struct megasas_cmd *
build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;

	struct megasas_cmd		*cmd;
	struct megasas_sge32		*mfi_sgl;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct megasas_pthru_frame	*pthru;
	struct megasas_io_frame		*ldio;

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MEGADRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		return (NULL);
	}

	cmd->pkt = pkt;
	cmd->cmd = acmd;

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	/*
	 * NOTE(review): this condition is true whenever ANY flag other than
	 * CFLAG_DMASEND is set, not only for DMA reads; a plain "else if
	 * (acmd->cmd_flags & CFLAG_DMAVALID)" (or just "else") may have been
	 * intended — confirm before relying on MFI_FRAME_DIR_NONE below.
	 */
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	/* flags |= MFI_FRAME_SGL64; */

	switch (pkt->pkt_cdbp[0]) {
	/*
	 * These CDBs are accepted but not forwarded to the firmware;
	 * the packet is completed inline by the caller (*cmd_done = 1).
	 */
	case 0x15 : /* mode select(6) */
	case 0x55 : /* mode select(10) */
	case 0x1a : /* mode sense(6) */
	case 0x5a : /* mode sense(10) */
	case 0x5e : /* ??? */
	case 0x4d : /* log sense */
	case 0x35 : /* Synchronize Cache */
		return_mfi_pkt(instance, cmd);
		*cmd_done = 1;

		return (NULL);
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
		if (acmd->islogical) {
			ldio = (struct megasas_io_frame *)cmd->frame;

			/*
			 * prepare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
			    MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
			ldio->cmd_status = 0x0;
			ldio->scsi_status = 0x0;
			ldio->target_id = acmd->device_id;
			ldio->timeout = 0;
			ldio->reserved_0 = 0;
			ldio->pad_0 = 0;
			ldio->flags = flags;
			ldio->start_lba_hi = 0;
			ldio->access_byte = (acmd->cmd_cdblen != 6) ?
			    pkt->pkt_cdbp[1] : 0;
			ldio->sge_count = acmd->cmd_cookiecnt;
			mfi_sgl = (struct megasas_sge32 *)&ldio->sgl;

			context = ldio->context;

			/* decode LBA/length per CDB group */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				ldio->lba_count = host_to_le16(
				    (uint16_t)(pkt->pkt_cdbp[4]));

				ldio->start_lba_lo = host_to_le32(
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				ldio->lba_count = host_to_le16(
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				ldio->start_lba_lo = host_to_le32(
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			} else if (acmd->cmd_cdblen == CDB_GROUP2) {
				ldio->lba_count = host_to_le16(
				    ((uint16_t)(pkt->pkt_cdbp[9])) |
				    ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint16_t)(pkt->pkt_cdbp[6]) << 24));

				ldio->start_lba_lo = host_to_le32(
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			} else if (acmd->cmd_cdblen == CDB_GROUP3) {
				ldio->lba_count = host_to_le16(
				    ((uint16_t)(pkt->pkt_cdbp[13])) |
				    ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint16_t)(pkt->pkt_cdbp[10]) << 24));

				ldio->start_lba_lo = host_to_le32(
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				/*
				 * NOTE(review): this second store also
				 * targets start_lba_lo, silently overwriting
				 * the value computed just above.  For a
				 * 16-byte CDB, bytes 2-5 are the HIGH word of
				 * the 64-bit LBA, so start_lba_hi was almost
				 * certainly intended here — confirm and fix.
				 */
				ldio->start_lba_lo = host_to_le32(
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}

			break;
		}
		/* fall through For all non-rd/wr cmds */
	default:
		pthru = (struct megasas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		pthru->cmd = (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
		pthru->cmd_status = 0x0;
		pthru->scsi_status = 0x0;
		pthru->target_id = acmd->device_id;
		pthru->lun = 0;
		pthru->cdb_len = acmd->cmd_cdblen;
		pthru->timeout = 0;
		pthru->flags = flags;
		pthru->data_xfer_len = acmd->cmd_dmacount;
		pthru->sge_count = acmd->cmd_cookiecnt;
		mfi_sgl = (struct megasas_sge32 *)&pthru->sgl;
		/* pthru->sense_len = NUM_SENSE_KEYS; */
		pthru->sense_len = 0;
		pthru->sense_buf_phys_addr_hi = 0;
		/* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
		pthru->sense_buf_phys_addr_lo = 0;

		context = pthru->context;

		bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */

	/* prepare the scatter-gather list for the firmware */
	for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
		mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
		mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
	}

	/* frames needed = header frame + frames covering the SGL, max 8 */
	sge_bytes = sizeof (struct megasas_sge32)*acmd->cmd_cookiecnt;

	cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
	    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}

/*
 * wait_for_outstanding - Wait for all outstanding cmds
 * @instance: Adapter soft state
 *
 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns error if one or more IOs
 * are pending after this time period.
 *
 * NOTE(review): the loop below performs 90 iterations of drv_usecwait(1000),
 * i.e. roughly 90 milliseconds total, while the comment above speaks of
 * seconds — confirm the intended polling interval.
 */
static int
wait_for_outstanding(struct megasas_instance *instance)
{
	int		i;
	uint32_t	wait_time = 90;

	for (i = 0; i < wait_time; i++) {
		if (!instance->fw_outstanding) {
			break;
		}

		drv_usecwait(1000); /* wait for 1000 usecs */;
	}

	if (instance->fw_outstanding) {
		return (1);
	}

	return (0);
}

/*
 * issue_mfi_pthru
 *
 * Service a user-level MFI pass-through ioctl: stage the user buffer into a
 * kernel DMA buffer, fire the frame synchronously, and copy results back.
 * Returns 0 on success, 1 on a copyin/copyout failure, DDI_FAILURE when the
 * DMA buffer cannot be allocated.
 */
static int
issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    struct megasas_cmd *cmd, int mode)
{
	void		*ubuf;
	uint32_t	kphys_addr = 0;
	uint32_t	xferlen = 0;
	uint_t		model;

	dma_obj_t			pthru_dma_obj;
	struct megasas_pthru_frame	*kpthru;
	struct megasas_pthru_frame	*pthru;

	pthru = &cmd->frame->pthru;
	kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));

		xferlen = kpthru->sgl.sge32[0].length;

		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));
		xferlen = kpthru->sgl.sge32[0].length;
		/* SJ! - ubuf needs to be virtual address.
 */
		ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
		xferlen = kpthru->sgl.sge64[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
#endif
	}

	if (xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		pthru_dma_obj.size = xferlen;
		pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
		pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		pthru_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
		pthru_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
			if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
			    xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
				    "copy from user space failed\n"));
				/*
				 * NOTE(review): this early return (and the
				 * copyout failure below) leaks pthru_dma_obj;
				 * it is only freed on the path at the bottom.
				 */
				return (1);
			}
		}

		kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
	}

	/* mirror the user frame into the firmware frame */
	pthru->cmd = kpthru->cmd;
	pthru->sense_len = kpthru->sense_len;
	pthru->cmd_status = kpthru->cmd_status;
	pthru->scsi_status = kpthru->scsi_status;
	pthru->target_id = kpthru->target_id;
	pthru->lun = kpthru->lun;
	pthru->cdb_len = kpthru->cdb_len;
	pthru->sge_count = kpthru->sge_count;
	pthru->timeout = kpthru->timeout;
	pthru->data_xfer_len = kpthru->data_xfer_len;

	pthru->sense_buf_phys_addr_hi = 0;
	/* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
	pthru->sense_buf_phys_addr_lo = 0;

	bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);

	pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
	pthru->sgl.sge32[0].length = xferlen;
	pthru->sgl.sge32[0].phys_addr = kphys_addr;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "issue_mfi_pthru: fw_ioctl failed\n"));
	} else {
		if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
			/*
			 * con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
			 * "copy to user space\n"));
			 */

			if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
			    xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
				    "copy to user space failed\n"));
				return (1);
			}
		}
	}

	/* propagate firmware status back to the user frame */
	kpthru->cmd_status = pthru->cmd_status;
	kpthru->scsi_status = pthru->scsi_status;

	con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
	    "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));

	if (xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(pthru_dma_obj);
	}

	return (0);
}

/*
 * issue_mfi_dcmd
 *
 * Service a user-level MFI DCMD ioctl; same staging/copyback pattern as
 * issue_mfi_pthru() above, including the same early-return leaks.
 */
static int
issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    struct megasas_cmd *cmd, int mode)
{
	void		*ubuf;
	uint32_t	kphys_addr = 0;
	uint32_t	xferlen = 0;
	uint32_t	model;
	dma_obj_t	dcmd_dma_obj;
	struct megasas_dcmd_frame	*kdcmd;
	struct megasas_dcmd_frame	*dcmd;

	dcmd = &cmd->frame->dcmd;
	kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));

		xferlen = kdcmd->sgl.sge32[0].length;

		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
		xferlen = kdcmd->sgl.sge32[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
		xferlen = kdcmd->sgl.sge64[0].length;
		/* SJ! - ubuf needs to be virtual address. */
		/*
		 * NOTE(review): this reads the firmware frame (dcmd) rather
		 * than the user copy (kdcmd), unlike the xferlen line above —
		 * kdcmd->sgl.sge64[0].phys_addr was probably intended.
		 */
		ubuf = (void *)(ulong_t)dcmd->sgl.sge64[0].phys_addr;
#endif
	}
	if (xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		dcmd_dma_obj.size = xferlen;
		dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
		dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
		dcmd_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
			if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
			    xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
				    "copy from user space failed\n"));
				return (1);
			}
		}

		kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
	}

	dcmd->cmd = kdcmd->cmd;
	dcmd->cmd_status = kdcmd->cmd_status;
	dcmd->sge_count = kdcmd->sge_count;
	dcmd->timeout = kdcmd->timeout;
	dcmd->data_xfer_len = kdcmd->data_xfer_len;
	dcmd->opcode = kdcmd->opcode;

	/* 12-byte DCMD mailbox */
	bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, 12);

	dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
	dcmd->sgl.sge32[0].length = xferlen;
	dcmd->sgl.sge32[0].phys_addr = kphys_addr;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
	} else {
		if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
			/*
			 * con_log(CL_ANN, (CE_WARN,"issue_mfi_dcmd: "
			 * copy to user space\n"));
			 */

			if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
			    xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
				    "copy to user space failed\n"));
				return (1);
			}
		}
	}

	kdcmd->cmd_status = dcmd->cmd_status;

	if (xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(dcmd_dma_obj);
	}

	return (0);
}

/*
 * issue_mfi_smp
 *
 * Service a user-level MFI SMP ioctl: stages separate request and response
 * buffers (SGL entry 1 = request, entry 0 = response).
 */
static int
issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    struct megasas_cmd *cmd, int mode)
{
	void		*request_ubuf;
	void		*response_ubuf;
	uint32_t	request_xferlen = 0;
	uint32_t	response_xferlen = 0;
	uint_t		model;
	dma_obj_t	request_dma_obj;
	dma_obj_t	response_dma_obj;
	struct megasas_smp_frame	*ksmp;
	struct megasas_smp_frame	*smp;
	struct megasas_sge32		*sge32;
#ifndef _ILP32
	struct megasas_sge64		*sge64;
#endif

	smp = &cmd->frame->smp;
	ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		/* SJ! - ubuf needs to be virtual address.
 */

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));

		sge32 = &ksmp->sgl[0].sge32[0];
		response_xferlen = sge32[0].length;
		request_xferlen = sge32[1].length;
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
		    "response_xferlen = %x, request_xferlen = %x",
		    response_xferlen, request_xferlen));

		/* SJ! - ubuf needs to be virtual address. */

		response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
		    "response_ubuf = %p, request_ubuf = %p",
		    response_ubuf, request_ubuf));
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));

		sge64 = &ksmp->sgl[0].sge64[0];
		response_xferlen = sge64[0].length;
		request_xferlen = sge64[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
		request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
#endif
	}
	if (request_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		request_dma_obj.size = request_xferlen;
		request_dma_obj.dma_attr = megasas_generic_dma_attr;
		request_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		request_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		request_dma_obj.dma_attr.dma_attr_sgllen = 1;
		request_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
		    request_xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}

	if (response_xferlen) {
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		response_dma_obj.size = response_xferlen;
		response_dma_obj.dma_attr = megasas_generic_dma_attr;
		response_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		response_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		response_dma_obj.dma_attr.dma_attr_sgllen = 1;
		response_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
		    response_xferlen, mode)) {
			con_log(CL_ANN,
			    (CE_WARN, "issue_mfi_smp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}

	smp->cmd = ksmp->cmd;
	smp->cmd_status = ksmp->cmd_status;
	smp->connection_status = ksmp->connection_status;
	smp->sge_count = ksmp->sge_count;
	/* smp->context = ksmp->context; */
	smp->timeout = ksmp->timeout;
	smp->data_xfer_len = ksmp->data_xfer_len;

	bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
	    sizeof (uint64_t));

	smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;

	/*
	 * NOTE(review): the cookie addresses below are read even when the
	 * corresponding xferlen is 0, in which case that dma_obj was never
	 * allocated above and its cookie is uninitialized stack data —
	 * confirm this cannot reach the firmware.
	 */
	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));

		sge32 = &smp->sgl[0].sge32[0];
		sge32[0].length = response_xferlen;
		sge32[0].phys_addr =
		    response_dma_obj.dma_cookie[0].dmac_address;
		sge32[1].length = request_xferlen;
		sge32[1].phys_addr =
		    request_dma_obj.dma_cookie[0].dmac_address;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE,
		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
		sge32 = &smp->sgl[0].sge32[0];
		sge32[0].length = response_xferlen;
		sge32[0].phys_addr =
		    response_dma_obj.dma_cookie[0].dmac_address;
		sge32[1].length = request_xferlen;
		sge32[1].phys_addr =
		    request_dma_obj.dma_cookie[0].dmac_address;
#else
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: DDI_MODEL_LP64"));
		sge64 = &smp->sgl[0].sge64[0];
		sge64[0].length = response_xferlen;
		sge64[0].phys_addr =
		    response_dma_obj.dma_cookie[0].dmac_address;
		sge64[1].length = request_xferlen;
		sge64[1].phys_addr =
		    request_dma_obj.dma_cookie[0].dmac_address;
#endif
	}
	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
	    "smp->response_xferlen = %d, smp->request_xferlen = %d "
	    "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length,
	    smp->data_xfer_len));

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN,
		    "issue_mfi_smp: fw_ioctl failed\n"));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "issue_mfi_smp: copy to user space\n"));

		if (request_xferlen) {
			if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
			    request_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}

		if (response_xferlen) {
			if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
			    response_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}
	}

	ksmp->cmd_status = smp->cmd_status;
	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
	    smp->cmd_status));


	if (request_xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(request_dma_obj);
	}

	if (response_xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(response_dma_obj);
	}

	return (0);
}

/*
 * issue_mfi_stp
 *
 * Service a user-level MFI STP/SATA pass-through ioctl: stages a FIS buffer
 * (SGL entry 0) and a data buffer (SGL entry 1).
 */
static int
issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    struct megasas_cmd *cmd, int mode)
{
	void		*fis_ubuf;
	void		*data_ubuf;
	uint32_t	fis_xferlen = 0;
	uint32_t	data_xferlen = 0;
	uint_t		model;
	dma_obj_t	fis_dma_obj;
	dma_obj_t	data_dma_obj;
	struct megasas_stp_frame	*kstp;
	struct megasas_stp_frame	*stp;

	stp = &cmd->frame->stp;
	kstp = (struct megasas_stp_frame *)&ioctl->frame[0];

	model = ddi_model_convert_from(mode & FMODELS);
	if (model == DDI_MODEL_ILP32) {
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		/* SJ! - ubuf needs to be virtual address.
 */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
	} else {
#ifdef _ILP32
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));

		fis_xferlen = kstp->sgl.sge32[0].length;
		data_xferlen = kstp->sgl.sge32[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
#else
		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));

		fis_xferlen = kstp->sgl.sge64[0].length;
		data_xferlen = kstp->sgl.sge64[1].length;

		/* SJ! - ubuf needs to be virtual address. */
		fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
		data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
#endif
	}


	if (fis_xferlen) {
#ifdef DEBUG
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
#endif
		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		fis_dma_obj.size = fis_xferlen;
		fis_dma_obj.dma_attr = megasas_generic_dma_attr;
		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		fis_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
		fis_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
		    fis_xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}

	if (data_xferlen) {
		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
		    "data_xferlen = %x", data_ubuf, data_xferlen));

		/* means IOCTL requires DMA */
		/* allocate the data transfer buffer */
		data_dma_obj.size = data_xferlen;
		data_dma_obj.dma_attr = megasas_generic_dma_attr;
		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
		data_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
		data_dma_obj.dma_attr.dma_attr_align = 1;

		/* allocate kernel buffer for DMA */
		if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "could not data transfer buffer alloc."));
			return (DDI_FAILURE);
		}

		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
		if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
		    data_xferlen, mode)) {
			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
			    "copy from user space failed\n"));
			return (1);
		}
	}

	stp->cmd = kstp->cmd;
	stp->cmd_status = kstp->cmd_status;
	stp->connection_status = kstp->connection_status;
	stp->target_id = kstp->target_id;
	stp->sge_count = kstp->sge_count;
	/* stp->context = kstp->context; */
	stp->timeout = kstp->timeout;
	stp->data_xfer_len = kstp->data_xfer_len;

	/*
	 * NOTE(review): copies only 10 bytes of the FIS — confirm this
	 * matches the fis field size in the megasas_stp_frame definition.
	 */
	bcopy((void *)kstp->fis, (void *)stp->fis, 10);

	stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
	stp->stp_flags = kstp->stp_flags;
	/*
	 * NOTE(review): the cookie addresses below are read even when the
	 * corresponding xferlen is 0, in which case that dma_obj was never
	 * allocated above and is uninitialized stack data — confirm.
	 */
	stp->sgl.sge32[0].length = fis_xferlen;
	stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
	stp->sgl.sge32[1].length = data_xferlen;
	stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;

	cmd->sync_cmd = MEGASAS_TRUE;
	cmd->frame_count = 1;

	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
	} else {
		/*
		 * con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
		 * "copy to user space\n"));
		 */

		if (fis_xferlen) {
			if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
			    fis_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}

		if (data_xferlen) {
			if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
			    data_xferlen, mode)) {
				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
				    "copy to user space failed\n"));
				return (1);
			}
		}
	}

	kstp->cmd_status = stp->cmd_status;

	if (fis_xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(fis_dma_obj);
	}

	if (data_xferlen) {
		/* free kernel buffer */
		mega_free_dma_obj(data_dma_obj);
	}

	return (0);
}

/*
 * fill_up_drv_ver
 *
 * Populate the driver-version structure returned to management ioctls.
 */
static void
fill_up_drv_ver(struct megasas_drv_ver *dv)
{
	(void) memset(dv, 0, sizeof (struct megasas_drv_ver));

	(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
	(void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
	(void) memcpy(dv->os_ver, "Build 36", strlen("Build 36"));
	(void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
	(void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
	(void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
	    strlen(MEGASAS_RELDATE));
}

/*
 * handle_drv_ioctl
 */
static int
handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
    int mode)
{
	int	i;
	int	rval = 0;
	int	*props = NULL;
	void	*ubuf;

	uint8_t		*pci_conf_buf;
	uint32_t	xferlen;
	uint32_t	num_props;
	uint_t		model;
	struct megasas_dcmd_frame	*kdcmd;
	struct megasas_drv_ver		dv;
	struct megasas_pci_information	pi;

	kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];

	model
= ddi_model_convert_from(mode & FMODELS); 4322 if (model == DDI_MODEL_ILP32) { 4323 con_log(CL_ANN1, (CE_NOTE, 4324 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4325 4326 xferlen = kdcmd->sgl.sge32[0].length; 4327 4328 /* SJ! - ubuf needs to be virtual address. */ 4329 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4330 } else { 4331 #ifdef _ILP32 4332 con_log(CL_ANN1, (CE_NOTE, 4333 "handle_drv_ioctl: DDI_MODEL_ILP32")); 4334 xferlen = kdcmd->sgl.sge32[0].length; 4335 /* SJ! - ubuf needs to be virtual address. */ 4336 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 4337 #else 4338 con_log(CL_ANN1, (CE_NOTE, 4339 "handle_drv_ioctl: DDI_MODEL_LP64")); 4340 xferlen = kdcmd->sgl.sge64[0].length; 4341 /* SJ! - ubuf needs to be virtual address. */ 4342 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 4343 #endif 4344 } 4345 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4346 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 4347 4348 switch (kdcmd->opcode) { 4349 case MR_DRIVER_IOCTL_DRIVER_VERSION: 4350 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4351 "MR_DRIVER_IOCTL_DRIVER_VERSION")); 4352 4353 fill_up_drv_ver(&dv); 4354 4355 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 4356 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4357 "MR_DRIVER_IOCTL_DRIVER_VERSION : " 4358 "copy to user space failed\n")); 4359 kdcmd->cmd_status = 1; 4360 rval = 1; 4361 } else { 4362 kdcmd->cmd_status = 0; 4363 } 4364 break; 4365 case MR_DRIVER_IOCTL_PCI_INFORMATION: 4366 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 4367 "MR_DRIVER_IOCTL_PCI_INFORMAITON")); 4368 4369 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 4370 0, "reg", &props, &num_props)) { 4371 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4372 "MR_DRIVER_IOCTL_PCI_INFORMATION : " 4373 "ddi_prop_look_int_array failed\n")); 4374 rval = 1; 4375 } else { 4376 4377 pi.busNumber = (props[0] >> 16) & 0xFF; 4378 pi.deviceNumber = (props[0] >> 11) & 0x1f; 4379 pi.functionNumber = (props[0] >> 8) & 0x7; 4380 
ddi_prop_free((void *)props); 4381 } 4382 4383 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 4384 4385 for (i = 0; i < sizeof (struct megasas_pci_information); i++) { 4386 pci_conf_buf[i] = 4387 pci_config_get8(instance->pci_handle, i); 4388 } 4389 4390 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 4391 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4392 "MR_DRIVER_IOCTL_PCI_INFORMATION : " 4393 "copy to user space failed\n")); 4394 kdcmd->cmd_status = 1; 4395 rval = 1; 4396 } else { 4397 kdcmd->cmd_status = 0; 4398 } 4399 break; 4400 default: 4401 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 4402 "invalid driver specific IOCTL opcode = 0x%x", 4403 kdcmd->opcode)); 4404 kdcmd->cmd_status = 1; 4405 rval = 1; 4406 break; 4407 } 4408 4409 return (rval); 4410 } 4411 4412 /* 4413 * handle_mfi_ioctl 4414 */ 4415 static int 4416 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl, 4417 int mode) 4418 { 4419 int rval = 0; 4420 4421 struct megasas_header *hdr; 4422 struct megasas_cmd *cmd; 4423 4424 cmd = get_mfi_pkt(instance); 4425 4426 if (!cmd) { 4427 con_log(CL_ANN, (CE_WARN, "megasas: " 4428 "failed to get a cmd packet\n")); 4429 return (1); 4430 } 4431 4432 hdr = (struct megasas_header *)&ioctl->frame[0]; 4433 4434 switch (hdr->cmd) { 4435 case MFI_CMD_OP_DCMD: 4436 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode); 4437 break; 4438 case MFI_CMD_OP_SMP: 4439 rval = issue_mfi_smp(instance, ioctl, cmd, mode); 4440 break; 4441 case MFI_CMD_OP_STP: 4442 rval = issue_mfi_stp(instance, ioctl, cmd, mode); 4443 break; 4444 case MFI_CMD_OP_LD_SCSI: 4445 case MFI_CMD_OP_PD_SCSI: 4446 rval = issue_mfi_pthru(instance, ioctl, cmd, mode); 4447 break; 4448 default: 4449 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: " 4450 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd)); 4451 rval = 1; 4452 break; 4453 } 4454 4455 4456 return_mfi_pkt(instance, cmd); 4457 4458 return (rval); 4459 } 4460 4461 /* 4462 * AEN 4463 */ 4464 static int 4465 handle_mfi_aen(struct 
megasas_instance *instance, struct megasas_aen *aen) 4466 { 4467 int rval = 0; 4468 4469 rval = register_mfi_aen(instance, instance->aen_seq_num, 4470 aen->class_locale_word); 4471 4472 aen->cmd_status = (uint8_t)rval; 4473 4474 return (rval); 4475 } 4476 4477 static int 4478 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num, 4479 uint32_t class_locale_word) 4480 { 4481 int ret_val; 4482 4483 struct megasas_cmd *cmd; 4484 struct megasas_dcmd_frame *dcmd; 4485 union megasas_evt_class_locale curr_aen; 4486 union megasas_evt_class_locale prev_aen; 4487 4488 /* 4489 * If there an AEN pending already (aen_cmd), check if the 4490 * class_locale of that pending AEN is inclusive of the new 4491 * AEN request we currently have. If it is, then we don't have 4492 * to do anything. In other words, whichever events the current 4493 * AEN request is subscribing to, have already been subscribed 4494 * to. 4495 * 4496 * If the old_cmd is _not_ inclusive, then we have to abort 4497 * that command, form a class_locale that is superset of both 4498 * old and current and re-issue to the FW 4499 */ 4500 4501 curr_aen.word = class_locale_word; 4502 4503 if (instance->aen_cmd) { 4504 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; 4505 4506 /* 4507 * A class whose enum value is smaller is inclusive of all 4508 * higher values. If a PROGRESS (= -1) was previously 4509 * registered, then a new registration requests for higher 4510 * classes need not be sent to FW. They are automatically 4511 * included. 4512 * 4513 * Locale numbers don't have such hierarchy. They are bitmap 4514 * values 4515 */ 4516 if ((prev_aen.members.class <= curr_aen.members.class) && 4517 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4518 curr_aen.members.locale)) { 4519 /* 4520 * Previously issued event registration includes 4521 * current request. Nothing to do. 
4522 */ 4523 4524 return (0); 4525 } else { 4526 curr_aen.members.locale |= prev_aen.members.locale; 4527 4528 if (prev_aen.members.class < curr_aen.members.class) 4529 curr_aen.members.class = prev_aen.members.class; 4530 4531 ret_val = abort_aen_cmd(instance, instance->aen_cmd); 4532 4533 if (ret_val) { 4534 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 4535 "failed to abort prevous AEN command\n")); 4536 4537 return (ret_val); 4538 } 4539 } 4540 } else { 4541 curr_aen.word = class_locale_word; 4542 } 4543 4544 cmd = get_mfi_pkt(instance); 4545 4546 if (!cmd) 4547 return (-ENOMEM); 4548 4549 dcmd = &cmd->frame->dcmd; 4550 4551 /* for(i = 0; i < 12; i++) dcmd->mbox.b[i] = 0; */ 4552 (void) memset(dcmd->mbox.b, 0, 12); 4553 4554 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4555 sizeof (struct megasas_evt_detail)); 4556 4557 /* Prepare DCMD for aen registration */ 4558 dcmd->cmd = MFI_CMD_OP_DCMD; 4559 dcmd->cmd_status = 0x0; 4560 dcmd->sge_count = 1; 4561 dcmd->flags = MFI_FRAME_DIR_READ; 4562 dcmd->timeout = 0; 4563 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail); 4564 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 4565 dcmd->mbox.w[0] = seq_num; 4566 dcmd->mbox.w[1] = curr_aen.word; 4567 dcmd->sgl.sge32[0].phys_addr = 4568 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address; 4569 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail); 4570 4571 instance->aen_seq_num = seq_num; 4572 4573 /* 4574 * Store reference to the cmd used to register for AEN. 
When an 4575 * application wants us to register for AEN, we have to abort this 4576 * cmd and re-register with a new EVENT LOCALE supplied by that app 4577 */ 4578 instance->aen_cmd = cmd; 4579 4580 cmd->frame_count = 1; 4581 4582 /* Issue the aen registration frame */ 4583 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 4584 instance->func_ptr->issue_cmd(cmd, instance); 4585 4586 return (0); 4587 } 4588 4589 #ifndef lint 4590 /*ARGSUSED*/ 4591 static void 4592 megasas_minphys(struct buf *bp) 4593 { 4594 con_log(CL_ANN1, (CE_CONT, ("minphys CALLED\n"))); 4595 } 4596 #endif 4597 4598 static void 4599 display_scsi_inquiry(caddr_t scsi_inq) 4600 { 4601 #define MAX_SCSI_DEVICE_CODE 14 4602 int i; 4603 char inquiry_buf[256] = {0}; 4604 int len; 4605 const char *const scsi_device_types[] = { 4606 "Direct-Access ", 4607 "Sequential-Access", 4608 "Printer ", 4609 "Processor ", 4610 "WORM ", 4611 "CD-ROM ", 4612 "Scanner ", 4613 "Optical Device ", 4614 "Medium Changer ", 4615 "Communications ", 4616 "Unknown ", 4617 "Unknown ", 4618 "Unknown ", 4619 "Enclosure ", 4620 }; 4621 4622 len = 0; 4623 4624 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 4625 for (i = 8; i < 16; i++) { 4626 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4627 scsi_inq[i]); 4628 } 4629 4630 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 4631 4632 for (i = 16; i < 32; i++) { 4633 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4634 scsi_inq[i]); 4635 } 4636 4637 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 4638 4639 for (i = 32; i < 36; i++) { 4640 len += snprintf(inquiry_buf + len, 265 - len, "%c", 4641 scsi_inq[i]); 4642 } 4643 4644 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4645 4646 4647 i = scsi_inq[0] & 0x1f; 4648 4649 4650 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 4651 i < MAX_SCSI_DEVICE_CODE ? 
scsi_device_types[i] : 4652 "Unknown "); 4653 4654 4655 len += snprintf(inquiry_buf + len, 265 - len, 4656 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 4657 4658 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 4659 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 4660 } else { 4661 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 4662 } 4663 4664 con_log(CL_ANN1, (CE_CONT, inquiry_buf)); 4665 } 4666 4667 #if defined(NOT_YET) && !defined(lint) 4668 /* 4669 * lint pointed out a bug that pkt may be used before being set 4670 */ 4671 static void 4672 io_timeout_checker(void *arg) 4673 { 4674 unsigned int cookie; 4675 struct scsi_pkt *pkt; 4676 struct megasas_instance *instance = arg; 4677 4678 cookie = ddi_enter_critical(); 4679 4680 /* decrease the timeout value per each packet */ 4681 4682 if (pkt->pkt_time == 0) { 4683 /* this means that the scsi command has timed out */ 4684 /* pull out the packet from the list */ 4685 /* call callback in the scsi_pkt structure */ 4686 } 4687 4688 ddi_exit_critical(cookie); 4689 4690 /* schedule next timeout check */ 4691 instance->timeout_id = timeout(io_timeout_checker, (void *)instance, 4692 drv_usectohz(MEGASAS_1_SECOND)); 4693 } 4694 #endif /* defined(NOT_YET) && !defined(lint) */ 4695 4696 static int 4697 read_fw_status_reg_xscale(struct megasas_instance *instance) 4698 { 4699 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4700 return ((int)RD_OB_MSG_0(instance)); 4701 4702 } 4703 4704 static int 4705 read_fw_status_reg_ppc(struct megasas_instance *instance) 4706 { 4707 /* con_log(CL_ANN, (CE_WARN, "read_fw_status_reg_ppc: called\n")); */ 4708 4709 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4710 return ((int)RD_OB_SCRATCH_PAD_0(instance)); 4711 } 4712 4713 static void 4714 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance) 4715 { 4716 atomic_add_16(&instance->fw_outstanding, 1); 4717 /* push_pend_queue(instance, cmd); */ 4718 4719 /* Issue the command to the FW */ 4720 /* LINTED 
E_BAD_PTR_CAST_ALIGN */ 4721 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) | 4722 (cmd->frame_count - 1), instance); 4723 } 4724 4725 static void 4726 issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance) 4727 { 4728 /* con_log(CL_ANN, (CE_WARN, "issue_cmd_ppc: called\n")); */ 4729 4730 atomic_add_16(&instance->fw_outstanding, 1); 4731 4732 /* Issue the command to the FW */ 4733 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4734 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) | 4735 (((cmd->frame_count - 1) << 1) | 1), instance); 4736 } 4737 4738 /* 4739 * issue_cmd_in_sync_mode 4740 */ 4741 static int 4742 issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance, 4743 struct megasas_cmd *cmd) 4744 { 4745 int i; 4746 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 10000; 4747 4748 cmd->cmd_status = ENODATA; 4749 4750 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4751 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) | 4752 (cmd->frame_count - 1), instance); 4753 4754 mutex_enter(&instance->int_cmd_mtx); 4755 4756 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 4757 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 4758 } 4759 4760 mutex_exit(&instance->int_cmd_mtx); 4761 4762 if (i < (msecs -1)) { 4763 return (0); 4764 } else { 4765 return (1); 4766 } 4767 } 4768 4769 static int 4770 issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance, 4771 struct megasas_cmd *cmd) 4772 { 4773 int i; 4774 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 10000; 4775 4776 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n")); 4777 4778 cmd->cmd_status = ENODATA; 4779 4780 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4781 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) | 4782 (((cmd->frame_count - 1) << 1) | 1), instance); 4783 4784 mutex_enter(&instance->int_cmd_mtx); 4785 4786 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 4787 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 4788 } 4789 4790 mutex_exit(&instance->int_cmd_mtx); 
4791 4792 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n")); 4793 4794 if (i < (msecs -1)) { 4795 return (0); 4796 } else { 4797 return (1); 4798 } 4799 } 4800 4801 /* 4802 * issue_cmd_in_poll_mode 4803 */ 4804 static int 4805 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance, 4806 struct megasas_cmd *cmd) 4807 { 4808 int i; 4809 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 1000; 4810 4811 struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame; 4812 4813 frame_hdr->cmd_status = 0xFF; 4814 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 4815 4816 /* issue the frame using inbound queue port */ 4817 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4818 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) | 4819 (cmd->frame_count - 1), instance); 4820 4821 /* wait for cmd_status to change */ 4822 for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) { 4823 drv_usecwait(1000); /* wait for 1000 usecs */ 4824 } 4825 4826 if (frame_hdr->cmd_status == 0xff) { 4827 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 4828 "cmd polling timed out")); 4829 return (DDI_FAILURE); 4830 } 4831 4832 return (DDI_SUCCESS); 4833 } 4834 4835 static int 4836 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance, 4837 struct megasas_cmd *cmd) 4838 { 4839 int i; 4840 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * 1000; 4841 4842 struct megasas_header *frame_hdr = (struct megasas_header *)cmd->frame; 4843 4844 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n")); 4845 4846 frame_hdr->cmd_status = 0xFF; 4847 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 4848 4849 /* issue the frame using inbound queue port */ 4850 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4851 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) | 4852 (((cmd->frame_count - 1) << 1) | 1), instance); 4853 4854 /* wait for cmd_status to change */ 4855 for (i = 0; i < msecs && (frame_hdr->cmd_status == 0xff); i++) { 4856 drv_usecwait(1000); /* wait for 1000 usecs */ 
4857 } 4858 4859 if (frame_hdr->cmd_status == 0xff) { 4860 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 4861 "cmd polling timed out")); 4862 return (DDI_FAILURE); 4863 } 4864 4865 return (DDI_SUCCESS); 4866 } 4867 4868 static void 4869 enable_intr_xscale(struct megasas_instance *instance) 4870 { 4871 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4872 MFI_ENABLE_INTR(instance); 4873 } 4874 4875 static void 4876 enable_intr_ppc(struct megasas_instance *instance) 4877 { 4878 uint32_t mask; 4879 4880 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n")); 4881 4882 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4883 WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); 4884 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4885 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance); 4886 /* WR_OB_INTR_MASK(~0x80000000, instance); */ 4887 4888 /* dummy read to force PCI flush */ 4889 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4890 mask = RD_OB_INTR_MASK(instance); 4891 #ifdef lint 4892 mask = mask; 4893 #endif 4894 4895 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: " 4896 "outbound_intr_mask = 0x%x\n", mask)); 4897 } 4898 4899 static void 4900 disable_intr_xscale(struct megasas_instance *instance) 4901 { 4902 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4903 MFI_DISABLE_INTR(instance); 4904 } 4905 4906 static void 4907 disable_intr_ppc(struct megasas_instance *instance) 4908 { 4909 uint32_t mask; 4910 4911 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n")); 4912 4913 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4914 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : " 4915 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance))); 4916 4917 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4918 WR_OB_INTR_MASK(0xFFFFFFFF, instance); 4919 4920 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4921 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : " 4922 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance))); 4923 4924 /* dummy read to force PCI flush */ 4925 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4926 mask = RD_OB_INTR_MASK(instance); 4927 #ifdef lint 4928 
mask = mask; 4929 #endif 4930 } 4931 4932 static int 4933 intr_ack_xscale(struct megasas_instance *instance) 4934 { 4935 uint32_t status; 4936 4937 /* check if it is our interrupt */ 4938 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4939 status = RD_OB_INTR_STATUS(instance); 4940 4941 if (!(status & MFI_OB_INTR_STATUS_MASK)) { 4942 return (DDI_INTR_UNCLAIMED); 4943 } 4944 4945 /* clear the interrupt by writing back the same value */ 4946 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4947 WR_OB_INTR_STATUS(status, instance); 4948 4949 return (DDI_INTR_CLAIMED); 4950 } 4951 4952 static int 4953 intr_ack_ppc(struct megasas_instance *instance) 4954 { 4955 uint32_t status; 4956 4957 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n")); 4958 4959 /* check if it is our interrupt */ 4960 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4961 status = RD_OB_INTR_STATUS(instance); 4962 4963 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status)); 4964 4965 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) { 4966 return (DDI_INTR_UNCLAIMED); 4967 } 4968 4969 /* clear the interrupt by writing back the same value */ 4970 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4971 WR_OB_DOORBELL_CLEAR(status, instance); 4972 4973 /* dummy READ */ 4974 /* LINTED E_BAD_PTR_CAST_ALIGN */ 4975 status = RD_OB_INTR_STATUS(instance); 4976 4977 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n")); 4978 4979 return (DDI_INTR_CLAIMED); 4980 } 4981