1 /* 2 * mr_sas.c: source for mr_sas driver 3 * 4 * Solaris MegaRAID device driver for SAS2.0 controllers 5 * Copyright (c) 2008-2012, LSI Logic Corporation. 6 * All rights reserved. 7 * 8 * Version: 9 * Author: 10 * Swaminathan K S 11 * Arun Chandrashekhar 12 * Manju R 13 * Rasheed 14 * Shakeel Bukhari 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions are met: 18 * 19 * 1. Redistributions of source code must retain the above copyright notice, 20 * this list of conditions and the following disclaimer. 21 * 22 * 2. Redistributions in binary form must reproduce the above copyright notice, 23 * this list of conditions and the following disclaimer in the documentation 24 * and/or other materials provided with the distribution. 25 * 26 * 3. Neither the name of the author nor the names of its contributors may be 27 * used to endorse or promote products derived from this software without 28 * specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 41 * DAMAGE. 42 */ 43 44 /* 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved. 47 * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 48 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved. 49 * Copyright 2015 Garrett D'Amore <garrett@damore.org> 50 */ 51 52 #include <sys/types.h> 53 #include <sys/param.h> 54 #include <sys/file.h> 55 #include <sys/errno.h> 56 #include <sys/open.h> 57 #include <sys/cred.h> 58 #include <sys/modctl.h> 59 #include <sys/conf.h> 60 #include <sys/devops.h> 61 #include <sys/cmn_err.h> 62 #include <sys/kmem.h> 63 #include <sys/stat.h> 64 #include <sys/mkdev.h> 65 #include <sys/pci.h> 66 #include <sys/scsi/scsi.h> 67 #include <sys/ddi.h> 68 #include <sys/sunddi.h> 69 #include <sys/atomic.h> 70 #include <sys/signal.h> 71 #include <sys/byteorder.h> 72 #include <sys/sdt.h> 73 #include <sys/fs/dv_node.h> /* devfs_clean */ 74 75 #include "mr_sas.h" 76 77 /* 78 * FMA header files 79 */ 80 #include <sys/ddifm.h> 81 #include <sys/fm/protocol.h> 82 #include <sys/fm/util.h> 83 #include <sys/fm/io/ddi.h> 84 85 /* Macros to help Skinny and stock 2108/MFI live together. */ 86 #define WR_IB_PICK_QPORT(addr, instance) \ 87 if ((instance)->skinny) { \ 88 WR_IB_LOW_QPORT((addr), (instance)); \ 89 WR_IB_HIGH_QPORT(0, (instance)); \ 90 } else { \ 91 WR_IB_QPORT((addr), (instance)); \ 92 } 93 94 /* 95 * Local static data 96 */ 97 static void *mrsas_state = NULL; 98 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE; 99 volatile int debug_level_g = CL_NONE; 100 static volatile int msi_enable = 1; 101 static volatile int ctio_enable = 1; 102 103 /* Default Timeout value to issue online controller reset */ 104 volatile int debug_timeout_g = 0xF0; /* 0xB4; */ 105 /* Simulate consecutive firmware fault */ 106 static volatile int debug_fw_faults_after_ocr_g = 0; 107 #ifdef OCRDEBUG 108 /* Simulate three consecutive timeout for an IO */ 109 static volatile int debug_consecutive_timeout_after_ocr_g = 0; 110 #endif 111 112 #pragma weak scsi_hba_open 113 #pragma weak scsi_hba_close 114 #pragma weak scsi_hba_ioctl 115 116 /* Local static prototypes. */ 117 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 118 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t); 119 #ifdef __sparc 120 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t); 121 #else 122 static int mrsas_quiesce(dev_info_t *); 123 #endif 124 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t); 125 static int mrsas_open(dev_t *, int, int, cred_t *); 126 static int mrsas_close(dev_t, int, int, cred_t *); 127 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 128 129 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *, 130 scsi_hba_tran_t *, struct scsi_device *); 131 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register 132 struct scsi_pkt *, struct buf *, int, int, int, int, 133 int (*)(), caddr_t); 134 static int mrsas_tran_start(struct scsi_address *, 135 register struct scsi_pkt *); 136 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *); 137 static int mrsas_tran_reset(struct scsi_address *, int); 138 static int mrsas_tran_getcap(struct scsi_address *, char *, int); 139 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int); 140 static void mrsas_tran_destroy_pkt(struct scsi_address *, 141 struct scsi_pkt *); 142 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *); 143 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *); 144 static int mrsas_tran_quiesce(dev_info_t *dip); 145 static int mrsas_tran_unquiesce(dev_info_t *dip); 146 static uint_t mrsas_isr(); 147 static uint_t mrsas_softintr(); 148 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *); 149 150 static void free_space_for_mfi(struct mrsas_instance *); 151 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *); 152 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *); 153 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *, 154 struct mrsas_cmd *); 155 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *, 156 struct mrsas_cmd *); 157 static void enable_intr_ppc(struct mrsas_instance *); 158 static void disable_intr_ppc(struct mrsas_instance *); 159 static int intr_ack_ppc(struct mrsas_instance *); 160 static void flush_cache(struct mrsas_instance *instance); 161 void display_scsi_inquiry(caddr_t); 162 static int start_mfi_aen(struct mrsas_instance *instance); 163 static int handle_drv_ioctl(struct mrsas_instance *instance, 164 struct mrsas_ioctl *ioctl, int mode); 165 static int handle_mfi_ioctl(struct mrsas_instance *instance, 166 struct mrsas_ioctl *ioctl, int mode); 167 static int handle_mfi_aen(struct mrsas_instance *instance, 168 struct mrsas_aen *aen); 169 static struct mrsas_cmd *build_cmd(struct mrsas_instance *, 170 struct scsi_address *, struct scsi_pkt *, uchar_t *); 171 static int alloc_additional_dma_buffer(struct mrsas_instance *); 172 static void complete_cmd_in_sync_mode(struct mrsas_instance *, 173 struct mrsas_cmd *); 174 static int mrsas_kill_adapter(struct mrsas_instance *); 175 static int mrsas_issue_init_mfi(struct mrsas_instance *); 176 static int mrsas_reset_ppc(struct mrsas_instance *); 177 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *); 178 static int wait_for_outstanding(struct mrsas_instance *instance); 179 static int register_mfi_aen(struct mrsas_instance *instance, 180 uint32_t seq_num, uint32_t class_locale_word); 181 static int issue_mfi_pthru(struct mrsas_instance *instance, struct 182 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); 183 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct 184 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); 185 static int issue_mfi_smp(struct mrsas_instance *instance, struct 186 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); 187 static int issue_mfi_stp(struct mrsas_instance *instance, struct 188 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode); 189 static int abort_aen_cmd(struct mrsas_instance *instance, 190 struct mrsas_cmd *cmd_to_abort); 191 192 static void mrsas_rem_intrs(struct mrsas_instance *instance); 193 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type); 194 195 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *, 196 scsi_hba_tran_t *, struct scsi_device *); 197 static int mrsas_tran_bus_config(dev_info_t *, uint_t, 198 ddi_bus_config_op_t, void *, dev_info_t **); 199 static int mrsas_parse_devname(char *, int *, int *); 200 static int mrsas_config_all_devices(struct mrsas_instance *); 201 static int mrsas_config_ld(struct mrsas_instance *, uint16_t, 202 uint8_t, dev_info_t **); 203 static int mrsas_name_node(dev_info_t *, char *, int); 204 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *); 205 static void free_additional_dma_buffer(struct mrsas_instance *); 206 static void io_timeout_checker(void *); 207 static void mrsas_fm_init(struct mrsas_instance *); 208 static void mrsas_fm_fini(struct mrsas_instance *); 209 210 static struct mrsas_function_template mrsas_function_template_ppc = { 211 .read_fw_status_reg = read_fw_status_reg_ppc, 212 .issue_cmd = issue_cmd_ppc, 213 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc, 214 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc, 215 .enable_intr = enable_intr_ppc, 216 .disable_intr = disable_intr_ppc, 217 .intr_ack = intr_ack_ppc, 218 .init_adapter = mrsas_init_adapter_ppc 219 }; 220 221 222 static struct mrsas_function_template mrsas_function_template_fusion = { 223 .read_fw_status_reg = tbolt_read_fw_status_reg, 224 .issue_cmd = tbolt_issue_cmd, 225 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode, 226 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode, 227 .enable_intr = tbolt_enable_intr, 228 .disable_intr = tbolt_disable_intr, 229 .intr_ack = tbolt_intr_ack, 230 .init_adapter = mrsas_init_adapter_tbolt 231 }; 232 233 234 ddi_dma_attr_t mrsas_generic_dma_attr = { 235 DMA_ATTR_V0, /* dma_attr_version */ 236 0, /* low DMA address range */ 237 0xFFFFFFFFU, /* high DMA address range */ 238 0xFFFFFFFFU, /* DMA counter register */ 239 8, /* DMA address alignment */ 240 0x07, /* DMA burstsizes */ 241 1, /* min DMA size */ 242 0xFFFFFFFFU, /* max DMA size */ 243 0xFFFFFFFFU, /* segment boundary */ 244 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */ 245 512, /* granularity of device */ 246 0 /* bus specific DMA flags */ 247 }; 248 249 int32_t mrsas_max_cap_maxxfer = 0x1000000; 250 251 /* 252 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG, 253 * Limit size to 256K 254 */ 255 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512); 256 257 /* 258 * cb_ops contains base level routines 259 */ 260 static struct cb_ops mrsas_cb_ops = { 261 mrsas_open, /* open */ 262 mrsas_close, /* close */ 263 nodev, /* strategy */ 264 nodev, /* print */ 265 nodev, /* dump */ 266 nodev, /* read */ 267 nodev, /* write */ 268 mrsas_ioctl, /* ioctl */ 269 nodev, /* devmap */ 270 nodev, /* mmap */ 271 nodev, /* segmap */ 272 nochpoll, /* poll */ 273 nodev, /* cb_prop_op */ 274 0, /* streamtab */ 275 D_NEW | D_HOTPLUG, /* cb_flag */ 276 CB_REV, /* cb_rev */ 277 nodev, /* cb_aread */ 278 nodev /* cb_awrite */ 279 }; 280 281 /* 282 * dev_ops contains configuration routines 283 */ 284 static struct dev_ops mrsas_ops = { 285 DEVO_REV, /* rev, */ 286 0, /* refcnt */ 287 mrsas_getinfo, /* getinfo */ 288 nulldev, /* identify */ 289 nulldev, /* probe */ 290 mrsas_attach, /* attach */ 291 mrsas_detach, /* detach */ 292 #ifdef __sparc 293 mrsas_reset, /* reset */ 294 #else /* __sparc */ 295 nodev, 296 #endif /* __sparc */ 297 &mrsas_cb_ops, /* char/block ops */ 298 NULL, /* bus ops */ 299 NULL, /* power */ 300 #ifdef __sparc 301 ddi_quiesce_not_needed 302 #else /* __sparc */ 303 mrsas_quiesce /* quiesce */ 304 #endif /* __sparc */ 305 }; 306 307 static struct modldrv modldrv = { 308 &mod_driverops, /* module type - driver */ 309 MRSAS_VERSION, 310 &mrsas_ops, /* driver ops */ 311 }; 312 313 static struct modlinkage modlinkage = { 314 MODREV_1, /* ml_rev - must be MODREV_1 */ 315 &modldrv, /* ml_linkage */ 316 NULL /* end of driver linkage */ 317 }; 318 319 static struct ddi_device_acc_attr endian_attr = { 320 DDI_DEVICE_ATTR_V1, 321 DDI_STRUCTURE_LE_ACC, 322 DDI_STRICTORDER_ACC, 323 DDI_DEFAULT_ACC 324 }; 325 326 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */ 327 unsigned int enable_fp = 1; 328 329 330 /* 331 * ************************************************************************** * 332 * * 333 * common entry points - for loadable kernel modules * 334 * * 335 * ************************************************************************** * 336 */ 337 338 /* 339 * _init - initialize a loadable module 340 * @void 341 * 342 * The driver should perform any one-time resource allocation or data 343 * initialization during driver loading in _init(). For example, the driver 344 * should initialize any mutexes global to the driver in this routine. 345 * The driver should not, however, use _init() to allocate or initialize 346 * anything that has to do with a particular instance of the device. 347 * Per-instance initialization must be done in attach(). 348 */ 349 int 350 _init(void) 351 { 352 int ret; 353 354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 355 356 ret = ddi_soft_state_init(&mrsas_state, 357 sizeof (struct mrsas_instance), 0); 358 359 if (ret != DDI_SUCCESS) { 360 cmn_err(CE_WARN, "mr_sas: could not init state"); 361 return (ret); 362 } 363 364 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) { 365 cmn_err(CE_WARN, "mr_sas: could not init scsi hba"); 366 ddi_soft_state_fini(&mrsas_state); 367 return (ret); 368 } 369 370 ret = mod_install(&modlinkage); 371 372 if (ret != DDI_SUCCESS) { 373 cmn_err(CE_WARN, "mr_sas: mod_install failed"); 374 scsi_hba_fini(&modlinkage); 375 ddi_soft_state_fini(&mrsas_state); 376 } 377 378 return (ret); 379 } 380 381 /* 382 * _info - returns information about a loadable module. 383 * @void 384 * 385 * _info() is called to return module information. This is a typical entry 386 * point that does predefined role. It simply calls mod_info(). 387 */ 388 int 389 _info(struct modinfo *modinfop) 390 { 391 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 392 393 return (mod_info(&modlinkage, modinfop)); 394 } 395 396 /* 397 * _fini - prepare a loadable module for unloading 398 * @void 399 * 400 * In _fini(), the driver should release any resources that were allocated in 401 * _init(). The driver must remove itself from the system module list. 402 */ 403 int 404 _fini(void) 405 { 406 int ret; 407 408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 409 410 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) { 411 con_log(CL_ANN1, 412 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret)); 413 return (ret); 414 } 415 416 scsi_hba_fini(&modlinkage); 417 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done.")); 418 419 ddi_soft_state_fini(&mrsas_state); 420 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done.")); 421 422 return (ret); 423 } 424 425 426 /* 427 * ************************************************************************** * 428 * * 429 * common entry points - for autoconfiguration * 430 * * 431 * ************************************************************************** * 432 */ 433 /* 434 * attach - adds a device to the system as part of initialization 435 * @dip: 436 * @cmd: 437 * 438 * The kernel calls a driver's attach() entry point to attach an instance of 439 * a device (for MegaRAID, it is instance of a controller) or to resume 440 * operation for an instance of a device that has been suspended or has been 441 * shut down by the power management framework 442 * The attach() entry point typically includes the following types of 443 * processing: 444 * - allocate a soft-state structure for the device instance (for MegaRAID, 445 * controller instance) 446 * - initialize per-instance mutexes 447 * - initialize condition variables 448 * - register the device's interrupts (for MegaRAID, controller's interrupts) 449 * - map the registers and memory of the device instance (for MegaRAID, 450 * controller instance) 451 * - create minor device nodes for the device instance (for MegaRAID, 452 * controller instance) 453 * - report that the device instance (for MegaRAID, controller instance) has 454 * attached 455 */ 456 static int 457 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 458 { 459 int instance_no; 460 int nregs; 461 int i = 0; 462 uint8_t irq; 463 uint16_t vendor_id; 464 uint16_t device_id; 465 uint16_t subsysvid; 466 uint16_t subsysid; 467 uint16_t command; 468 off_t reglength = 0; 469 int intr_types = 0; 470 char *data; 471 472 scsi_hba_tran_t *tran; 473 ddi_dma_attr_t tran_dma_attr; 474 struct mrsas_instance *instance; 475 476 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 477 478 /* CONSTCOND */ 479 ASSERT(NO_COMPETING_THREADS); 480 481 instance_no = ddi_get_instance(dip); 482 483 /* 484 * check to see whether this device is in a DMA-capable slot. 485 */ 486 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 487 dev_err(dip, CE_WARN, "Device in slave-only slot, unused"); 488 return (DDI_FAILURE); 489 } 490 491 switch (cmd) { 492 case DDI_ATTACH: 493 /* allocate the soft state for the instance */ 494 if (ddi_soft_state_zalloc(mrsas_state, instance_no) 495 != DDI_SUCCESS) { 496 dev_err(dip, CE_WARN, "Failed to allocate soft state"); 497 return (DDI_FAILURE); 498 } 499 500 instance = (struct mrsas_instance *)ddi_get_soft_state 501 (mrsas_state, instance_no); 502 503 if (instance == NULL) { 504 dev_err(dip, CE_WARN, "Bad soft state"); 505 ddi_soft_state_free(mrsas_state, instance_no); 506 return (DDI_FAILURE); 507 } 508 509 instance->unroll.softs = 1; 510 511 /* Setup the PCI configuration space handles */ 512 if (pci_config_setup(dip, &instance->pci_handle) != 513 DDI_SUCCESS) { 514 dev_err(dip, CE_WARN, "pci config setup failed"); 515 516 ddi_soft_state_free(mrsas_state, instance_no); 517 return (DDI_FAILURE); 518 } 519 520 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) { 521 dev_err(dip, CE_WARN, "Failed to get registers"); 522 523 pci_config_teardown(&instance->pci_handle); 524 ddi_soft_state_free(mrsas_state, instance_no); 525 return (DDI_FAILURE); 526 } 527 528 vendor_id = pci_config_get16(instance->pci_handle, 529 PCI_CONF_VENID); 530 device_id = pci_config_get16(instance->pci_handle, 531 PCI_CONF_DEVID); 532 533 subsysvid = pci_config_get16(instance->pci_handle, 534 PCI_CONF_SUBVENID); 535 subsysid = pci_config_get16(instance->pci_handle, 536 PCI_CONF_SUBSYSID); 537 538 pci_config_put16(instance->pci_handle, PCI_CONF_COMM, 539 (pci_config_get16(instance->pci_handle, 540 PCI_CONF_COMM) | PCI_COMM_ME)); 541 irq = pci_config_get8(instance->pci_handle, 542 PCI_CONF_ILINE); 543 544 dev_err(dip, CE_CONT, 545 "?0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n", 546 vendor_id, device_id, subsysvid, 547 subsysid, irq, MRSAS_VERSION); 548 549 /* enable bus-mastering */ 550 command = pci_config_get16(instance->pci_handle, 551 PCI_CONF_COMM); 552 553 if (!(command & PCI_COMM_ME)) { 554 command |= PCI_COMM_ME; 555 556 pci_config_put16(instance->pci_handle, 557 PCI_CONF_COMM, command); 558 559 con_log(CL_ANN, (CE_CONT, "mr_sas%d: " 560 "enable bus-mastering", instance_no)); 561 } else { 562 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: " 563 "bus-mastering already set", instance_no)); 564 } 565 566 /* initialize function pointers */ 567 switch (device_id) { 568 case PCI_DEVICE_ID_LSI_INVADER: 569 case PCI_DEVICE_ID_LSI_FURY: 570 case PCI_DEVICE_ID_LSI_INTRUDER: 571 case PCI_DEVICE_ID_LSI_INTRUDER_24: 572 case PCI_DEVICE_ID_LSI_CUTLASS_52: 573 case PCI_DEVICE_ID_LSI_CUTLASS_53: 574 dev_err(dip, CE_CONT, "?Gen3 device detected\n"); 575 instance->gen3 = 1; 576 /* FALLTHROUGH */ 577 case PCI_DEVICE_ID_LSI_TBOLT: 578 dev_err(dip, CE_CONT, "?TBOLT device detected\n"); 579 580 instance->func_ptr = 581 &mrsas_function_template_fusion; 582 instance->tbolt = 1; 583 break; 584 585 case PCI_DEVICE_ID_LSI_SKINNY: 586 case PCI_DEVICE_ID_LSI_SKINNY_NEW: 587 /* 588 * FALLTHRU to PPC-style functions, but mark this 589 * instance as Skinny, because the register set is 590 * slightly different (See WR_IB_PICK_QPORT), and 591 * certain other features are available to a Skinny 592 * HBA. 593 */ 594 dev_err(dip, CE_CONT, "?Skinny device detected\n"); 595 instance->skinny = 1; 596 /* FALLTHRU */ 597 598 case PCI_DEVICE_ID_LSI_2108VDE: 599 case PCI_DEVICE_ID_LSI_2108V: 600 dev_err(dip, CE_CONT, 601 "?2108 Liberator device detected\n"); 602 603 instance->func_ptr = 604 &mrsas_function_template_ppc; 605 break; 606 607 default: 608 dev_err(dip, CE_WARN, "Invalid device detected"); 609 610 pci_config_teardown(&instance->pci_handle); 611 ddi_soft_state_free(mrsas_state, instance_no); 612 return (DDI_FAILURE); 613 } 614 615 instance->baseaddress = pci_config_get32( 616 instance->pci_handle, PCI_CONF_BASE0); 617 instance->baseaddress &= 0x0fffc; 618 619 instance->dip = dip; 620 instance->vendor_id = vendor_id; 621 instance->device_id = device_id; 622 instance->subsysvid = subsysvid; 623 instance->subsysid = subsysid; 624 instance->instance = instance_no; 625 626 /* Initialize FMA */ 627 instance->fm_capabilities = ddi_prop_get_int( 628 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS, 629 "fm-capable", DDI_FM_EREPORT_CAPABLE | 630 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE 631 | DDI_FM_ERRCB_CAPABLE); 632 633 mrsas_fm_init(instance); 634 635 /* Setup register map */ 636 if ((ddi_dev_regsize(instance->dip, 637 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) || 638 reglength < MINIMUM_MFI_MEM_SZ) { 639 goto fail_attach; 640 } 641 if (reglength > DEFAULT_MFI_MEM_SZ) { 642 reglength = DEFAULT_MFI_MEM_SZ; 643 con_log(CL_DLEVEL1, (CE_NOTE, 644 "mr_sas: register length to map is 0x%lx bytes", 645 reglength)); 646 } 647 if (ddi_regs_map_setup(instance->dip, 648 REGISTER_SET_IO_2108, &instance->regmap, 0, 649 reglength, &endian_attr, &instance->regmap_handle) 650 != DDI_SUCCESS) { 651 dev_err(dip, CE_WARN, "couldn't map control registers"); 652 goto fail_attach; 653 } 654 655 instance->unroll.regs = 1; 656 657 /* 658 * Disable Interrupt Now. 659 * Setup Software interrupt 660 */ 661 instance->func_ptr->disable_intr(instance); 662 663 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 664 "mrsas-enable-msi", &data) == DDI_SUCCESS) { 665 if (strncmp(data, "no", 3) == 0) { 666 msi_enable = 0; 667 con_log(CL_ANN1, (CE_WARN, 668 "msi_enable = %d disabled", msi_enable)); 669 } 670 ddi_prop_free(data); 671 } 672 673 dev_err(dip, CE_CONT, "?msi_enable = %d\n", msi_enable); 674 675 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 676 "mrsas-enable-fp", &data) == DDI_SUCCESS) { 677 if (strncmp(data, "no", 3) == 0) { 678 enable_fp = 0; 679 dev_err(dip, CE_NOTE, 680 "enable_fp = %d, Fast-Path disabled.\n", 681 enable_fp); 682 } 683 684 ddi_prop_free(data); 685 } 686 687 dev_err(dip, CE_CONT, "?enable_fp = %d\n", enable_fp); 688 689 /* Check for all supported interrupt types */ 690 if (ddi_intr_get_supported_types( 691 dip, &intr_types) != DDI_SUCCESS) { 692 dev_err(dip, CE_WARN, 693 "ddi_intr_get_supported_types() failed"); 694 goto fail_attach; 695 } 696 697 con_log(CL_DLEVEL1, (CE_NOTE, 698 "ddi_intr_get_supported_types() ret: 0x%x", intr_types)); 699 700 /* Initialize and Setup Interrupt handler */ 701 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) { 702 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) != 703 DDI_SUCCESS) { 704 dev_err(dip, CE_WARN, 705 "MSIX interrupt query failed"); 706 goto fail_attach; 707 } 708 instance->intr_type = DDI_INTR_TYPE_MSIX; 709 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) { 710 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) != 711 DDI_SUCCESS) { 712 dev_err(dip, CE_WARN, 713 "MSI interrupt query failed"); 714 goto fail_attach; 715 } 716 instance->intr_type = DDI_INTR_TYPE_MSI; 717 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 718 msi_enable = 0; 719 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) != 720 DDI_SUCCESS) { 721 dev_err(dip, CE_WARN, 722 "FIXED interrupt query failed"); 723 goto fail_attach; 724 } 725 instance->intr_type = DDI_INTR_TYPE_FIXED; 726 } else { 727 dev_err(dip, CE_WARN, "Device cannot " 728 "suppport either FIXED or MSI/X " 729 "interrupts"); 730 goto fail_attach; 731 } 732 733 instance->unroll.intr = 1; 734 735 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 736 "mrsas-enable-ctio", &data) == DDI_SUCCESS) { 737 if (strncmp(data, "no", 3) == 0) { 738 ctio_enable = 0; 739 con_log(CL_ANN1, (CE_WARN, 740 "ctio_enable = %d disabled", ctio_enable)); 741 } 742 ddi_prop_free(data); 743 } 744 745 dev_err(dip, CE_CONT, "?ctio_enable = %d\n", ctio_enable); 746 747 /* setup the mfi based low level driver */ 748 if (mrsas_init_adapter(instance) != DDI_SUCCESS) { 749 dev_err(dip, CE_WARN, 750 "could not initialize the low level driver"); 751 752 goto fail_attach; 753 } 754 755 /* Initialize all Mutex */ 756 INIT_LIST_HEAD(&instance->completed_pool_list); 757 mutex_init(&instance->completed_pool_mtx, NULL, 758 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 759 760 mutex_init(&instance->sync_map_mtx, NULL, 761 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 762 763 mutex_init(&instance->app_cmd_pool_mtx, NULL, 764 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 765 766 mutex_init(&instance->config_dev_mtx, NULL, 767 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 768 769 mutex_init(&instance->cmd_pend_mtx, NULL, 770 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 771 772 mutex_init(&instance->ocr_flags_mtx, NULL, 773 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 774 775 mutex_init(&instance->int_cmd_mtx, NULL, 776 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 777 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL); 778 779 mutex_init(&instance->cmd_pool_mtx, NULL, 780 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 781 782 mutex_init(&instance->reg_write_mtx, NULL, 783 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 784 785 if (instance->tbolt) { 786 mutex_init(&instance->cmd_app_pool_mtx, NULL, 787 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 788 789 mutex_init(&instance->chip_mtx, NULL, 790 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri)); 791 792 } 793 794 instance->unroll.mutexs = 1; 795 796 instance->timeout_id = (timeout_id_t)-1; 797 798 /* Register our soft-isr for highlevel interrupts. */ 799 instance->isr_level = instance->intr_pri; 800 if (!(instance->tbolt)) { 801 if (instance->isr_level == HIGH_LEVEL_INTR) { 802 if (ddi_add_softintr(dip, 803 DDI_SOFTINT_HIGH, 804 &instance->soft_intr_id, NULL, NULL, 805 mrsas_softintr, (caddr_t)instance) != 806 DDI_SUCCESS) { 807 dev_err(dip, CE_WARN, 808 "Software ISR did not register"); 809 810 goto fail_attach; 811 } 812 813 instance->unroll.soft_isr = 1; 814 815 } 816 } 817 818 instance->softint_running = 0; 819 820 /* Allocate a transport structure */ 821 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 822 823 if (tran == NULL) { 824 dev_err(dip, CE_WARN, 825 "scsi_hba_tran_alloc failed"); 826 goto fail_attach; 827 } 828 829 instance->tran = tran; 830 instance->unroll.tran = 1; 831 832 tran->tran_hba_private = instance; 833 tran->tran_tgt_init = mrsas_tran_tgt_init; 834 tran->tran_tgt_probe = scsi_hba_probe; 835 tran->tran_tgt_free = mrsas_tran_tgt_free; 836 tran->tran_init_pkt = mrsas_tran_init_pkt; 837 if (instance->tbolt) 838 tran->tran_start = mrsas_tbolt_tran_start; 839 else 840 tran->tran_start = mrsas_tran_start; 841 tran->tran_abort = mrsas_tran_abort; 842 tran->tran_reset = mrsas_tran_reset; 843 tran->tran_getcap = mrsas_tran_getcap; 844 tran->tran_setcap = mrsas_tran_setcap; 845 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt; 846 tran->tran_dmafree = mrsas_tran_dmafree; 847 tran->tran_sync_pkt = mrsas_tran_sync_pkt; 848 tran->tran_quiesce = mrsas_tran_quiesce; 849 tran->tran_unquiesce = mrsas_tran_unquiesce; 850 tran->tran_bus_config = mrsas_tran_bus_config; 851 852 if (mrsas_relaxed_ordering) 853 mrsas_generic_dma_attr.dma_attr_flags |= 854 DDI_DMA_RELAXED_ORDERING; 855 856 857 tran_dma_attr = mrsas_generic_dma_attr; 858 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge; 859 860 /* Attach this instance of the hba */ 861 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0) 862 != DDI_SUCCESS) { 863 dev_err(dip, CE_WARN, 864 "scsi_hba_attach failed"); 865 866 goto fail_attach; 867 } 868 instance->unroll.tranSetup = 1; 869 con_log(CL_ANN1, 870 (CE_CONT, "scsi_hba_attach_setup() done.")); 871 872 /* create devctl node for cfgadm command */ 873 if (ddi_create_minor_node(dip, "devctl", 874 S_IFCHR, INST2DEVCTL(instance_no), 875 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) { 876 dev_err(dip, CE_WARN, "failed to create devctl node."); 877 878 goto fail_attach; 879 } 880 881 instance->unroll.devctl = 1; 882 883 /* create scsi node for cfgadm command */ 884 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, 885 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) == 886 DDI_FAILURE) { 887 dev_err(dip, CE_WARN, "failed to create scsi node."); 888 889 goto fail_attach; 890 } 891 892 instance->unroll.scsictl = 1; 893 894 (void) sprintf(instance->iocnode, "%d:lsirdctl", instance_no); 895 896 /* 897 * Create a node for applications 898 * for issuing ioctl to the driver. 899 */ 900 if (ddi_create_minor_node(dip, instance->iocnode, 901 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) == 902 DDI_FAILURE) { 903 dev_err(dip, CE_WARN, "failed to create ioctl node."); 904 905 goto fail_attach; 906 } 907 908 instance->unroll.ioctl = 1; 909 910 /* Create a taskq to handle dr events */ 911 if ((instance->taskq = ddi_taskq_create(dip, 912 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 913 dev_err(dip, CE_WARN, "failed to create taskq."); 914 instance->taskq = NULL; 915 goto fail_attach; 916 } 917 instance->unroll.taskq = 1; 918 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done.")); 919 920 /* enable interrupt */ 921 instance->func_ptr->enable_intr(instance); 922 923 /* initiate AEN */ 924 if (start_mfi_aen(instance)) { 925 dev_err(dip, CE_WARN, "failed to initiate AEN."); 926 goto fail_attach; 927 } 928 instance->unroll.aenPend = 1; 929 con_log(CL_ANN1, 930 (CE_CONT, "AEN started for instance %d.", instance_no)); 931 932 /* Finally! We are on the air. */ 933 ddi_report_dev(dip); 934 935 /* FMA handle checking. */ 936 if (mrsas_check_acc_handle(instance->regmap_handle) != 937 DDI_SUCCESS) { 938 goto fail_attach; 939 } 940 if (mrsas_check_acc_handle(instance->pci_handle) != 941 DDI_SUCCESS) { 942 goto fail_attach; 943 } 944 945 instance->mr_ld_list = 946 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld), 947 KM_SLEEP); 948 instance->unroll.ldlist_buff = 1; 949 950 #ifdef PDSUPPORT 951 if (instance->tbolt || instance->skinny) { 952 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX; 953 instance->mr_tbolt_pd_list = 954 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) * 955 sizeof (struct mrsas_tbolt_pd), KM_SLEEP); 956 ASSERT(instance->mr_tbolt_pd_list); 957 for (i = 0; i < instance->mr_tbolt_pd_max; i++) { 958 instance->mr_tbolt_pd_list[i].lun_type = 959 MRSAS_TBOLT_PD_LUN; 960 instance->mr_tbolt_pd_list[i].dev_id = 961 (uint8_t)i; 962 } 963 964 instance->unroll.pdlist_buff = 1; 965 } 966 #endif 967 break; 968 case DDI_PM_RESUME: 969 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME")); 970 break; 971 case DDI_RESUME: 972 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME")); 973 break; 974 default: 975 con_log(CL_ANN, 976 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd)); 977 return (DDI_FAILURE); 978 } 979 980 981 con_log(CL_DLEVEL1, 982 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d", 983 instance_no)); 984 return (DDI_SUCCESS); 985 986 fail_attach: 987 988 mrsas_undo_resources(dip, instance); 989 990 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 991 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 992 993 mrsas_fm_fini(instance); 994 995 pci_config_teardown(&instance->pci_handle); 996 ddi_soft_state_free(mrsas_state, instance_no); 997 998 return (DDI_FAILURE); 999 } 1000 1001 /* 1002 * getinfo - gets device information 1003 * @dip: 1004 * @cmd: 1005 * @arg: 1006 * @resultp: 1007 * 1008 * The system calls getinfo() to obtain configuration information that only 1009 * the driver knows. The mapping of minor numbers to device instance is 1010 * entirely under the control of the driver. The system sometimes needs to ask 1011 * the driver which device a particular dev_t represents. 1012 * Given the device number return the devinfo pointer from the scsi_device 1013 * structure. 1014 */ 1015 /*ARGSUSED*/ 1016 static int 1017 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 1018 { 1019 int rval; 1020 int mrsas_minor = getminor((dev_t)arg); 1021 1022 struct mrsas_instance *instance; 1023 1024 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1025 1026 switch (cmd) { 1027 case DDI_INFO_DEVT2DEVINFO: 1028 instance = (struct mrsas_instance *) 1029 ddi_get_soft_state(mrsas_state, 1030 MINOR2INST(mrsas_minor)); 1031 1032 if (instance == NULL) { 1033 *resultp = NULL; 1034 rval = DDI_FAILURE; 1035 } else { 1036 *resultp = instance->dip; 1037 rval = DDI_SUCCESS; 1038 } 1039 break; 1040 case DDI_INFO_DEVT2INSTANCE: 1041 *resultp = (void *)(intptr_t) 1042 (MINOR2INST(getminor((dev_t)arg))); 1043 rval = DDI_SUCCESS; 1044 break; 1045 default: 1046 *resultp = NULL; 1047 rval = DDI_FAILURE; 1048 } 1049 1050 return (rval); 1051 } 1052 1053 /* 1054 * detach - detaches a device from the system 1055 * @dip: pointer to the device's dev_info structure 1056 * @cmd: type of detach 1057 * 1058 * A driver's detach() entry point is called to detach an instance of a device 1059 * that is bound to the driver. The entry point is called with the instance of 1060 * the device node to be detached and with DDI_DETACH, which is specified as 1061 * the cmd argument to the entry point. 1062 * This routine is called during driver unload. We free all the allocated 1063 * resources and call the corresponding LLD so that it can also release all 1064 * its resources. 1065 */ 1066 static int 1067 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1068 { 1069 int instance_no; 1070 1071 struct mrsas_instance *instance; 1072 1073 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1074 1075 1076 /* CONSTCOND */ 1077 ASSERT(NO_COMPETING_THREADS); 1078 1079 instance_no = ddi_get_instance(dip); 1080 1081 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state, 1082 instance_no); 1083 1084 if (!instance) { 1085 dev_err(dip, CE_WARN, "could not get instance in detach"); 1086 1087 return (DDI_FAILURE); 1088 } 1089 1090 switch (cmd) { 1091 case DDI_DETACH: 1092 con_log(CL_ANN, (CE_NOTE, 1093 "mrsas_detach: DDI_DETACH")); 1094 1095 mutex_enter(&instance->config_dev_mtx); 1096 if (instance->timeout_id != (timeout_id_t)-1) { 1097 mutex_exit(&instance->config_dev_mtx); 1098 (void) untimeout(instance->timeout_id); 1099 instance->timeout_id = (timeout_id_t)-1; 1100 mutex_enter(&instance->config_dev_mtx); 1101 instance->unroll.timer = 0; 1102 } 1103 mutex_exit(&instance->config_dev_mtx); 1104 1105 if (instance->unroll.tranSetup == 1) { 1106 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 1107 dev_err(dip, CE_WARN, 1108 "failed to detach"); 1109 return (DDI_FAILURE); 1110 } 1111 instance->unroll.tranSetup = 0; 1112 con_log(CL_ANN1, 1113 (CE_CONT, "scsi_hba_dettach() done.")); 1114 } 1115 1116 flush_cache(instance); 1117 1118 mrsas_undo_resources(dip, instance); 1119 1120 mrsas_fm_fini(instance); 1121 1122 pci_config_teardown(&instance->pci_handle); 1123 ddi_soft_state_free(mrsas_state, instance_no); 1124 break; 1125 1126 case DDI_PM_SUSPEND: 1127 con_log(CL_ANN, (CE_NOTE, 1128 "mrsas_detach: DDI_PM_SUSPEND")); 1129 1130 break; 1131 case DDI_SUSPEND: 1132 con_log(CL_ANN, (CE_NOTE, 1133 "mrsas_detach: DDI_SUSPEND")); 1134 1135 break; 1136 default: 1137 con_log(CL_ANN, (CE_WARN, 1138 "invalid detach command:0x%x", cmd)); 1139 return (DDI_FAILURE); 1140 } 1141 1142 return (DDI_SUCCESS); 1143 } 1144 1145 1146 static void 1147 mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance) 1148 { 1149 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1150 1151 if (instance->unroll.ioctl == 1) { 1152 ddi_remove_minor_node(dip, instance->iocnode); 1153 instance->unroll.ioctl = 0; 1154 } 1155 1156 if (instance->unroll.scsictl == 1) { 1157 ddi_remove_minor_node(dip, "scsi"); 1158 instance->unroll.scsictl = 0; 1159 } 1160 1161 if (instance->unroll.devctl == 1) { 1162 ddi_remove_minor_node(dip, "devctl"); 1163 instance->unroll.devctl = 0; 1164 } 1165 1166 if (instance->unroll.tranSetup == 1) { 1167 if (scsi_hba_detach(dip) != DDI_SUCCESS) { 1168 dev_err(dip, CE_WARN, "failed to detach"); 1169 return; /* DDI_FAILURE */ 1170 } 1171 instance->unroll.tranSetup = 0; 1172 con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done.")); 1173 } 1174 1175 if (instance->unroll.tran == 1) { 1176 scsi_hba_tran_free(instance->tran); 1177 instance->unroll.tran = 0; 1178 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done.")); 1179 } 1180 1181 if (instance->unroll.syncCmd == 1) { 1182 if (instance->tbolt) { 1183 if (abort_syncmap_cmd(instance, 1184 instance->map_update_cmd)) { 1185 dev_err(dip, CE_WARN, "mrsas_detach: " 1186 "failed to abort previous syncmap command"); 1187 } 1188 1189 instance->unroll.syncCmd = 0; 1190 con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done.")); 1191 } 1192 } 1193 1194 if (instance->unroll.aenPend == 1) { 1195 if (abort_aen_cmd(instance, instance->aen_cmd)) 1196 dev_err(dip, CE_WARN, "mrsas_detach: " 1197 "failed to abort prevous AEN command"); 1198 1199 instance->unroll.aenPend = 0; 1200 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done.")); 1201 /* This means the controller is fully initialized and running */ 1202 /* Shutdown should be a last command to controller. */ 1203 /* shutdown_controller(); */ 1204 } 1205 1206 1207 if (instance->unroll.timer == 1) { 1208 if (instance->timeout_id != (timeout_id_t)-1) { 1209 (void) untimeout(instance->timeout_id); 1210 instance->timeout_id = (timeout_id_t)-1; 1211 1212 instance->unroll.timer = 0; 1213 } 1214 } 1215 1216 instance->func_ptr->disable_intr(instance); 1217 1218 1219 if (instance->unroll.mutexs == 1) { 1220 mutex_destroy(&instance->cmd_pool_mtx); 1221 mutex_destroy(&instance->app_cmd_pool_mtx); 1222 mutex_destroy(&instance->cmd_pend_mtx); 1223 mutex_destroy(&instance->completed_pool_mtx); 1224 mutex_destroy(&instance->sync_map_mtx); 1225 mutex_destroy(&instance->int_cmd_mtx); 1226 cv_destroy(&instance->int_cmd_cv); 1227 mutex_destroy(&instance->config_dev_mtx); 1228 mutex_destroy(&instance->ocr_flags_mtx); 1229 mutex_destroy(&instance->reg_write_mtx); 1230 1231 if (instance->tbolt) { 1232 mutex_destroy(&instance->cmd_app_pool_mtx); 1233 mutex_destroy(&instance->chip_mtx); 1234 } 1235 1236 instance->unroll.mutexs = 0; 1237 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done.")); 1238 } 1239 1240 1241 if (instance->unroll.soft_isr == 1) { 1242 ddi_remove_softintr(instance->soft_intr_id); 1243 instance->unroll.soft_isr = 0; 1244 } 1245 1246 if (instance->unroll.intr == 1) { 1247 mrsas_rem_intrs(instance); 1248 instance->unroll.intr = 0; 1249 } 1250 1251 1252 if (instance->unroll.taskq == 1) { 1253 if (instance->taskq) { 1254 ddi_taskq_destroy(instance->taskq); 1255 instance->unroll.taskq = 0; 1256 } 1257 1258 } 1259 1260 /* 1261 * free dma memory allocated for 1262 * cmds/frames/queues/driver version etc 1263 */ 1264 if (instance->unroll.verBuff == 1) { 1265 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); 1266 instance->unroll.verBuff = 0; 1267 } 1268 1269 if (instance->unroll.pdlist_buff == 1) { 1270 if (instance->mr_tbolt_pd_list != NULL) { 1271 kmem_free(instance->mr_tbolt_pd_list, 1272 MRSAS_TBOLT_GET_PD_MAX(instance) * 1273 sizeof (struct mrsas_tbolt_pd)); 1274 } 1275 1276 instance->mr_tbolt_pd_list = NULL; 1277 instance->unroll.pdlist_buff = 0; 1278 } 1279 1280 if (instance->unroll.ldlist_buff == 1) { 1281 if (instance->mr_ld_list != NULL) { 1282 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD 1283 * sizeof (struct mrsas_ld)); 1284 } 1285 1286 instance->mr_ld_list = NULL; 1287 instance->unroll.ldlist_buff = 0; 1288 } 1289 1290 if (instance->tbolt) { 1291 if (instance->unroll.alloc_space_mpi2 == 1) { 1292 free_space_for_mpi2(instance); 1293 instance->unroll.alloc_space_mpi2 = 0; 1294 } 1295 } else { 1296 if (instance->unroll.alloc_space_mfi == 1) { 1297 free_space_for_mfi(instance); 1298 instance->unroll.alloc_space_mfi = 0; 1299 } 1300 } 1301 1302 if (instance->unroll.regs == 1) { 1303 ddi_regs_map_free(&instance->regmap_handle); 1304 instance->unroll.regs = 0; 1305 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done.")); 1306 } 1307 } 1308 1309 1310 1311 /* 1312 * ************************************************************************** * 1313 * * 1314 * common entry points - for character driver types * 1315 * * 1316 * ************************************************************************** * 1317 */ 1318 /* 1319 * open - gets access to a device 1320 * @dev: 1321 * @openflags: 1322 * @otyp: 1323 * @credp: 1324 * 1325 * Access to a device by one or more application programs is controlled 1326 * through the open() and close() entry points. The primary function of 1327 * open() is to verify that the open request is allowed. 1328 */ 1329 static int 1330 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 1331 { 1332 int rval = 0; 1333 1334 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1335 1336 /* Check root permissions */ 1337 if (drv_priv(credp) != 0) { 1338 con_log(CL_ANN, (CE_WARN, 1339 "mr_sas: Non-root ioctl access denied!")); 1340 return (EPERM); 1341 } 1342 1343 /* Verify we are being opened as a character device */ 1344 if (otyp != OTYP_CHR) { 1345 con_log(CL_ANN, (CE_WARN, 1346 "mr_sas: ioctl node must be a char node")); 1347 return (EINVAL); 1348 } 1349 1350 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev))) 1351 == NULL) { 1352 return (ENXIO); 1353 } 1354 1355 if (scsi_hba_open) { 1356 rval = scsi_hba_open(dev, openflags, otyp, credp); 1357 } 1358 1359 return (rval); 1360 } 1361 1362 /* 1363 * close - gives up access to a device 1364 * @dev: 1365 * @openflags: 1366 * @otyp: 1367 * @credp: 1368 * 1369 * close() should perform any cleanup necessary to finish using the minor 1370 * device, and prepare the device (and driver) to be opened again. 1371 */ 1372 static int 1373 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp) 1374 { 1375 int rval = 0; 1376 1377 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1378 1379 /* no need for locks! */ 1380 1381 if (scsi_hba_close) { 1382 rval = scsi_hba_close(dev, openflags, otyp, credp); 1383 } 1384 1385 return (rval); 1386 } 1387 1388 /* 1389 * ioctl - performs a range of I/O commands for character drivers 1390 * @dev: 1391 * @cmd: 1392 * @arg: 1393 * @mode: 1394 * @credp: 1395 * @rvalp: 1396 * 1397 * ioctl() routine must make sure that user data is copied into or out of the 1398 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(), 1399 * and ddi_copyout(), as appropriate. 1400 * This is a wrapper routine to serialize access to the actual ioctl routine. 1401 * ioctl() should return 0 on success, or the appropriate error number. The 1402 * driver may also set the value returned to the calling process through rvalp. 1403 */ 1404 1405 static int 1406 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 1407 int *rvalp) 1408 { 1409 int rval = 0; 1410 1411 struct mrsas_instance *instance; 1412 struct mrsas_ioctl *ioctl; 1413 struct mrsas_aen aen; 1414 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1415 1416 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev))); 1417 1418 if (instance == NULL) { 1419 /* invalid minor number */ 1420 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found.")); 1421 return (ENXIO); 1422 } 1423 1424 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl), 1425 KM_SLEEP); 1426 ASSERT(ioctl); 1427 1428 switch ((uint_t)cmd) { 1429 case MRSAS_IOCTL_FIRMWARE: 1430 if (ddi_copyin((void *)arg, ioctl, 1431 sizeof (struct mrsas_ioctl), mode)) { 1432 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: " 1433 "ERROR IOCTL copyin")); 1434 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1435 return (EFAULT); 1436 } 1437 1438 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) { 1439 rval = handle_drv_ioctl(instance, ioctl, mode); 1440 } else { 1441 rval = handle_mfi_ioctl(instance, ioctl, mode); 1442 } 1443 1444 if (ddi_copyout((void *)ioctl, (void *)arg, 1445 (sizeof (struct mrsas_ioctl) - 1), mode)) { 1446 con_log(CL_ANN, (CE_WARN, 1447 "mrsas_ioctl: copy_to_user failed")); 1448 rval = 1; 1449 } 1450 1451 break; 1452 case MRSAS_IOCTL_AEN: 1453 if (ddi_copyin((void *) arg, &aen, 1454 sizeof (struct mrsas_aen), mode)) { 1455 con_log(CL_ANN, (CE_WARN, 1456 "mrsas_ioctl: ERROR AEN copyin")); 1457 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1458 return (EFAULT); 1459 } 1460 1461 rval = handle_mfi_aen(instance, &aen); 1462 1463 if (ddi_copyout((void *) &aen, (void *)arg, 1464 sizeof (struct mrsas_aen), mode)) { 1465 con_log(CL_ANN, (CE_WARN, 1466 "mrsas_ioctl: copy_to_user failed")); 1467 rval = 1; 1468 } 1469 1470 break; 1471 default: 1472 rval = scsi_hba_ioctl(dev, cmd, arg, 1473 mode, credp, rvalp); 1474 1475 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: " 1476 "scsi_hba_ioctl called, ret = %x.", rval)); 1477 } 1478 1479 kmem_free(ioctl, sizeof (struct mrsas_ioctl)); 1480 return (rval); 1481 } 1482 1483 /* 1484 * ************************************************************************** * 1485 * * 1486 * common entry points - for block driver types * 1487 * * 1488 * ************************************************************************** * 1489 */ 1490 #ifdef __sparc 1491 /* 1492 * reset - TBD 1493 * @dip: 1494 * @cmd: 1495 * 1496 * TBD 1497 */ 1498 /*ARGSUSED*/ 1499 static int 1500 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 1501 { 1502 int instance_no; 1503 1504 struct mrsas_instance *instance; 1505 1506 instance_no = ddi_get_instance(dip); 1507 instance = (struct mrsas_instance *)ddi_get_soft_state 1508 (mrsas_state, instance_no); 1509 1510 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1511 1512 if (!instance) { 1513 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter " 1514 "in reset", instance_no)); 1515 return (DDI_FAILURE); 1516 } 1517 1518 instance->func_ptr->disable_intr(instance); 1519 1520 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d", 1521 instance_no)); 1522 1523 flush_cache(instance); 1524 1525 return (DDI_SUCCESS); 1526 } 1527 #else /* __sparc */ 1528 /*ARGSUSED*/ 1529 static int 1530 mrsas_quiesce(dev_info_t *dip) 1531 { 1532 int instance_no; 1533 1534 struct mrsas_instance *instance; 1535 1536 instance_no = ddi_get_instance(dip); 1537 instance = (struct mrsas_instance *)ddi_get_soft_state 1538 (mrsas_state, instance_no); 1539 1540 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1541 1542 if (!instance) { 1543 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter " 1544 "in quiesce", instance_no)); 1545 return (DDI_FAILURE); 1546 } 1547 if (instance->deadadapter || instance->adapterresetinprogress) { 1548 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in " 1549 "healthy state", instance_no)); 1550 return (DDI_FAILURE); 1551 } 1552 1553 if (abort_aen_cmd(instance, instance->aen_cmd)) { 1554 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: " 1555 "failed to abort prevous AEN command QUIESCE")); 1556 } 1557 1558 if (instance->tbolt) { 1559 if (abort_syncmap_cmd(instance, 1560 instance->map_update_cmd)) { 1561 dev_err(dip, CE_WARN, 1562 "mrsas_detach: failed to abort " 1563 "previous syncmap command"); 1564 return (DDI_FAILURE); 1565 } 1566 } 1567 1568 instance->func_ptr->disable_intr(instance); 1569 1570 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d", 1571 instance_no)); 1572 1573 flush_cache(instance); 1574 1575 if (wait_for_outstanding(instance)) { 1576 con_log(CL_ANN1, 1577 (CE_CONT, "wait_for_outstanding: return FAIL.\n")); 1578 return (DDI_FAILURE); 1579 } 1580 return (DDI_SUCCESS); 1581 } 1582 #endif /* __sparc */ 1583 1584 /* 1585 * ************************************************************************** * 1586 * * 1587 * entry points (SCSI HBA) * 1588 * * 1589 * ************************************************************************** * 1590 */ 1591 /* 1592 * tran_tgt_init - initialize a target device instance 1593 * @hba_dip: 1594 * @tgt_dip: 1595 * @tran: 1596 * @sd: 1597 * 1598 * The tran_tgt_init() entry point enables the HBA to allocate and initialize 1599 * any per-target resources. tran_tgt_init() also enables the HBA to qualify 1600 * the device's address as valid and supportable for that particular HBA. 1601 * By returning DDI_FAILURE, the instance of the target driver for that device 1602 * is not probed or attached. 1603 */ 1604 /*ARGSUSED*/ 1605 static int 1606 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1607 scsi_hba_tran_t *tran, struct scsi_device *sd) 1608 { 1609 struct mrsas_instance *instance; 1610 uint16_t tgt = sd->sd_address.a_target; 1611 uint8_t lun = sd->sd_address.a_lun; 1612 dev_info_t *child = NULL; 1613 1614 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d", 1615 tgt, lun)); 1616 1617 instance = ADDR2MR(&sd->sd_address); 1618 1619 if (ndi_dev_is_persistent_node(tgt_dip) == 0) { 1620 /* 1621 * If no persistent node exists, we don't allow .conf node 1622 * to be created. 1623 */ 1624 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 1625 con_log(CL_DLEVEL2, 1626 (CE_NOTE, "mrsas_tgt_init find child =" 1627 " %p t = %d l = %d", (void *)child, tgt, lun)); 1628 if (ndi_merge_node(tgt_dip, mrsas_name_node) != 1629 DDI_SUCCESS) 1630 /* Create this .conf node */ 1631 return (DDI_SUCCESS); 1632 } 1633 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per " 1634 "DDI_FAILURE t = %d l = %d", tgt, lun)); 1635 return (DDI_FAILURE); 1636 1637 } 1638 1639 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p", 1640 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip)); 1641 1642 if (tgt < MRDRV_MAX_LD && lun == 0) { 1643 if (instance->mr_ld_list[tgt].dip == NULL && 1644 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) { 1645 mutex_enter(&instance->config_dev_mtx); 1646 instance->mr_ld_list[tgt].dip = tgt_dip; 1647 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN; 1648 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID; 1649 mutex_exit(&instance->config_dev_mtx); 1650 } 1651 } 1652 1653 #ifdef PDSUPPORT 1654 else if (instance->tbolt || instance->skinny) { 1655 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) { 1656 mutex_enter(&instance->config_dev_mtx); 1657 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip; 1658 instance->mr_tbolt_pd_list[tgt].flag = 1659 MRDRV_TGT_VALID; 1660 mutex_exit(&instance->config_dev_mtx); 1661 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:" 1662 "t%xl%x", tgt, lun)); 1663 } 1664 } 1665 #endif 1666 1667 return (DDI_SUCCESS); 1668 } 1669 1670 /*ARGSUSED*/ 1671 static void 1672 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 1673 scsi_hba_tran_t *hba_tran, struct scsi_device *sd) 1674 { 1675 struct mrsas_instance *instance; 1676 int tgt = sd->sd_address.a_target; 1677 int lun = sd->sd_address.a_lun; 1678 1679 instance = ADDR2MR(&sd->sd_address); 1680 1681 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun)); 1682 1683 if (tgt < MRDRV_MAX_LD && lun == 0) { 1684 if (instance->mr_ld_list[tgt].dip == tgt_dip) { 1685 mutex_enter(&instance->config_dev_mtx); 1686 instance->mr_ld_list[tgt].dip = NULL; 1687 mutex_exit(&instance->config_dev_mtx); 1688 } 1689 } 1690 1691 #ifdef PDSUPPORT 1692 else if (instance->tbolt || instance->skinny) { 1693 mutex_enter(&instance->config_dev_mtx); 1694 instance->mr_tbolt_pd_list[tgt].dip = NULL; 1695 mutex_exit(&instance->config_dev_mtx); 1696 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL" 1697 "for tgt:%x", tgt)); 1698 } 1699 #endif 1700 1701 } 1702 1703 dev_info_t * 1704 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun) 1705 { 1706 dev_info_t *child = NULL; 1707 char addr[SCSI_MAXNAMELEN]; 1708 char tmp[MAXNAMELEN]; 1709 1710 (void) sprintf(addr, "%x,%x", tgt, lun); 1711 for (child = ddi_get_child(instance->dip); child; 1712 child = ddi_get_next_sibling(child)) { 1713 1714 if (ndi_dev_is_persistent_node(child) == 0) { 1715 continue; 1716 } 1717 1718 if (mrsas_name_node(child, tmp, MAXNAMELEN) != 1719 DDI_SUCCESS) { 1720 continue; 1721 } 1722 1723 if (strcmp(addr, tmp) == 0) { 1724 break; 1725 } 1726 } 1727 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p", 1728 (void *)child)); 1729 return (child); 1730 } 1731 1732 /* 1733 * mrsas_name_node - 1734 * @dip: 1735 * @name: 1736 * @len: 1737 */ 1738 static int 1739 mrsas_name_node(dev_info_t *dip, char *name, int len) 1740 { 1741 int tgt, lun; 1742 1743 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1744 DDI_PROP_DONTPASS, "target", -1); 1745 con_log(CL_DLEVEL2, (CE_NOTE, 1746 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt)); 1747 if (tgt == -1) { 1748 return (DDI_FAILURE); 1749 } 1750 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1751 "lun", -1); 1752 con_log(CL_DLEVEL2, 1753 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun)); 1754 if (lun == -1) { 1755 return (DDI_FAILURE); 1756 } 1757 (void) snprintf(name, len, "%x,%x", tgt, lun); 1758 return (DDI_SUCCESS); 1759 } 1760 1761 /* 1762 * tran_init_pkt - allocate & initialize a scsi_pkt structure 1763 * @ap: 1764 * @pkt: 1765 * @bp: 1766 * @cmdlen: 1767 * @statuslen: 1768 * @tgtlen: 1769 * @flags: 1770 * @callback: 1771 * 1772 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt 1773 * structure and DMA resources for a target driver request. The 1774 * tran_init_pkt() entry point is called when the target driver calls the 1775 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point 1776 * is a request to perform one or more of three possible services: 1777 * - allocation and initialization of a scsi_pkt structure 1778 * - allocation of DMA resources for data transfer 1779 * - reallocation of DMA resources for the next portion of the data transfer 1780 */ 1781 static struct scsi_pkt * 1782 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt, 1783 struct buf *bp, int cmdlen, int statuslen, int tgtlen, 1784 int flags, int (*callback)(), caddr_t arg) 1785 { 1786 struct scsa_cmd *acmd; 1787 struct mrsas_instance *instance; 1788 struct scsi_pkt *new_pkt; 1789 1790 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1791 1792 instance = ADDR2MR(ap); 1793 1794 /* step #1 : pkt allocation */ 1795 if (pkt == NULL) { 1796 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen, 1797 tgtlen, sizeof (struct scsa_cmd), callback, arg); 1798 if (pkt == NULL) { 1799 return (NULL); 1800 } 1801 1802 acmd = PKT2CMD(pkt); 1803 1804 /* 1805 * Initialize the new pkt - we redundantly initialize 1806 * all the fields for illustrative purposes. 1807 */ 1808 acmd->cmd_pkt = pkt; 1809 acmd->cmd_flags = 0; 1810 acmd->cmd_scblen = statuslen; 1811 acmd->cmd_cdblen = cmdlen; 1812 acmd->cmd_dmahandle = NULL; 1813 acmd->cmd_ncookies = 0; 1814 acmd->cmd_cookie = 0; 1815 acmd->cmd_cookiecnt = 0; 1816 acmd->cmd_nwin = 0; 1817 1818 pkt->pkt_address = *ap; 1819 pkt->pkt_comp = (void (*)())NULL; 1820 pkt->pkt_flags = 0; 1821 pkt->pkt_time = 0; 1822 pkt->pkt_resid = 0; 1823 pkt->pkt_state = 0; 1824 pkt->pkt_statistics = 0; 1825 pkt->pkt_reason = 0; 1826 new_pkt = pkt; 1827 } else { 1828 acmd = PKT2CMD(pkt); 1829 new_pkt = NULL; 1830 } 1831 1832 /* step #2 : dma allocation/move */ 1833 if (bp && bp->b_bcount != 0) { 1834 if (acmd->cmd_dmahandle == NULL) { 1835 if (mrsas_dma_alloc(instance, pkt, bp, flags, 1836 callback) == DDI_FAILURE) { 1837 if (new_pkt) { 1838 scsi_hba_pkt_free(ap, new_pkt); 1839 } 1840 return ((struct scsi_pkt *)NULL); 1841 } 1842 } else { 1843 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) { 1844 return ((struct scsi_pkt *)NULL); 1845 } 1846 } 1847 } 1848 1849 return (pkt); 1850 } 1851 1852 /* 1853 * tran_start - transport a SCSI command to the addressed target 1854 * @ap: 1855 * @pkt: 1856 * 1857 * The tran_start() entry point for a SCSI HBA driver is called to transport a 1858 * SCSI command to the addressed target. The SCSI command is described 1859 * entirely within the scsi_pkt structure, which the target driver allocated 1860 * through the HBA driver's tran_init_pkt() entry point. If the command 1861 * involves a data transfer, DMA resources must also have been allocated for 1862 * the scsi_pkt structure. 1863 * 1864 * Return Values : 1865 * TRAN_BUSY - request queue is full, no more free scbs 1866 * TRAN_ACCEPT - pkt has been submitted to the instance 1867 */ 1868 static int 1869 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt) 1870 { 1871 uchar_t cmd_done = 0; 1872 1873 struct mrsas_instance *instance = ADDR2MR(ap); 1874 struct mrsas_cmd *cmd; 1875 1876 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 1877 if (instance->deadadapter == 1) { 1878 con_log(CL_ANN1, (CE_WARN, 1879 "mrsas_tran_start: return TRAN_FATAL_ERROR " 1880 "for IO, as the HBA doesnt take any more IOs")); 1881 if (pkt) { 1882 pkt->pkt_reason = CMD_DEV_GONE; 1883 pkt->pkt_statistics = STAT_DISCON; 1884 } 1885 return (TRAN_FATAL_ERROR); 1886 } 1887 1888 if (instance->adapterresetinprogress) { 1889 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, " 1890 "returning mfi_pkt and setting TRAN_BUSY\n")); 1891 return (TRAN_BUSY); 1892 } 1893 1894 con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x", 1895 __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time)); 1896 1897 pkt->pkt_reason = CMD_CMPLT; 1898 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */ 1899 1900 cmd = build_cmd(instance, ap, pkt, &cmd_done); 1901 1902 /* 1903 * Check if the command is already completed by the mrsas_build_cmd() 1904 * routine. In which case the busy_flag would be clear and scb will be 1905 * NULL and appropriate reason provided in pkt_reason field 1906 */ 1907 if (cmd_done) { 1908 pkt->pkt_reason = CMD_CMPLT; 1909 pkt->pkt_scbp[0] = STATUS_GOOD; 1910 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET 1911 | STATE_SENT_CMD; 1912 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) { 1913 (*pkt->pkt_comp)(pkt); 1914 } 1915 1916 return (TRAN_ACCEPT); 1917 } 1918 1919 if (cmd == NULL) { 1920 return (TRAN_BUSY); 1921 } 1922 1923 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) { 1924 if (instance->fw_outstanding > instance->max_fw_cmds) { 1925 con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy")); 1926 DTRACE_PROBE2(start_tran_err, 1927 uint16_t, instance->fw_outstanding, 1928 uint16_t, instance->max_fw_cmds); 1929 mrsas_return_mfi_pkt(instance, cmd); 1930 return (TRAN_BUSY); 1931 } 1932 1933 /* Synchronize the Cmd frame for the controller */ 1934 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, 1935 DDI_DMA_SYNC_FORDEV); 1936 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x" 1937 "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index)); 1938 instance->func_ptr->issue_cmd(cmd, instance); 1939 1940 } else { 1941 struct mrsas_header *hdr = &cmd->frame->hdr; 1942 1943 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd); 1944 1945 pkt->pkt_reason = CMD_CMPLT; 1946 pkt->pkt_statistics = 0; 1947 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS; 1948 1949 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, 1950 &hdr->cmd_status)) { 1951 case MFI_STAT_OK: 1952 pkt->pkt_scbp[0] = STATUS_GOOD; 1953 break; 1954 1955 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1956 con_log(CL_ANN, (CE_CONT, 1957 "mrsas_tran_start: scsi done with error")); 1958 pkt->pkt_reason = CMD_CMPLT; 1959 pkt->pkt_statistics = 0; 1960 1961 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1; 1962 break; 1963 1964 case MFI_STAT_DEVICE_NOT_FOUND: 1965 con_log(CL_ANN, (CE_CONT, 1966 "mrsas_tran_start: device not found error")); 1967 pkt->pkt_reason = CMD_DEV_GONE; 1968 pkt->pkt_statistics = STAT_DISCON; 1969 break; 1970 1971 default: 1972 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1; 1973 } 1974 1975 (void) mrsas_common_check(instance, cmd); 1976 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd, 1977 uint8_t, hdr->cmd_status); 1978 mrsas_return_mfi_pkt(instance, cmd); 1979 1980 if (pkt->pkt_comp) { 1981 (*pkt->pkt_comp)(pkt); 1982 } 1983 1984 } 1985 1986 return (TRAN_ACCEPT); 1987 } 1988 1989 /* 1990 * tran_abort - Abort any commands that are currently in transport 1991 * @ap: 1992 * @pkt: 1993 * 1994 * The tran_abort() entry point for a SCSI HBA driver is called to abort any 1995 * commands that are currently in transport for a particular target. This entry 1996 * point is called when a target driver calls scsi_abort(). The tran_abort() 1997 * entry point should attempt to abort the command denoted by the pkt 1998 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to 1999 * abort all outstanding commands in the transport layer for the particular 2000 * target or logical unit. 2001 */ 2002 /*ARGSUSED*/ 2003 static int 2004 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 2005 { 2006 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2007 2008 /* abort command not supported by H/W */ 2009 2010 return (DDI_FAILURE); 2011 } 2012 2013 /* 2014 * tran_reset - reset either the SCSI bus or target 2015 * @ap: 2016 * @level: 2017 * 2018 * The tran_reset() entry point for a SCSI HBA driver is called to reset either 2019 * the SCSI bus or a particular SCSI target device. This entry point is called 2020 * when a target driver calls scsi_reset(). The tran_reset() entry point must 2021 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the 2022 * particular target or logical unit must be reset. 2023 */ 2024 /*ARGSUSED*/ 2025 static int 2026 mrsas_tran_reset(struct scsi_address *ap, int level) 2027 { 2028 struct mrsas_instance *instance = ADDR2MR(ap); 2029 2030 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2031 2032 if (wait_for_outstanding(instance)) { 2033 con_log(CL_ANN1, 2034 (CE_CONT, "wait_for_outstanding: return FAIL.\n")); 2035 return (DDI_FAILURE); 2036 } else { 2037 return (DDI_SUCCESS); 2038 } 2039 } 2040 2041 /* 2042 * tran_getcap - get one of a set of SCSA-defined capabilities 2043 * @ap: 2044 * @cap: 2045 * @whom: 2046 * 2047 * The target driver can request the current setting of the capability for a 2048 * particular target by setting the whom parameter to nonzero. A whom value of 2049 * zero indicates a request for the current setting of the general capability 2050 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1 2051 * for undefined capabilities or the current value of the requested capability. 2052 */ 2053 /*ARGSUSED*/ 2054 static int 2055 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom) 2056 { 2057 int rval = 0; 2058 2059 struct mrsas_instance *instance = ADDR2MR(ap); 2060 2061 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2062 2063 /* we do allow inquiring about capabilities for other targets */ 2064 if (cap == NULL) { 2065 return (-1); 2066 } 2067 2068 switch (scsi_hba_lookup_capstr(cap)) { 2069 case SCSI_CAP_DMA_MAX: 2070 if (instance->tbolt) { 2071 /* Limit to 256k max transfer */ 2072 rval = mrsas_tbolt_max_cap_maxxfer; 2073 } else { 2074 /* Limit to 16MB max transfer */ 2075 rval = mrsas_max_cap_maxxfer; 2076 } 2077 break; 2078 case SCSI_CAP_MSG_OUT: 2079 rval = 1; 2080 break; 2081 case SCSI_CAP_DISCONNECT: 2082 rval = 0; 2083 break; 2084 case SCSI_CAP_SYNCHRONOUS: 2085 rval = 0; 2086 break; 2087 case SCSI_CAP_WIDE_XFER: 2088 rval = 1; 2089 break; 2090 case SCSI_CAP_TAGGED_QING: 2091 rval = 1; 2092 break; 2093 case SCSI_CAP_UNTAGGED_QING: 2094 rval = 1; 2095 break; 2096 case SCSI_CAP_PARITY: 2097 rval = 1; 2098 break; 2099 case SCSI_CAP_INITIATOR_ID: 2100 rval = instance->init_id; 2101 break; 2102 case SCSI_CAP_ARQ: 2103 rval = 1; 2104 break; 2105 case SCSI_CAP_LINKED_CMDS: 2106 rval = 0; 2107 break; 2108 case SCSI_CAP_RESET_NOTIFICATION: 2109 rval = 1; 2110 break; 2111 case SCSI_CAP_GEOMETRY: 2112 rval = -1; 2113 2114 break; 2115 default: 2116 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x", 2117 scsi_hba_lookup_capstr(cap))); 2118 rval = -1; 2119 break; 2120 } 2121 2122 return (rval); 2123 } 2124 2125 /* 2126 * tran_setcap - set one of a set of SCSA-defined capabilities 2127 * @ap: 2128 * @cap: 2129 * @value: 2130 * @whom: 2131 * 2132 * The target driver might request that the new value be set for a particular 2133 * target by setting the whom parameter to nonzero. A whom value of zero 2134 * means that request is to set the new value for the SCSI bus or for adapter 2135 * hardware in general. 2136 * The tran_setcap() should return the following values as appropriate: 2137 * - -1 for undefined capabilities 2138 * - 0 if the HBA driver cannot set the capability to the requested value 2139 * - 1 if the HBA driver is able to set the capability to the requested value 2140 */ 2141 /*ARGSUSED*/ 2142 static int 2143 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom) 2144 { 2145 int rval = 1; 2146 2147 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2148 2149 /* We don't allow setting capabilities for other targets */ 2150 if (cap == NULL || whom == 0) { 2151 return (-1); 2152 } 2153 2154 switch (scsi_hba_lookup_capstr(cap)) { 2155 case SCSI_CAP_DMA_MAX: 2156 case SCSI_CAP_MSG_OUT: 2157 case SCSI_CAP_PARITY: 2158 case SCSI_CAP_LINKED_CMDS: 2159 case SCSI_CAP_RESET_NOTIFICATION: 2160 case SCSI_CAP_DISCONNECT: 2161 case SCSI_CAP_SYNCHRONOUS: 2162 case SCSI_CAP_UNTAGGED_QING: 2163 case SCSI_CAP_WIDE_XFER: 2164 case SCSI_CAP_INITIATOR_ID: 2165 case SCSI_CAP_ARQ: 2166 /* 2167 * None of these are settable via 2168 * the capability interface. 2169 */ 2170 break; 2171 case SCSI_CAP_TAGGED_QING: 2172 rval = 1; 2173 break; 2174 case SCSI_CAP_SECTOR_SIZE: 2175 rval = 1; 2176 break; 2177 2178 case SCSI_CAP_TOTAL_SECTORS: 2179 rval = 1; 2180 break; 2181 default: 2182 rval = -1; 2183 break; 2184 } 2185 2186 return (rval); 2187 } 2188 2189 /* 2190 * tran_destroy_pkt - deallocate scsi_pkt structure 2191 * @ap: 2192 * @pkt: 2193 * 2194 * The tran_destroy_pkt() entry point is the HBA driver function that 2195 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is 2196 * called when the target driver calls scsi_destroy_pkt(). The 2197 * tran_destroy_pkt() entry point must free any DMA resources that have been 2198 * allocated for the packet. An implicit DMA synchronization occurs if the 2199 * DMA resources are freed and any cached data remains after the completion 2200 * of the transfer. 2201 */ 2202 static void 2203 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2204 { 2205 struct scsa_cmd *acmd = PKT2CMD(pkt); 2206 2207 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2208 2209 if (acmd->cmd_flags & CFLAG_DMAVALID) { 2210 acmd->cmd_flags &= ~CFLAG_DMAVALID; 2211 2212 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 2213 2214 ddi_dma_free_handle(&acmd->cmd_dmahandle); 2215 2216 acmd->cmd_dmahandle = NULL; 2217 } 2218 2219 /* free the pkt */ 2220 scsi_hba_pkt_free(ap, pkt); 2221 } 2222 2223 /* 2224 * tran_dmafree - deallocates DMA resources 2225 * @ap: 2226 * @pkt: 2227 * 2228 * The tran_dmafree() entry point deallocates DMAQ resources that have been 2229 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is 2230 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must 2231 * free only DMA resources allocated for a scsi_pkt structure, not the 2232 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is 2233 * implicitly performed. 2234 */ 2235 /*ARGSUSED*/ 2236 static void 2237 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 2238 { 2239 register struct scsa_cmd *acmd = PKT2CMD(pkt); 2240 2241 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2242 2243 if (acmd->cmd_flags & CFLAG_DMAVALID) { 2244 acmd->cmd_flags &= ~CFLAG_DMAVALID; 2245 2246 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle); 2247 2248 ddi_dma_free_handle(&acmd->cmd_dmahandle); 2249 2250 acmd->cmd_dmahandle = NULL; 2251 } 2252 } 2253 2254 /* 2255 * tran_sync_pkt - synchronize the DMA object allocated 2256 * @ap: 2257 * @pkt: 2258 * 2259 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for 2260 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt() 2261 * entry point is called when the target driver calls scsi_sync_pkt(). If the 2262 * data transfer direction is a DMA read from device to memory, tran_sync_pkt() 2263 * must synchronize the CPU's view of the data. If the data transfer direction 2264 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the 2265 * device's view of the data. 2266 */ 2267 /*ARGSUSED*/ 2268 static void 2269 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 2270 { 2271 register struct scsa_cmd *acmd = PKT2CMD(pkt); 2272 2273 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2274 2275 if (acmd->cmd_flags & CFLAG_DMAVALID) { 2276 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset, 2277 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ? 2278 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU); 2279 } 2280 } 2281 2282 /*ARGSUSED*/ 2283 static int 2284 mrsas_tran_quiesce(dev_info_t *dip) 2285 { 2286 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2287 2288 return (1); 2289 } 2290 2291 /*ARGSUSED*/ 2292 static int 2293 mrsas_tran_unquiesce(dev_info_t *dip) 2294 { 2295 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2296 2297 return (1); 2298 } 2299 2300 2301 /* 2302 * mrsas_isr(caddr_t) 2303 * 2304 * The Interrupt Service Routine 2305 * 2306 * Collect status for all completed commands and do callback 2307 * 2308 */ 2309 static uint_t 2310 mrsas_isr(struct mrsas_instance *instance) 2311 { 2312 int need_softintr; 2313 uint32_t producer; 2314 uint32_t consumer; 2315 uint32_t context; 2316 int retval; 2317 2318 struct mrsas_cmd *cmd; 2319 struct mrsas_header *hdr; 2320 struct scsi_pkt *pkt; 2321 2322 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2323 ASSERT(instance); 2324 if (instance->tbolt) { 2325 mutex_enter(&instance->chip_mtx); 2326 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) && 2327 !(instance->func_ptr->intr_ack(instance))) { 2328 mutex_exit(&instance->chip_mtx); 2329 return (DDI_INTR_UNCLAIMED); 2330 } 2331 retval = mr_sas_tbolt_process_outstanding_cmd(instance); 2332 mutex_exit(&instance->chip_mtx); 2333 return (retval); 2334 } else { 2335 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) && 2336 !instance->func_ptr->intr_ack(instance)) { 2337 return (DDI_INTR_UNCLAIMED); 2338 } 2339 } 2340 2341 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 2342 0, 0, DDI_DMA_SYNC_FORCPU); 2343 2344 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 2345 != DDI_SUCCESS) { 2346 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 2347 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 2348 con_log(CL_ANN1, (CE_WARN, 2349 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED")); 2350 return (DDI_INTR_CLAIMED); 2351 } 2352 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 2353 2354 #ifdef OCRDEBUG 2355 if (debug_consecutive_timeout_after_ocr_g == 1) { 2356 con_log(CL_ANN1, (CE_NOTE, 2357 "simulating consecutive timeout after ocr")); 2358 return (DDI_INTR_CLAIMED); 2359 } 2360 #endif 2361 2362 mutex_enter(&instance->completed_pool_mtx); 2363 mutex_enter(&instance->cmd_pend_mtx); 2364 2365 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle, 2366 instance->producer); 2367 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle, 2368 instance->consumer); 2369 2370 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ", 2371 producer, consumer)); 2372 if (producer == consumer) { 2373 con_log(CL_ANN, (CE_WARN, "producer == consumer case")); 2374 DTRACE_PROBE2(isr_pc_err, uint32_t, producer, 2375 uint32_t, consumer); 2376 mutex_exit(&instance->cmd_pend_mtx); 2377 mutex_exit(&instance->completed_pool_mtx); 2378 return (DDI_INTR_CLAIMED); 2379 } 2380 2381 while (consumer != producer) { 2382 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle, 2383 &instance->reply_queue[consumer]); 2384 cmd = instance->cmd_list[context]; 2385 2386 if (cmd->sync_cmd == MRSAS_TRUE) { 2387 hdr = (struct mrsas_header *)&cmd->frame->hdr; 2388 if (hdr) { 2389 mlist_del_init(&cmd->list); 2390 } 2391 } else { 2392 pkt = cmd->pkt; 2393 if (pkt) { 2394 mlist_del_init(&cmd->list); 2395 } 2396 } 2397 2398 mlist_add_tail(&cmd->list, &instance->completed_pool_list); 2399 2400 consumer++; 2401 if (consumer == (instance->max_fw_cmds + 1)) { 2402 consumer = 0; 2403 } 2404 } 2405 ddi_put32(instance->mfi_internal_dma_obj.acc_handle, 2406 instance->consumer, consumer); 2407 mutex_exit(&instance->cmd_pend_mtx); 2408 mutex_exit(&instance->completed_pool_mtx); 2409 2410 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle, 2411 0, 0, DDI_DMA_SYNC_FORDEV); 2412 2413 if (instance->softint_running) { 2414 need_softintr = 0; 2415 } else { 2416 need_softintr = 1; 2417 } 2418 2419 if (instance->isr_level == HIGH_LEVEL_INTR) { 2420 if (need_softintr) { 2421 ddi_trigger_softintr(instance->soft_intr_id); 2422 } 2423 } else { 2424 /* 2425 * Not a high-level interrupt, therefore call the soft level 2426 * interrupt explicitly 2427 */ 2428 (void) mrsas_softintr(instance); 2429 } 2430 2431 return (DDI_INTR_CLAIMED); 2432 } 2433 2434 2435 /* 2436 * ************************************************************************** * 2437 * * 2438 * libraries * 2439 * * 2440 * ************************************************************************** * 2441 */ 2442 /* 2443 * get_mfi_pkt : Get a command from the free pool 2444 * After successful allocation, the caller of this routine 2445 * must clear the frame buffer (memset to zero) before 2446 * using the packet further. 2447 * 2448 * ***** Note ***** 2449 * After clearing the frame buffer the context id of the 2450 * frame buffer SHOULD be restored back. 2451 */ 2452 struct mrsas_cmd * 2453 mrsas_get_mfi_pkt(struct mrsas_instance *instance) 2454 { 2455 mlist_t *head = &instance->cmd_pool_list; 2456 struct mrsas_cmd *cmd = NULL; 2457 2458 mutex_enter(&instance->cmd_pool_mtx); 2459 2460 if (!mlist_empty(head)) { 2461 cmd = mlist_entry(head->next, struct mrsas_cmd, list); 2462 mlist_del_init(head->next); 2463 } 2464 if (cmd != NULL) { 2465 cmd->pkt = NULL; 2466 cmd->retry_count_for_ocr = 0; 2467 cmd->drv_pkt_time = 0; 2468 2469 } 2470 mutex_exit(&instance->cmd_pool_mtx); 2471 2472 return (cmd); 2473 } 2474 2475 static struct mrsas_cmd * 2476 get_mfi_app_pkt(struct mrsas_instance *instance) 2477 { 2478 mlist_t *head = &instance->app_cmd_pool_list; 2479 struct mrsas_cmd *cmd = NULL; 2480 2481 mutex_enter(&instance->app_cmd_pool_mtx); 2482 2483 if (!mlist_empty(head)) { 2484 cmd = mlist_entry(head->next, struct mrsas_cmd, list); 2485 mlist_del_init(head->next); 2486 } 2487 if (cmd != NULL) { 2488 cmd->pkt = NULL; 2489 cmd->retry_count_for_ocr = 0; 2490 cmd->drv_pkt_time = 0; 2491 } 2492 2493 mutex_exit(&instance->app_cmd_pool_mtx); 2494 2495 return (cmd); 2496 } 2497 /* 2498 * return_mfi_pkt : Return a cmd to free command pool 2499 */ 2500 void 2501 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 2502 { 2503 mutex_enter(&instance->cmd_pool_mtx); 2504 /* use mlist_add_tail for debug assistance */ 2505 mlist_add_tail(&cmd->list, &instance->cmd_pool_list); 2506 2507 mutex_exit(&instance->cmd_pool_mtx); 2508 } 2509 2510 static void 2511 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 2512 { 2513 mutex_enter(&instance->app_cmd_pool_mtx); 2514 2515 mlist_add(&cmd->list, &instance->app_cmd_pool_list); 2516 2517 mutex_exit(&instance->app_cmd_pool_mtx); 2518 } 2519 void 2520 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 2521 { 2522 struct scsi_pkt *pkt; 2523 struct mrsas_header *hdr; 2524 con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n")); 2525 mutex_enter(&instance->cmd_pend_mtx); 2526 mlist_del_init(&cmd->list); 2527 mlist_add_tail(&cmd->list, &instance->cmd_pend_list); 2528 if (cmd->sync_cmd == MRSAS_TRUE) { 2529 hdr = (struct mrsas_header *)&cmd->frame->hdr; 2530 if (hdr) { 2531 con_log(CL_ANN1, (CE_CONT, 2532 "push_pending_mfi_pkt: " 2533 "cmd %p index %x " 2534 "time %llx", 2535 (void *)cmd, cmd->index, 2536 gethrtime())); 2537 /* Wait for specified interval */ 2538 cmd->drv_pkt_time = ddi_get16( 2539 cmd->frame_dma_obj.acc_handle, &hdr->timeout); 2540 if (cmd->drv_pkt_time < debug_timeout_g) 2541 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; 2542 con_log(CL_ANN1, (CE_CONT, 2543 "push_pending_pkt(): " 2544 "Called IO Timeout Value %x\n", 2545 cmd->drv_pkt_time)); 2546 } 2547 if (hdr && instance->timeout_id == (timeout_id_t)-1) { 2548 instance->timeout_id = timeout(io_timeout_checker, 2549 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 2550 } 2551 } else { 2552 pkt = cmd->pkt; 2553 if (pkt) { 2554 con_log(CL_ANN1, (CE_CONT, 2555 "push_pending_mfi_pkt: " 2556 "cmd %p index %x pkt %p, " 2557 "time %llx", 2558 (void *)cmd, cmd->index, (void *)pkt, 2559 gethrtime())); 2560 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; 2561 } 2562 if (pkt && instance->timeout_id == (timeout_id_t)-1) { 2563 instance->timeout_id = timeout(io_timeout_checker, 2564 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 2565 } 2566 } 2567 2568 mutex_exit(&instance->cmd_pend_mtx); 2569 2570 } 2571 2572 int 2573 mrsas_print_pending_cmds(struct mrsas_instance *instance) 2574 { 2575 mlist_t *head = &instance->cmd_pend_list; 2576 mlist_t *tmp = head; 2577 struct mrsas_cmd *cmd = NULL; 2578 struct mrsas_header *hdr; 2579 unsigned int flag = 1; 2580 struct scsi_pkt *pkt; 2581 int saved_level; 2582 int cmd_count = 0; 2583 2584 saved_level = debug_level_g; 2585 debug_level_g = CL_ANN1; 2586 2587 dev_err(instance->dip, CE_NOTE, 2588 "mrsas_print_pending_cmds(): Called"); 2589 2590 while (flag) { 2591 mutex_enter(&instance->cmd_pend_mtx); 2592 tmp = tmp->next; 2593 if (tmp == head) { 2594 mutex_exit(&instance->cmd_pend_mtx); 2595 flag = 0; 2596 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():" 2597 " NO MORE CMDS PENDING....\n")); 2598 break; 2599 } else { 2600 cmd = mlist_entry(tmp, struct mrsas_cmd, list); 2601 mutex_exit(&instance->cmd_pend_mtx); 2602 if (cmd) { 2603 if (cmd->sync_cmd == MRSAS_TRUE) { 2604 hdr = (struct mrsas_header *) 2605 &cmd->frame->hdr; 2606 if (hdr) { 2607 con_log(CL_ANN1, (CE_CONT, 2608 "print: cmd %p index 0x%x " 2609 "drv_pkt_time 0x%x (NO-PKT)" 2610 " hdr %p\n", (void *)cmd, 2611 cmd->index, 2612 cmd->drv_pkt_time, 2613 (void *)hdr)); 2614 } 2615 } else { 2616 pkt = cmd->pkt; 2617 if (pkt) { 2618 con_log(CL_ANN1, (CE_CONT, 2619 "print: cmd %p index 0x%x " 2620 "drv_pkt_time 0x%x pkt %p \n", 2621 (void *)cmd, cmd->index, 2622 cmd->drv_pkt_time, (void *)pkt)); 2623 } 2624 } 2625 2626 if (++cmd_count == 1) { 2627 mrsas_print_cmd_details(instance, cmd, 2628 0xDD); 2629 } else { 2630 mrsas_print_cmd_details(instance, cmd, 2631 1); 2632 } 2633 2634 } 2635 } 2636 } 2637 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n")); 2638 2639 2640 debug_level_g = saved_level; 2641 2642 return (DDI_SUCCESS); 2643 } 2644 2645 2646 int 2647 mrsas_complete_pending_cmds(struct mrsas_instance *instance) 2648 { 2649 2650 struct mrsas_cmd *cmd = NULL; 2651 struct scsi_pkt *pkt; 2652 struct mrsas_header *hdr; 2653 2654 struct mlist_head *pos, *next; 2655 2656 con_log(CL_ANN1, (CE_NOTE, 2657 "mrsas_complete_pending_cmds(): Called")); 2658 2659 mutex_enter(&instance->cmd_pend_mtx); 2660 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) { 2661 cmd = mlist_entry(pos, struct mrsas_cmd, list); 2662 if (cmd) { 2663 pkt = cmd->pkt; 2664 if (pkt) { /* for IO */ 2665 if (((pkt->pkt_flags & FLAG_NOINTR) 2666 == 0) && pkt->pkt_comp) { 2667 pkt->pkt_reason 2668 = CMD_DEV_GONE; 2669 pkt->pkt_statistics 2670 = STAT_DISCON; 2671 con_log(CL_ANN1, (CE_CONT, 2672 "fail and posting to scsa " 2673 "cmd %p index %x" 2674 " pkt %p " 2675 "time : %llx", 2676 (void *)cmd, cmd->index, 2677 (void *)pkt, gethrtime())); 2678 (*pkt->pkt_comp)(pkt); 2679 } 2680 } else { /* for DCMDS */ 2681 if (cmd->sync_cmd == MRSAS_TRUE) { 2682 hdr = (struct mrsas_header *)&cmd->frame->hdr; 2683 con_log(CL_ANN1, (CE_CONT, 2684 "posting invalid status to application " 2685 "cmd %p index %x" 2686 " hdr %p " 2687 "time : %llx", 2688 (void *)cmd, cmd->index, 2689 (void *)hdr, gethrtime())); 2690 hdr->cmd_status = MFI_STAT_INVALID_STATUS; 2691 complete_cmd_in_sync_mode(instance, cmd); 2692 } 2693 } 2694 mlist_del_init(&cmd->list); 2695 } else { 2696 con_log(CL_ANN1, (CE_CONT, 2697 "mrsas_complete_pending_cmds:" 2698 "NULL command\n")); 2699 } 2700 con_log(CL_ANN1, (CE_CONT, 2701 "mrsas_complete_pending_cmds:" 2702 "looping for more commands\n")); 2703 } 2704 mutex_exit(&instance->cmd_pend_mtx); 2705 2706 con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n")); 2707 return (DDI_SUCCESS); 2708 } 2709 2710 void 2711 mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd, 2712 int detail) 2713 { 2714 struct scsi_pkt *pkt = cmd->pkt; 2715 Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request; 2716 int i; 2717 int saved_level; 2718 ddi_acc_handle_t acc_handle = 2719 instance->mpi2_frame_pool_dma_obj.acc_handle; 2720 2721 if (detail == 0xDD) { 2722 saved_level = debug_level_g; 2723 debug_level_g = CL_ANN1; 2724 } 2725 2726 2727 if (instance->tbolt) { 2728 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p " 2729 "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n", 2730 (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time)); 2731 } else { 2732 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p " 2733 "cmd->index 0x%x timer 0x%x sec\n", 2734 (void *)cmd, cmd->index, cmd->drv_pkt_time)); 2735 } 2736 2737 if (pkt) { 2738 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x", 2739 pkt->pkt_cdbp[0])); 2740 } else { 2741 con_log(CL_ANN1, (CE_CONT, "NO-PKT")); 2742 } 2743 2744 if ((detail == 0xDD) && instance->tbolt) { 2745 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n")); 2746 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X " 2747 "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n", 2748 ddi_get16(acc_handle, &scsi_io->DevHandle), 2749 ddi_get8(acc_handle, &scsi_io->Function), 2750 ddi_get16(acc_handle, &scsi_io->IoFlags), 2751 ddi_get16(acc_handle, &scsi_io->SGLFlags), 2752 ddi_get32(acc_handle, &scsi_io->DataLength))); 2753 2754 for (i = 0; i < 32; i++) { 2755 con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i, 2756 ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i]))); 2757 } 2758 2759 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n")); 2760 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X " 2761 "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X " 2762 "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64 2763 " regLockLength=0x%X spanArm=0x%X\n", 2764 ddi_get8(acc_handle, &scsi_io->RaidContext.status), 2765 ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus), 2766 ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId), 2767 ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue), 2768 ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags), 2769 ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags), 2770 ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA), 2771 ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength), 2772 ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm))); 2773 } 2774 2775 if (detail == 0xDD) { 2776 debug_level_g = saved_level; 2777 } 2778 } 2779 2780 2781 int 2782 mrsas_issue_pending_cmds(struct mrsas_instance *instance) 2783 { 2784 mlist_t *head = &instance->cmd_pend_list; 2785 mlist_t *tmp = head->next; 2786 struct mrsas_cmd *cmd = NULL; 2787 struct scsi_pkt *pkt; 2788 2789 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called")); 2790 while (tmp != head) { 2791 mutex_enter(&instance->cmd_pend_mtx); 2792 cmd = mlist_entry(tmp, struct mrsas_cmd, list); 2793 tmp = tmp->next; 2794 mutex_exit(&instance->cmd_pend_mtx); 2795 if (cmd) { 2796 con_log(CL_ANN1, (CE_CONT, 2797 "mrsas_issue_pending_cmds(): " 2798 "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ", 2799 (void *)cmd, cmd->index, cmd->drv_pkt_time)); 2800 2801 /* Reset command timeout value */ 2802 if (cmd->drv_pkt_time < debug_timeout_g) 2803 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; 2804 2805 cmd->retry_count_for_ocr++; 2806 2807 dev_err(instance->dip, CE_CONT, 2808 "cmd retry count = %d\n", 2809 cmd->retry_count_for_ocr); 2810 2811 if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) { 2812 dev_err(instance->dip, 2813 CE_WARN, "mrsas_issue_pending_cmds(): " 2814 "cmd->retry_count exceeded limit >%d\n", 2815 IO_RETRY_COUNT); 2816 mrsas_print_cmd_details(instance, cmd, 0xDD); 2817 2818 dev_err(instance->dip, CE_WARN, 2819 "mrsas_issue_pending_cmds():" 2820 "Calling KILL Adapter"); 2821 if (instance->tbolt) 2822 mrsas_tbolt_kill_adapter(instance); 2823 else 2824 (void) mrsas_kill_adapter(instance); 2825 return (DDI_FAILURE); 2826 } 2827 2828 pkt = cmd->pkt; 2829 if (pkt) { 2830 con_log(CL_ANN1, (CE_CONT, 2831 "PENDING PKT-CMD ISSUE: cmd %p index %x " 2832 "pkt %p time %llx", 2833 (void *)cmd, cmd->index, 2834 (void *)pkt, 2835 gethrtime())); 2836 2837 } else { 2838 dev_err(instance->dip, CE_CONT, 2839 "mrsas_issue_pending_cmds(): NO-PKT, " 2840 "cmd %p index 0x%x drv_pkt_time 0x%x", 2841 (void *)cmd, cmd->index, cmd->drv_pkt_time); 2842 } 2843 2844 2845 if (cmd->sync_cmd == MRSAS_TRUE) { 2846 dev_err(instance->dip, CE_CONT, 2847 "mrsas_issue_pending_cmds(): " 2848 "SYNC_CMD == TRUE \n"); 2849 instance->func_ptr->issue_cmd_in_sync_mode( 2850 instance, cmd); 2851 } else { 2852 instance->func_ptr->issue_cmd(cmd, instance); 2853 } 2854 } else { 2855 con_log(CL_ANN1, (CE_CONT, 2856 "mrsas_issue_pending_cmds: NULL command\n")); 2857 } 2858 con_log(CL_ANN1, (CE_CONT, 2859 "mrsas_issue_pending_cmds:" 2860 "looping for more commands")); 2861 } 2862 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n")); 2863 return (DDI_SUCCESS); 2864 } 2865 2866 2867 2868 /* 2869 * destroy_mfi_frame_pool 2870 */ 2871 void 2872 destroy_mfi_frame_pool(struct mrsas_instance *instance) 2873 { 2874 int i; 2875 uint32_t max_cmd = instance->max_fw_cmds; 2876 2877 struct mrsas_cmd *cmd; 2878 2879 /* return all frames to pool */ 2880 2881 for (i = 0; i < max_cmd; i++) { 2882 2883 cmd = instance->cmd_list[i]; 2884 2885 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) 2886 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj); 2887 2888 cmd->frame_dma_obj_status = DMA_OBJ_FREED; 2889 } 2890 2891 } 2892 2893 /* 2894 * create_mfi_frame_pool 2895 */ 2896 int 2897 create_mfi_frame_pool(struct mrsas_instance *instance) 2898 { 2899 int i = 0; 2900 int cookie_cnt; 2901 uint16_t max_cmd; 2902 uint16_t sge_sz; 2903 uint32_t sgl_sz; 2904 uint32_t tot_frame_size; 2905 struct mrsas_cmd *cmd; 2906 int retval = DDI_SUCCESS; 2907 2908 max_cmd = instance->max_fw_cmds; 2909 sge_sz = sizeof (struct mrsas_sge_ieee); 2910 /* calculated the number of 64byte frames required for SGL */ 2911 sgl_sz = sge_sz * instance->max_num_sge; 2912 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH; 2913 2914 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: " 2915 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size)); 2916 2917 while (i < max_cmd) { 2918 cmd = instance->cmd_list[i]; 2919 2920 cmd->frame_dma_obj.size = tot_frame_size; 2921 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr; 2922 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 2923 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 2924 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1; 2925 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64; 2926 2927 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj, 2928 (uchar_t)DDI_STRUCTURE_LE_ACC); 2929 2930 if (cookie_cnt == -1 || cookie_cnt > 1) { 2931 dev_err(instance->dip, CE_WARN, 2932 "create_mfi_frame_pool: could not alloc."); 2933 retval = DDI_FAILURE; 2934 goto mrsas_undo_frame_pool; 2935 } 2936 2937 bzero(cmd->frame_dma_obj.buffer, tot_frame_size); 2938 2939 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED; 2940 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer; 2941 cmd->frame_phys_addr = 2942 cmd->frame_dma_obj.dma_cookie[0].dmac_address; 2943 2944 cmd->sense = (uint8_t *)(((unsigned long) 2945 cmd->frame_dma_obj.buffer) + 2946 tot_frame_size - SENSE_LENGTH); 2947 cmd->sense_phys_addr = 2948 cmd->frame_dma_obj.dma_cookie[0].dmac_address + 2949 tot_frame_size - SENSE_LENGTH; 2950 2951 if (!cmd->frame || !cmd->sense) { 2952 dev_err(instance->dip, CE_WARN, 2953 "pci_pool_alloc failed"); 2954 retval = ENOMEM; 2955 goto mrsas_undo_frame_pool; 2956 } 2957 2958 ddi_put32(cmd->frame_dma_obj.acc_handle, 2959 &cmd->frame->io.context, cmd->index); 2960 i++; 2961 2962 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x", 2963 cmd->index, cmd->frame_phys_addr)); 2964 } 2965 2966 return (DDI_SUCCESS); 2967 2968 mrsas_undo_frame_pool: 2969 if (i > 0) 2970 destroy_mfi_frame_pool(instance); 2971 2972 return (retval); 2973 } 2974 2975 /* 2976 * free_additional_dma_buffer 2977 */ 2978 static void 2979 free_additional_dma_buffer(struct mrsas_instance *instance) 2980 { 2981 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 2982 (void) mrsas_free_dma_obj(instance, 2983 instance->mfi_internal_dma_obj); 2984 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 2985 } 2986 2987 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) { 2988 (void) mrsas_free_dma_obj(instance, 2989 instance->mfi_evt_detail_obj); 2990 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED; 2991 } 2992 } 2993 2994 /* 2995 * alloc_additional_dma_buffer 2996 */ 2997 static int 2998 alloc_additional_dma_buffer(struct mrsas_instance *instance) 2999 { 3000 uint32_t reply_q_sz; 3001 uint32_t internal_buf_size = PAGESIZE*2; 3002 3003 /* max cmds plus 1 + producer & consumer */ 3004 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2); 3005 3006 instance->mfi_internal_dma_obj.size = internal_buf_size; 3007 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr; 3008 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3009 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 3010 0xFFFFFFFFU; 3011 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1; 3012 3013 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj, 3014 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3015 dev_err(instance->dip, CE_WARN, 3016 "could not alloc reply queue"); 3017 return (DDI_FAILURE); 3018 } 3019 3020 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size); 3021 3022 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED; 3023 3024 instance->producer = (uint32_t *)((unsigned long) 3025 instance->mfi_internal_dma_obj.buffer); 3026 instance->consumer = (uint32_t *)((unsigned long) 3027 instance->mfi_internal_dma_obj.buffer + 4); 3028 instance->reply_queue = (uint32_t *)((unsigned long) 3029 instance->mfi_internal_dma_obj.buffer + 8); 3030 instance->internal_buf = (caddr_t)(((unsigned long) 3031 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8); 3032 instance->internal_buf_dmac_add = 3033 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 3034 (reply_q_sz + 8); 3035 instance->internal_buf_size = internal_buf_size - 3036 (reply_q_sz + 8); 3037 3038 /* allocate evt_detail */ 3039 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail); 3040 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr; 3041 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3042 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3043 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1; 3044 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1; 3045 3046 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj, 3047 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3048 dev_err(instance->dip, CE_WARN, "alloc_additional_dma_buffer: " 3049 "could not allocate data transfer buffer."); 3050 goto mrsas_undo_internal_buff; 3051 } 3052 3053 bzero(instance->mfi_evt_detail_obj.buffer, 3054 sizeof (struct mrsas_evt_detail)); 3055 3056 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED; 3057 3058 return (DDI_SUCCESS); 3059 3060 mrsas_undo_internal_buff: 3061 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) { 3062 (void) mrsas_free_dma_obj(instance, 3063 instance->mfi_internal_dma_obj); 3064 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED; 3065 } 3066 3067 return (DDI_FAILURE); 3068 } 3069 3070 3071 void 3072 mrsas_free_cmd_pool(struct mrsas_instance *instance) 3073 { 3074 int i; 3075 uint32_t max_cmd; 3076 size_t sz; 3077 3078 /* already freed */ 3079 if (instance->cmd_list == NULL) { 3080 return; 3081 } 3082 3083 max_cmd = instance->max_fw_cmds; 3084 3085 /* size of cmd_list array */ 3086 sz = sizeof (struct mrsas_cmd *) * max_cmd; 3087 3088 /* First free each cmd */ 3089 for (i = 0; i < max_cmd; i++) { 3090 if (instance->cmd_list[i] != NULL) { 3091 kmem_free(instance->cmd_list[i], 3092 sizeof (struct mrsas_cmd)); 3093 } 3094 3095 instance->cmd_list[i] = NULL; 3096 } 3097 3098 /* Now, free cmd_list array */ 3099 if (instance->cmd_list != NULL) 3100 kmem_free(instance->cmd_list, sz); 3101 3102 instance->cmd_list = NULL; 3103 3104 INIT_LIST_HEAD(&instance->cmd_pool_list); 3105 INIT_LIST_HEAD(&instance->cmd_pend_list); 3106 if (instance->tbolt) { 3107 INIT_LIST_HEAD(&instance->cmd_app_pool_list); 3108 } else { 3109 INIT_LIST_HEAD(&instance->app_cmd_pool_list); 3110 } 3111 3112 } 3113 3114 3115 /* 3116 * mrsas_alloc_cmd_pool 3117 */ 3118 int 3119 mrsas_alloc_cmd_pool(struct mrsas_instance *instance) 3120 { 3121 int i; 3122 int count; 3123 uint32_t max_cmd; 3124 uint32_t reserve_cmd; 3125 size_t sz; 3126 3127 struct mrsas_cmd *cmd; 3128 3129 max_cmd = instance->max_fw_cmds; 3130 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: " 3131 "max_cmd %x", max_cmd)); 3132 3133 3134 sz = sizeof (struct mrsas_cmd *) * max_cmd; 3135 3136 /* 3137 * instance->cmd_list is an array of struct mrsas_cmd pointers. 3138 * Allocate the dynamic array first and then allocate individual 3139 * commands. 3140 */ 3141 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP); 3142 ASSERT(instance->cmd_list); 3143 3144 /* create a frame pool and assign one frame to each cmd */ 3145 for (count = 0; count < max_cmd; count++) { 3146 instance->cmd_list[count] = 3147 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP); 3148 ASSERT(instance->cmd_list[count]); 3149 } 3150 3151 /* add all the commands to command pool */ 3152 3153 INIT_LIST_HEAD(&instance->cmd_pool_list); 3154 INIT_LIST_HEAD(&instance->cmd_pend_list); 3155 INIT_LIST_HEAD(&instance->app_cmd_pool_list); 3156 3157 /* 3158 * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split 3159 * into app_cmd and regular cmd? For now, just take 3160 * max(1/8th of max, 4); 3161 */ 3162 reserve_cmd = min(MRSAS_APP_RESERVED_CMDS, 3163 max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS)); 3164 3165 for (i = 0; i < reserve_cmd; i++) { 3166 cmd = instance->cmd_list[i]; 3167 cmd->index = i; 3168 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list); 3169 } 3170 3171 3172 for (i = reserve_cmd; i < max_cmd; i++) { 3173 cmd = instance->cmd_list[i]; 3174 cmd->index = i; 3175 mlist_add_tail(&cmd->list, &instance->cmd_pool_list); 3176 } 3177 3178 return (DDI_SUCCESS); 3179 3180 mrsas_undo_cmds: 3181 if (count > 0) { 3182 /* free each cmd */ 3183 for (i = 0; i < count; i++) { 3184 if (instance->cmd_list[i] != NULL) { 3185 kmem_free(instance->cmd_list[i], 3186 sizeof (struct mrsas_cmd)); 3187 } 3188 instance->cmd_list[i] = NULL; 3189 } 3190 } 3191 3192 mrsas_undo_cmd_list: 3193 if (instance->cmd_list != NULL) 3194 kmem_free(instance->cmd_list, sz); 3195 instance->cmd_list = NULL; 3196 3197 return (DDI_FAILURE); 3198 } 3199 3200 3201 /* 3202 * free_space_for_mfi 3203 */ 3204 static void 3205 free_space_for_mfi(struct mrsas_instance *instance) 3206 { 3207 3208 /* already freed */ 3209 if (instance->cmd_list == NULL) { 3210 return; 3211 } 3212 3213 /* Free additional dma buffer */ 3214 free_additional_dma_buffer(instance); 3215 3216 /* Free the MFI frame pool */ 3217 destroy_mfi_frame_pool(instance); 3218 3219 /* Free all the commands in the cmd_list */ 3220 /* Free the cmd_list buffer itself */ 3221 mrsas_free_cmd_pool(instance); 3222 } 3223 3224 /* 3225 * alloc_space_for_mfi 3226 */ 3227 static int 3228 alloc_space_for_mfi(struct mrsas_instance *instance) 3229 { 3230 /* Allocate command pool (memory for cmd_list & individual commands) */ 3231 if (mrsas_alloc_cmd_pool(instance)) { 3232 dev_err(instance->dip, CE_WARN, "error creating cmd pool"); 3233 return (DDI_FAILURE); 3234 } 3235 3236 /* Allocate MFI Frame pool */ 3237 if (create_mfi_frame_pool(instance)) { 3238 dev_err(instance->dip, CE_WARN, 3239 "error creating frame DMA pool"); 3240 goto mfi_undo_cmd_pool; 3241 } 3242 3243 /* Allocate additional DMA buffer */ 3244 if (alloc_additional_dma_buffer(instance)) { 3245 dev_err(instance->dip, CE_WARN, 3246 "error creating frame DMA pool"); 3247 goto mfi_undo_frame_pool; 3248 } 3249 3250 return (DDI_SUCCESS); 3251 3252 mfi_undo_frame_pool: 3253 destroy_mfi_frame_pool(instance); 3254 3255 mfi_undo_cmd_pool: 3256 mrsas_free_cmd_pool(instance); 3257 3258 return (DDI_FAILURE); 3259 } 3260 3261 3262 3263 /* 3264 * get_ctrl_info 3265 */ 3266 static int 3267 get_ctrl_info(struct mrsas_instance *instance, 3268 struct mrsas_ctrl_info *ctrl_info) 3269 { 3270 int ret = 0; 3271 3272 struct mrsas_cmd *cmd; 3273 struct mrsas_dcmd_frame *dcmd; 3274 struct mrsas_ctrl_info *ci; 3275 3276 if (instance->tbolt) { 3277 cmd = get_raid_msg_mfi_pkt(instance); 3278 } else { 3279 cmd = mrsas_get_mfi_pkt(instance); 3280 } 3281 3282 if (!cmd) { 3283 con_log(CL_ANN, (CE_WARN, 3284 "Failed to get a cmd for ctrl info")); 3285 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding, 3286 uint16_t, instance->max_fw_cmds); 3287 return (DDI_FAILURE); 3288 } 3289 3290 /* Clear the frame buffer and assign back the context id */ 3291 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3292 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 3293 cmd->index); 3294 3295 dcmd = &cmd->frame->dcmd; 3296 3297 ci = (struct mrsas_ctrl_info *)instance->internal_buf; 3298 3299 if (!ci) { 3300 dev_err(instance->dip, CE_WARN, 3301 "Failed to alloc mem for ctrl info"); 3302 mrsas_return_mfi_pkt(instance, cmd); 3303 return (DDI_FAILURE); 3304 } 3305 3306 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info)); 3307 3308 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */ 3309 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 3310 3311 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 3312 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 3313 MFI_CMD_STATUS_POLL_MODE); 3314 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 3315 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 3316 MFI_FRAME_DIR_READ); 3317 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 3318 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 3319 sizeof (struct mrsas_ctrl_info)); 3320 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 3321 MR_DCMD_CTRL_GET_INFO); 3322 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 3323 instance->internal_buf_dmac_add); 3324 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 3325 sizeof (struct mrsas_ctrl_info)); 3326 3327 cmd->frame_count = 1; 3328 3329 if (instance->tbolt) { 3330 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 3331 } 3332 3333 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 3334 ret = 0; 3335 3336 ctrl_info->max_request_size = ddi_get32( 3337 cmd->frame_dma_obj.acc_handle, &ci->max_request_size); 3338 3339 ctrl_info->ld_present_count = ddi_get16( 3340 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count); 3341 3342 ctrl_info->properties.on_off_properties = ddi_get32( 3343 cmd->frame_dma_obj.acc_handle, 3344 &ci->properties.on_off_properties); 3345 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, 3346 (uint8_t *)(ctrl_info->product_name), 3347 (uint8_t *)(ci->product_name), 80 * sizeof (char), 3348 DDI_DEV_AUTOINCR); 3349 /* should get more members of ci with ddi_get when needed */ 3350 } else { 3351 dev_err(instance->dip, CE_WARN, 3352 "get_ctrl_info: Ctrl info failed"); 3353 ret = -1; 3354 } 3355 3356 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 3357 ret = -1; 3358 } 3359 if (instance->tbolt) { 3360 return_raid_msg_mfi_pkt(instance, cmd); 3361 } else { 3362 mrsas_return_mfi_pkt(instance, cmd); 3363 } 3364 3365 return (ret); 3366 } 3367 3368 /* 3369 * abort_aen_cmd 3370 */ 3371 static int 3372 abort_aen_cmd(struct mrsas_instance *instance, 3373 struct mrsas_cmd *cmd_to_abort) 3374 { 3375 int ret = 0; 3376 3377 struct mrsas_cmd *cmd; 3378 struct mrsas_abort_frame *abort_fr; 3379 3380 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__)); 3381 3382 if (instance->tbolt) { 3383 cmd = get_raid_msg_mfi_pkt(instance); 3384 } else { 3385 cmd = mrsas_get_mfi_pkt(instance); 3386 } 3387 3388 if (!cmd) { 3389 con_log(CL_ANN1, (CE_WARN, 3390 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd")); 3391 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding, 3392 uint16_t, instance->max_fw_cmds); 3393 return (DDI_FAILURE); 3394 } 3395 3396 /* Clear the frame buffer and assign back the context id */ 3397 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3398 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 3399 cmd->index); 3400 3401 abort_fr = &cmd->frame->abort; 3402 3403 /* prepare and issue the abort frame */ 3404 ddi_put8(cmd->frame_dma_obj.acc_handle, 3405 &abort_fr->cmd, MFI_CMD_OP_ABORT); 3406 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status, 3407 MFI_CMD_STATUS_SYNC_MODE); 3408 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0); 3409 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context, 3410 cmd_to_abort->index); 3411 ddi_put32(cmd->frame_dma_obj.acc_handle, 3412 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr); 3413 ddi_put32(cmd->frame_dma_obj.acc_handle, 3414 &abort_fr->abort_mfi_phys_addr_hi, 0); 3415 3416 instance->aen_cmd->abort_aen = 1; 3417 3418 cmd->frame_count = 1; 3419 3420 if (instance->tbolt) { 3421 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 3422 } 3423 3424 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 3425 con_log(CL_ANN1, (CE_WARN, 3426 "abort_aen_cmd: issue_cmd_in_poll_mode failed")); 3427 ret = -1; 3428 } else { 3429 ret = 0; 3430 } 3431 3432 instance->aen_cmd->abort_aen = 1; 3433 instance->aen_cmd = 0; 3434 3435 if (instance->tbolt) { 3436 return_raid_msg_mfi_pkt(instance, cmd); 3437 } else { 3438 mrsas_return_mfi_pkt(instance, cmd); 3439 } 3440 3441 atomic_add_16(&instance->fw_outstanding, (-1)); 3442 3443 return (ret); 3444 } 3445 3446 3447 static int 3448 mrsas_build_init_cmd(struct mrsas_instance *instance, 3449 struct mrsas_cmd **cmd_ptr) 3450 { 3451 struct mrsas_cmd *cmd; 3452 struct mrsas_init_frame *init_frame; 3453 struct mrsas_init_queue_info *initq_info; 3454 struct mrsas_drv_ver drv_ver_info; 3455 3456 3457 /* 3458 * Prepare a init frame. Note the init frame points to queue info 3459 * structure. Each frame has SGL allocated after first 64 bytes. For 3460 * this frame - since we don't need any SGL - we use SGL's space as 3461 * queue info structure 3462 */ 3463 cmd = *cmd_ptr; 3464 3465 3466 /* Clear the frame buffer and assign back the context id */ 3467 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3468 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 3469 cmd->index); 3470 3471 init_frame = (struct mrsas_init_frame *)cmd->frame; 3472 initq_info = (struct mrsas_init_queue_info *) 3473 ((unsigned long)init_frame + 64); 3474 3475 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 3476 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 3477 3478 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 3479 3480 ddi_put32(cmd->frame_dma_obj.acc_handle, 3481 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 3482 3483 ddi_put32(cmd->frame_dma_obj.acc_handle, 3484 &initq_info->producer_index_phys_addr_hi, 0); 3485 ddi_put32(cmd->frame_dma_obj.acc_handle, 3486 &initq_info->producer_index_phys_addr_lo, 3487 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 3488 3489 ddi_put32(cmd->frame_dma_obj.acc_handle, 3490 &initq_info->consumer_index_phys_addr_hi, 0); 3491 ddi_put32(cmd->frame_dma_obj.acc_handle, 3492 &initq_info->consumer_index_phys_addr_lo, 3493 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 3494 3495 ddi_put32(cmd->frame_dma_obj.acc_handle, 3496 &initq_info->reply_queue_start_phys_addr_hi, 0); 3497 ddi_put32(cmd->frame_dma_obj.acc_handle, 3498 &initq_info->reply_queue_start_phys_addr_lo, 3499 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 3500 3501 ddi_put8(cmd->frame_dma_obj.acc_handle, 3502 &init_frame->cmd, MFI_CMD_OP_INIT); 3503 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 3504 MFI_CMD_STATUS_POLL_MODE); 3505 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 3506 ddi_put32(cmd->frame_dma_obj.acc_handle, 3507 &init_frame->queue_info_new_phys_addr_lo, 3508 cmd->frame_phys_addr + 64); 3509 ddi_put32(cmd->frame_dma_obj.acc_handle, 3510 &init_frame->queue_info_new_phys_addr_hi, 0); 3511 3512 3513 /* fill driver version information */ 3514 fill_up_drv_ver(&drv_ver_info); 3515 3516 /* allocate the driver version data transfer buffer */ 3517 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver); 3518 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr; 3519 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 3520 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 3521 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1; 3522 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1; 3523 3524 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj, 3525 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 3526 con_log(CL_ANN, (CE_WARN, 3527 "init_mfi : Could not allocate driver version buffer.")); 3528 return (DDI_FAILURE); 3529 } 3530 /* copy driver version to dma buffer */ 3531 (void) memset(instance->drv_ver_dma_obj.buffer, 0, 3532 sizeof (drv_ver_info.drv_ver)); 3533 ddi_rep_put8(cmd->frame_dma_obj.acc_handle, 3534 (uint8_t *)drv_ver_info.drv_ver, 3535 (uint8_t *)instance->drv_ver_dma_obj.buffer, 3536 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR); 3537 3538 3539 /* copy driver version physical address to init frame */ 3540 ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion, 3541 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address); 3542 3543 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 3544 sizeof (struct mrsas_init_queue_info)); 3545 3546 cmd->frame_count = 1; 3547 3548 *cmd_ptr = cmd; 3549 3550 return (DDI_SUCCESS); 3551 } 3552 3553 3554 /* 3555 * mrsas_init_adapter_ppc - Initialize MFI interface adapter. 3556 */ 3557 int 3558 mrsas_init_adapter_ppc(struct mrsas_instance *instance) 3559 { 3560 struct mrsas_cmd *cmd; 3561 3562 /* 3563 * allocate memory for mfi adapter(cmd pool, individual commands, mfi 3564 * frames etc 3565 */ 3566 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) { 3567 con_log(CL_ANN, (CE_NOTE, 3568 "Error, failed to allocate memory for MFI adapter")); 3569 return (DDI_FAILURE); 3570 } 3571 3572 /* Build INIT command */ 3573 cmd = mrsas_get_mfi_pkt(instance); 3574 if (cmd == NULL) { 3575 DTRACE_PROBE2(init_adapter_mfi_err, uint16_t, 3576 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3577 return (DDI_FAILURE); 3578 } 3579 3580 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) { 3581 con_log(CL_ANN, 3582 (CE_NOTE, "Error, failed to build INIT command")); 3583 3584 goto fail_undo_alloc_mfi_space; 3585 } 3586 3587 /* 3588 * Disable interrupt before sending init frame ( see linux driver code) 3589 * send INIT MFI frame in polled mode 3590 */ 3591 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 3592 con_log(CL_ANN, (CE_WARN, "failed to init firmware")); 3593 goto fail_fw_init; 3594 } 3595 3596 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) 3597 goto fail_fw_init; 3598 mrsas_return_mfi_pkt(instance, cmd); 3599 3600 if (ctio_enable && 3601 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) { 3602 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported")); 3603 instance->flag_ieee = 1; 3604 } else { 3605 instance->flag_ieee = 0; 3606 } 3607 3608 ASSERT(!instance->skinny || instance->flag_ieee); 3609 3610 instance->unroll.alloc_space_mfi = 1; 3611 instance->unroll.verBuff = 1; 3612 3613 return (DDI_SUCCESS); 3614 3615 3616 fail_fw_init: 3617 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj); 3618 3619 fail_undo_alloc_mfi_space: 3620 mrsas_return_mfi_pkt(instance, cmd); 3621 free_space_for_mfi(instance); 3622 3623 return (DDI_FAILURE); 3624 3625 } 3626 3627 /* 3628 * mrsas_init_adapter - Initialize adapter. 3629 */ 3630 int 3631 mrsas_init_adapter(struct mrsas_instance *instance) 3632 { 3633 struct mrsas_ctrl_info ctrl_info; 3634 3635 3636 /* we expect the FW state to be READY */ 3637 if (mfi_state_transition_to_ready(instance)) { 3638 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready")); 3639 return (DDI_FAILURE); 3640 } 3641 3642 /* get various operational parameters from status register */ 3643 instance->max_num_sge = 3644 (instance->func_ptr->read_fw_status_reg(instance) & 3645 0xFF0000) >> 0x10; 3646 instance->max_num_sge = 3647 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ? 3648 MRSAS_MAX_SGE_CNT : instance->max_num_sge; 3649 3650 /* 3651 * Reduce the max supported cmds by 1. This is to ensure that the 3652 * reply_q_sz (1 more than the max cmd that driver may send) 3653 * does not exceed max cmds that the FW can support 3654 */ 3655 instance->max_fw_cmds = 3656 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF; 3657 instance->max_fw_cmds = instance->max_fw_cmds - 1; 3658 3659 3660 3661 /* Initialize adapter */ 3662 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) { 3663 con_log(CL_ANN, 3664 (CE_WARN, "mr_sas: could not initialize adapter")); 3665 return (DDI_FAILURE); 3666 } 3667 3668 /* gather misc FW related information */ 3669 instance->disable_online_ctrl_reset = 0; 3670 3671 if (!get_ctrl_info(instance, &ctrl_info)) { 3672 instance->max_sectors_per_req = ctrl_info.max_request_size; 3673 con_log(CL_ANN1, (CE_NOTE, 3674 "product name %s ld present %d", 3675 ctrl_info.product_name, ctrl_info.ld_present_count)); 3676 } else { 3677 instance->max_sectors_per_req = instance->max_num_sge * 3678 PAGESIZE / 512; 3679 } 3680 3681 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) 3682 instance->disable_online_ctrl_reset = 1; 3683 3684 return (DDI_SUCCESS); 3685 3686 } 3687 3688 3689 3690 static int 3691 mrsas_issue_init_mfi(struct mrsas_instance *instance) 3692 { 3693 struct mrsas_cmd *cmd; 3694 struct mrsas_init_frame *init_frame; 3695 struct mrsas_init_queue_info *initq_info; 3696 3697 /* 3698 * Prepare a init frame. Note the init frame points to queue info 3699 * structure. Each frame has SGL allocated after first 64 bytes. For 3700 * this frame - since we don't need any SGL - we use SGL's space as 3701 * queue info structure 3702 */ 3703 con_log(CL_ANN1, (CE_NOTE, 3704 "mrsas_issue_init_mfi: entry\n")); 3705 cmd = get_mfi_app_pkt(instance); 3706 3707 if (!cmd) { 3708 con_log(CL_ANN1, (CE_WARN, 3709 "mrsas_issue_init_mfi: get_pkt failed\n")); 3710 return (DDI_FAILURE); 3711 } 3712 3713 /* Clear the frame buffer and assign back the context id */ 3714 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 3715 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 3716 cmd->index); 3717 3718 init_frame = (struct mrsas_init_frame *)cmd->frame; 3719 initq_info = (struct mrsas_init_queue_info *) 3720 ((unsigned long)init_frame + 64); 3721 3722 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE); 3723 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info)); 3724 3725 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0); 3726 3727 ddi_put32(cmd->frame_dma_obj.acc_handle, 3728 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1); 3729 ddi_put32(cmd->frame_dma_obj.acc_handle, 3730 &initq_info->producer_index_phys_addr_hi, 0); 3731 ddi_put32(cmd->frame_dma_obj.acc_handle, 3732 &initq_info->producer_index_phys_addr_lo, 3733 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address); 3734 ddi_put32(cmd->frame_dma_obj.acc_handle, 3735 &initq_info->consumer_index_phys_addr_hi, 0); 3736 ddi_put32(cmd->frame_dma_obj.acc_handle, 3737 &initq_info->consumer_index_phys_addr_lo, 3738 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4); 3739 3740 ddi_put32(cmd->frame_dma_obj.acc_handle, 3741 &initq_info->reply_queue_start_phys_addr_hi, 0); 3742 ddi_put32(cmd->frame_dma_obj.acc_handle, 3743 &initq_info->reply_queue_start_phys_addr_lo, 3744 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8); 3745 3746 ddi_put8(cmd->frame_dma_obj.acc_handle, 3747 &init_frame->cmd, MFI_CMD_OP_INIT); 3748 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status, 3749 MFI_CMD_STATUS_POLL_MODE); 3750 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0); 3751 ddi_put32(cmd->frame_dma_obj.acc_handle, 3752 &init_frame->queue_info_new_phys_addr_lo, 3753 cmd->frame_phys_addr + 64); 3754 ddi_put32(cmd->frame_dma_obj.acc_handle, 3755 &init_frame->queue_info_new_phys_addr_hi, 0); 3756 3757 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len, 3758 sizeof (struct mrsas_init_queue_info)); 3759 3760 cmd->frame_count = 1; 3761 3762 /* issue the init frame in polled mode */ 3763 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 3764 con_log(CL_ANN1, (CE_WARN, 3765 "mrsas_issue_init_mfi():failed to " 3766 "init firmware")); 3767 return_mfi_app_pkt(instance, cmd); 3768 return (DDI_FAILURE); 3769 } 3770 3771 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) { 3772 return_mfi_app_pkt(instance, cmd); 3773 return (DDI_FAILURE); 3774 } 3775 3776 return_mfi_app_pkt(instance, cmd); 3777 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done")); 3778 3779 return (DDI_SUCCESS); 3780 } 3781 /* 3782 * mfi_state_transition_to_ready : Move the FW to READY state 3783 * 3784 * @reg_set : MFI register set 3785 */ 3786 int 3787 mfi_state_transition_to_ready(struct mrsas_instance *instance) 3788 { 3789 int i; 3790 uint8_t max_wait; 3791 uint32_t fw_ctrl = 0; 3792 uint32_t fw_state; 3793 uint32_t cur_state; 3794 uint32_t cur_abs_reg_val; 3795 uint32_t prev_abs_reg_val; 3796 uint32_t status; 3797 3798 cur_abs_reg_val = 3799 instance->func_ptr->read_fw_status_reg(instance); 3800 fw_state = 3801 cur_abs_reg_val & MFI_STATE_MASK; 3802 con_log(CL_ANN1, (CE_CONT, 3803 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state)); 3804 3805 while (fw_state != MFI_STATE_READY) { 3806 con_log(CL_ANN, (CE_CONT, 3807 "mfi_state_transition_to_ready:FW state%x", fw_state)); 3808 3809 switch (fw_state) { 3810 case MFI_STATE_FAULT: 3811 con_log(CL_ANN, (CE_NOTE, 3812 "mr_sas: FW in FAULT state!!")); 3813 3814 return (ENODEV); 3815 case MFI_STATE_WAIT_HANDSHAKE: 3816 /* set the CLR bit in IMR0 */ 3817 con_log(CL_ANN1, (CE_NOTE, 3818 "mr_sas: FW waiting for HANDSHAKE")); 3819 /* 3820 * PCI_Hot Plug: MFI F/W requires 3821 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 3822 * to be set 3823 */ 3824 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */ 3825 if (!instance->tbolt && !instance->skinny) { 3826 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE | 3827 MFI_INIT_HOTPLUG, instance); 3828 } else { 3829 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE | 3830 MFI_INIT_HOTPLUG, instance); 3831 } 3832 max_wait = (instance->tbolt == 1) ? 180 : 2; 3833 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3834 break; 3835 case MFI_STATE_BOOT_MESSAGE_PENDING: 3836 /* set the CLR bit in IMR0 */ 3837 con_log(CL_ANN1, (CE_NOTE, 3838 "mr_sas: FW state boot message pending")); 3839 /* 3840 * PCI_Hot Plug: MFI F/W requires 3841 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG) 3842 * to be set 3843 */ 3844 if (!instance->tbolt && !instance->skinny) { 3845 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance); 3846 } else { 3847 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG, 3848 instance); 3849 } 3850 max_wait = (instance->tbolt == 1) ? 180 : 10; 3851 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3852 break; 3853 case MFI_STATE_OPERATIONAL: 3854 /* bring it to READY state; assuming max wait 2 secs */ 3855 instance->func_ptr->disable_intr(instance); 3856 con_log(CL_ANN1, (CE_NOTE, 3857 "mr_sas: FW in OPERATIONAL state")); 3858 /* 3859 * PCI_Hot Plug: MFI F/W requires 3860 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT) 3861 * to be set 3862 */ 3863 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */ 3864 if (!instance->tbolt && !instance->skinny) { 3865 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance); 3866 } else { 3867 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS, 3868 instance); 3869 3870 for (i = 0; i < (10 * 1000); i++) { 3871 status = 3872 RD_RESERVED0_REGISTER(instance); 3873 if (status & 1) { 3874 delay(1 * 3875 drv_usectohz(MILLISEC)); 3876 } else { 3877 break; 3878 } 3879 } 3880 3881 } 3882 max_wait = (instance->tbolt == 1) ? 180 : 10; 3883 cur_state = MFI_STATE_OPERATIONAL; 3884 break; 3885 case MFI_STATE_UNDEFINED: 3886 /* this state should not last for more than 2 seconds */ 3887 con_log(CL_ANN1, (CE_NOTE, "FW state undefined")); 3888 3889 max_wait = (instance->tbolt == 1) ? 180 : 2; 3890 cur_state = MFI_STATE_UNDEFINED; 3891 break; 3892 case MFI_STATE_BB_INIT: 3893 max_wait = (instance->tbolt == 1) ? 180 : 2; 3894 cur_state = MFI_STATE_BB_INIT; 3895 break; 3896 case MFI_STATE_FW_INIT: 3897 max_wait = (instance->tbolt == 1) ? 180 : 2; 3898 cur_state = MFI_STATE_FW_INIT; 3899 break; 3900 case MFI_STATE_FW_INIT_2: 3901 max_wait = 180; 3902 cur_state = MFI_STATE_FW_INIT_2; 3903 break; 3904 case MFI_STATE_DEVICE_SCAN: 3905 max_wait = 180; 3906 cur_state = MFI_STATE_DEVICE_SCAN; 3907 prev_abs_reg_val = cur_abs_reg_val; 3908 con_log(CL_NONE, (CE_NOTE, 3909 "Device scan in progress ...\n")); 3910 break; 3911 case MFI_STATE_FLUSH_CACHE: 3912 max_wait = 180; 3913 cur_state = MFI_STATE_FLUSH_CACHE; 3914 break; 3915 default: 3916 con_log(CL_ANN1, (CE_NOTE, 3917 "mr_sas: Unknown state 0x%x", fw_state)); 3918 return (ENODEV); 3919 } 3920 3921 /* the cur_state should not last for more than max_wait secs */ 3922 for (i = 0; i < (max_wait * MILLISEC); i++) { 3923 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */ 3924 cur_abs_reg_val = 3925 instance->func_ptr->read_fw_status_reg(instance); 3926 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 3927 3928 if (fw_state == cur_state) { 3929 delay(1 * drv_usectohz(MILLISEC)); 3930 } else { 3931 break; 3932 } 3933 } 3934 if (fw_state == MFI_STATE_DEVICE_SCAN) { 3935 if (prev_abs_reg_val != cur_abs_reg_val) { 3936 continue; 3937 } 3938 } 3939 3940 /* return error if fw_state hasn't changed after max_wait */ 3941 if (fw_state == cur_state) { 3942 con_log(CL_ANN1, (CE_WARN, 3943 "FW state hasn't changed in %d secs", max_wait)); 3944 return (ENODEV); 3945 } 3946 }; 3947 3948 /* This may also need to apply to Skinny, but for now, don't worry. */ 3949 if (!instance->tbolt && !instance->skinny) { 3950 fw_ctrl = RD_IB_DOORBELL(instance); 3951 con_log(CL_ANN1, (CE_CONT, 3952 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl)); 3953 3954 /* 3955 * Write 0xF to the doorbell register to do the following. 3956 * - Abort all outstanding commands (bit 0). 3957 * - Transition from OPERATIONAL to READY state (bit 1). 3958 * - Discard (possible) low MFA posted in 64-bit mode (bit-2). 3959 * - Set to release FW to continue running (i.e. BIOS handshake 3960 * (bit 3). 3961 */ 3962 WR_IB_DOORBELL(0xF, instance); 3963 } 3964 3965 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 3966 return (EIO); 3967 } 3968 3969 return (DDI_SUCCESS); 3970 } 3971 3972 /* 3973 * get_seq_num 3974 */ 3975 static int 3976 get_seq_num(struct mrsas_instance *instance, 3977 struct mrsas_evt_log_info *eli) 3978 { 3979 int ret = DDI_SUCCESS; 3980 3981 dma_obj_t dcmd_dma_obj; 3982 struct mrsas_cmd *cmd; 3983 struct mrsas_dcmd_frame *dcmd; 3984 struct mrsas_evt_log_info *eli_tmp; 3985 if (instance->tbolt) { 3986 cmd = get_raid_msg_mfi_pkt(instance); 3987 } else { 3988 cmd = mrsas_get_mfi_pkt(instance); 3989 } 3990 3991 if (!cmd) { 3992 dev_err(instance->dip, CE_WARN, "failed to get a cmd"); 3993 DTRACE_PROBE2(seq_num_mfi_err, uint16_t, 3994 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 3995 return (ENOMEM); 3996 } 3997 3998 /* Clear the frame buffer and assign back the context id */ 3999 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4000 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4001 cmd->index); 4002 4003 dcmd = &cmd->frame->dcmd; 4004 4005 /* allocate the data transfer buffer */ 4006 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info); 4007 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 4008 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 4009 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 4010 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 4011 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 4012 4013 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 4014 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 4015 dev_err(instance->dip, CE_WARN, 4016 "get_seq_num: could not allocate data transfer buffer."); 4017 return (DDI_FAILURE); 4018 } 4019 4020 (void) memset(dcmd_dma_obj.buffer, 0, 4021 sizeof (struct mrsas_evt_log_info)); 4022 4023 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 4024 4025 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 4026 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0); 4027 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 4028 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 4029 MFI_FRAME_DIR_READ); 4030 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 4031 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 4032 sizeof (struct mrsas_evt_log_info)); 4033 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 4034 MR_DCMD_CTRL_EVENT_GET_INFO); 4035 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 4036 sizeof (struct mrsas_evt_log_info)); 4037 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 4038 dcmd_dma_obj.dma_cookie[0].dmac_address); 4039 4040 cmd->sync_cmd = MRSAS_TRUE; 4041 cmd->frame_count = 1; 4042 4043 if (instance->tbolt) { 4044 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 4045 } 4046 4047 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 4048 dev_err(instance->dip, CE_WARN, "get_seq_num: " 4049 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO"); 4050 ret = DDI_FAILURE; 4051 } else { 4052 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer; 4053 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle, 4054 &eli_tmp->newest_seq_num); 4055 ret = DDI_SUCCESS; 4056 } 4057 4058 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 4059 ret = DDI_FAILURE; 4060 4061 if (instance->tbolt) { 4062 return_raid_msg_mfi_pkt(instance, cmd); 4063 } else { 4064 mrsas_return_mfi_pkt(instance, cmd); 4065 } 4066 4067 return (ret); 4068 } 4069 4070 /* 4071 * start_mfi_aen 4072 */ 4073 static int 4074 start_mfi_aen(struct mrsas_instance *instance) 4075 { 4076 int ret = 0; 4077 4078 struct mrsas_evt_log_info eli; 4079 union mrsas_evt_class_locale class_locale; 4080 4081 /* get the latest sequence number from FW */ 4082 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info)); 4083 4084 if (get_seq_num(instance, &eli)) { 4085 dev_err(instance->dip, CE_WARN, 4086 "start_mfi_aen: failed to get seq num"); 4087 return (-1); 4088 } 4089 4090 /* register AEN with FW for latest sequence number plus 1 */ 4091 class_locale.members.reserved = 0; 4092 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL); 4093 class_locale.members.class = MR_EVT_CLASS_INFO; 4094 class_locale.word = LE_32(class_locale.word); 4095 ret = register_mfi_aen(instance, eli.newest_seq_num + 1, 4096 class_locale.word); 4097 4098 if (ret) { 4099 dev_err(instance->dip, CE_WARN, 4100 "start_mfi_aen: aen registration failed"); 4101 return (-1); 4102 } 4103 4104 4105 return (ret); 4106 } 4107 4108 /* 4109 * flush_cache 4110 */ 4111 static void 4112 flush_cache(struct mrsas_instance *instance) 4113 { 4114 struct mrsas_cmd *cmd = NULL; 4115 struct mrsas_dcmd_frame *dcmd; 4116 if (instance->tbolt) { 4117 cmd = get_raid_msg_mfi_pkt(instance); 4118 } else { 4119 cmd = mrsas_get_mfi_pkt(instance); 4120 } 4121 4122 if (!cmd) { 4123 con_log(CL_ANN1, (CE_WARN, 4124 "flush_cache():Failed to get a cmd for flush_cache")); 4125 DTRACE_PROBE2(flush_cache_err, uint16_t, 4126 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 4127 return; 4128 } 4129 4130 /* Clear the frame buffer and assign back the context id */ 4131 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 4132 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 4133 cmd->index); 4134 4135 dcmd = &cmd->frame->dcmd; 4136 4137 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 4138 4139 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 4140 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 4141 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0); 4142 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 4143 MFI_FRAME_DIR_NONE); 4144 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 4145 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0); 4146 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 4147 MR_DCMD_CTRL_CACHE_FLUSH); 4148 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0], 4149 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE); 4150 4151 cmd->frame_count = 1; 4152 4153 if (instance->tbolt) { 4154 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 4155 } 4156 4157 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) { 4158 con_log(CL_ANN1, (CE_WARN, 4159 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH")); 4160 } 4161 con_log(CL_ANN1, (CE_CONT, "flush_cache done")); 4162 if (instance->tbolt) { 4163 return_raid_msg_mfi_pkt(instance, cmd); 4164 } else { 4165 mrsas_return_mfi_pkt(instance, cmd); 4166 } 4167 4168 } 4169 4170 /* 4171 * service_mfi_aen- Completes an AEN command 4172 * @instance: Adapter soft state 4173 * @cmd: Command to be completed 4174 * 4175 */ 4176 void 4177 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 4178 { 4179 uint32_t seq_num; 4180 struct mrsas_evt_detail *evt_detail = 4181 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer; 4182 int rval = 0; 4183 int tgt = 0; 4184 uint8_t dtype; 4185 #ifdef PDSUPPORT 4186 mrsas_pd_address_t *pd_addr; 4187 #endif 4188 ddi_acc_handle_t acc_handle; 4189 4190 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 4191 4192 acc_handle = cmd->frame_dma_obj.acc_handle; 4193 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status); 4194 if (cmd->cmd_status == ENODATA) { 4195 cmd->cmd_status = 0; 4196 } 4197 4198 /* 4199 * log the MFI AEN event to the sysevent queue so that 4200 * application will get noticed 4201 */ 4202 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS", 4203 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) { 4204 int instance_no = ddi_get_instance(instance->dip); 4205 con_log(CL_ANN, (CE_WARN, 4206 "mr_sas%d: Failed to log AEN event", instance_no)); 4207 } 4208 /* 4209 * Check for any ld devices that has changed state. i.e. online 4210 * or offline. 4211 */ 4212 con_log(CL_ANN1, (CE_CONT, 4213 "AEN: code = %x class = %x locale = %x args = %x", 4214 ddi_get32(acc_handle, &evt_detail->code), 4215 evt_detail->cl.members.class, 4216 ddi_get16(acc_handle, &evt_detail->cl.members.locale), 4217 ddi_get8(acc_handle, &evt_detail->arg_type))); 4218 4219 switch (ddi_get32(acc_handle, &evt_detail->code)) { 4220 case MR_EVT_CFG_CLEARED: { 4221 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 4222 if (instance->mr_ld_list[tgt].dip != NULL) { 4223 mutex_enter(&instance->config_dev_mtx); 4224 instance->mr_ld_list[tgt].flag = 4225 (uint8_t)~MRDRV_TGT_VALID; 4226 mutex_exit(&instance->config_dev_mtx); 4227 rval = mrsas_service_evt(instance, tgt, 0, 4228 MRSAS_EVT_UNCONFIG_TGT, NULL); 4229 con_log(CL_ANN1, (CE_WARN, 4230 "mr_sas: CFG CLEARED AEN rval = %d " 4231 "tgt id = %d", rval, tgt)); 4232 } 4233 } 4234 break; 4235 } 4236 4237 case MR_EVT_LD_DELETED: { 4238 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id); 4239 mutex_enter(&instance->config_dev_mtx); 4240 instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID; 4241 mutex_exit(&instance->config_dev_mtx); 4242 rval = mrsas_service_evt(instance, 4243 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 4244 MRSAS_EVT_UNCONFIG_TGT, NULL); 4245 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d " 4246 "tgt id = %d index = %d", rval, 4247 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 4248 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 4249 break; 4250 } /* End of MR_EVT_LD_DELETED */ 4251 4252 case MR_EVT_LD_CREATED: { 4253 rval = mrsas_service_evt(instance, 4254 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0, 4255 MRSAS_EVT_CONFIG_TGT, NULL); 4256 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d " 4257 "tgt id = %d index = %d", rval, 4258 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 4259 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index))); 4260 break; 4261 } /* End of MR_EVT_LD_CREATED */ 4262 4263 #ifdef PDSUPPORT 4264 case MR_EVT_PD_REMOVED_EXT: { 4265 if (instance->tbolt || instance->skinny) { 4266 pd_addr = &evt_detail->args.pd_addr; 4267 dtype = pd_addr->scsi_dev_type; 4268 con_log(CL_DLEVEL1, (CE_NOTE, 4269 " MR_EVT_PD_REMOVED_EXT: dtype = %x," 4270 " arg_type = %d ", dtype, evt_detail->arg_type)); 4271 tgt = ddi_get16(acc_handle, 4272 &evt_detail->args.pd.device_id); 4273 mutex_enter(&instance->config_dev_mtx); 4274 instance->mr_tbolt_pd_list[tgt].flag = 4275 (uint8_t)~MRDRV_TGT_VALID; 4276 mutex_exit(&instance->config_dev_mtx); 4277 rval = mrsas_service_evt(instance, ddi_get16( 4278 acc_handle, &evt_detail->args.pd.device_id), 4279 1, MRSAS_EVT_UNCONFIG_TGT, NULL); 4280 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:" 4281 "rval = %d tgt id = %d ", rval, 4282 ddi_get16(acc_handle, 4283 &evt_detail->args.pd.device_id))); 4284 } 4285 break; 4286 } /* End of MR_EVT_PD_REMOVED_EXT */ 4287 4288 case MR_EVT_PD_INSERTED_EXT: { 4289 if (instance->tbolt || instance->skinny) { 4290 rval = mrsas_service_evt(instance, 4291 ddi_get16(acc_handle, 4292 &evt_detail->args.pd.device_id), 4293 1, MRSAS_EVT_CONFIG_TGT, NULL); 4294 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:" 4295 "rval = %d tgt id = %d ", rval, 4296 ddi_get16(acc_handle, 4297 &evt_detail->args.pd.device_id))); 4298 } 4299 break; 4300 } /* End of MR_EVT_PD_INSERTED_EXT */ 4301 4302 case MR_EVT_PD_STATE_CHANGE: { 4303 if (instance->tbolt || instance->skinny) { 4304 tgt = ddi_get16(acc_handle, 4305 &evt_detail->args.pd.device_id); 4306 if ((evt_detail->args.pd_state.prevState == 4307 PD_SYSTEM) && 4308 (evt_detail->args.pd_state.newState != PD_SYSTEM)) { 4309 mutex_enter(&instance->config_dev_mtx); 4310 instance->mr_tbolt_pd_list[tgt].flag = 4311 (uint8_t)~MRDRV_TGT_VALID; 4312 mutex_exit(&instance->config_dev_mtx); 4313 rval = mrsas_service_evt(instance, 4314 ddi_get16(acc_handle, 4315 &evt_detail->args.pd.device_id), 4316 1, MRSAS_EVT_UNCONFIG_TGT, NULL); 4317 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:" 4318 "rval = %d tgt id = %d ", rval, 4319 ddi_get16(acc_handle, 4320 &evt_detail->args.pd.device_id))); 4321 break; 4322 } 4323 if ((evt_detail->args.pd_state.prevState 4324 == UNCONFIGURED_GOOD) && 4325 (evt_detail->args.pd_state.newState == PD_SYSTEM)) { 4326 rval = mrsas_service_evt(instance, 4327 ddi_get16(acc_handle, 4328 &evt_detail->args.pd.device_id), 4329 1, MRSAS_EVT_CONFIG_TGT, NULL); 4330 con_log(CL_ANN1, (CE_WARN, 4331 "mr_sas: PD_INSERTED: rval = %d " 4332 " tgt id = %d ", rval, 4333 ddi_get16(acc_handle, 4334 &evt_detail->args.pd.device_id))); 4335 break; 4336 } 4337 } 4338 break; 4339 } 4340 #endif 4341 4342 } /* End of Main Switch */ 4343 4344 /* get copy of seq_num and class/locale for re-registration */ 4345 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num); 4346 seq_num++; 4347 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 4348 sizeof (struct mrsas_evt_detail)); 4349 4350 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0); 4351 ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num); 4352 4353 instance->aen_seq_num = seq_num; 4354 4355 cmd->frame_count = 1; 4356 4357 cmd->retry_count_for_ocr = 0; 4358 cmd->drv_pkt_time = 0; 4359 4360 /* Issue the aen registration frame */ 4361 instance->func_ptr->issue_cmd(cmd, instance); 4362 } 4363 4364 /* 4365 * complete_cmd_in_sync_mode - Completes an internal command 4366 * @instance: Adapter soft state 4367 * @cmd: Command to be completed 4368 * 4369 * The issue_cmd_in_sync_mode() function waits for a command to complete 4370 * after it issues a command. This function wakes up that waiting routine by 4371 * calling wake_up() on the wait queue. 4372 */ 4373 static void 4374 complete_cmd_in_sync_mode(struct mrsas_instance *instance, 4375 struct mrsas_cmd *cmd) 4376 { 4377 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle, 4378 &cmd->frame->io.cmd_status); 4379 4380 cmd->sync_cmd = MRSAS_FALSE; 4381 4382 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n", 4383 (void *)cmd)); 4384 4385 mutex_enter(&instance->int_cmd_mtx); 4386 if (cmd->cmd_status == ENODATA) { 4387 cmd->cmd_status = 0; 4388 } 4389 cv_broadcast(&instance->int_cmd_cv); 4390 mutex_exit(&instance->int_cmd_mtx); 4391 4392 } 4393 4394 /* 4395 * Call this function inside mrsas_softintr. 4396 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty 4397 * @instance: Adapter soft state 4398 */ 4399 4400 static uint32_t 4401 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance) 4402 { 4403 uint32_t cur_abs_reg_val; 4404 uint32_t fw_state; 4405 4406 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance); 4407 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 4408 if (fw_state == MFI_STATE_FAULT) { 4409 if (instance->disable_online_ctrl_reset == 1) { 4410 dev_err(instance->dip, CE_WARN, 4411 "mrsas_initiate_ocr_if_fw_is_faulty: " 4412 "FW in Fault state, detected in ISR: " 4413 "FW doesn't support ocr "); 4414 4415 return (ADAPTER_RESET_NOT_REQUIRED); 4416 } else { 4417 con_log(CL_ANN, (CE_NOTE, 4418 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault " 4419 "state, detected in ISR: FW supports ocr ")); 4420 4421 return (ADAPTER_RESET_REQUIRED); 4422 } 4423 } 4424 4425 return (ADAPTER_RESET_NOT_REQUIRED); 4426 } 4427 4428 /* 4429 * mrsas_softintr - The Software ISR 4430 * @param arg : HBA soft state 4431 * 4432 * called from high-level interrupt if hi-level interrupt are not there, 4433 * otherwise triggered as a soft interrupt 4434 */ 4435 static uint_t 4436 mrsas_softintr(struct mrsas_instance *instance) 4437 { 4438 struct scsi_pkt *pkt; 4439 struct scsa_cmd *acmd; 4440 struct mrsas_cmd *cmd; 4441 struct mlist_head *pos, *next; 4442 mlist_t process_list; 4443 struct mrsas_header *hdr; 4444 struct scsi_arq_status *arqstat; 4445 4446 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called.")); 4447 4448 ASSERT(instance); 4449 4450 mutex_enter(&instance->completed_pool_mtx); 4451 4452 if (mlist_empty(&instance->completed_pool_list)) { 4453 mutex_exit(&instance->completed_pool_mtx); 4454 return (DDI_INTR_CLAIMED); 4455 } 4456 4457 instance->softint_running = 1; 4458 4459 INIT_LIST_HEAD(&process_list); 4460 mlist_splice(&instance->completed_pool_list, &process_list); 4461 INIT_LIST_HEAD(&instance->completed_pool_list); 4462 4463 mutex_exit(&instance->completed_pool_mtx); 4464 4465 /* perform all callbacks first, before releasing the SCBs */ 4466 mlist_for_each_safe(pos, next, &process_list) { 4467 cmd = mlist_entry(pos, struct mrsas_cmd, list); 4468 4469 /* syncronize the Cmd frame for the controller */ 4470 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 4471 0, 0, DDI_DMA_SYNC_FORCPU); 4472 4473 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 4474 DDI_SUCCESS) { 4475 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 4476 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4477 con_log(CL_ANN1, (CE_WARN, 4478 "mrsas_softintr: " 4479 "FMA check reports DMA handle failure")); 4480 return (DDI_INTR_CLAIMED); 4481 } 4482 4483 hdr = &cmd->frame->hdr; 4484 4485 /* remove the internal command from the process list */ 4486 mlist_del_init(&cmd->list); 4487 4488 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 4489 case MFI_CMD_OP_PD_SCSI: 4490 case MFI_CMD_OP_LD_SCSI: 4491 case MFI_CMD_OP_LD_READ: 4492 case MFI_CMD_OP_LD_WRITE: 4493 /* 4494 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI 4495 * could have been issued either through an 4496 * IO path or an IOCTL path. If it was via IOCTL, 4497 * we will send it to internal completion. 4498 */ 4499 if (cmd->sync_cmd == MRSAS_TRUE) { 4500 complete_cmd_in_sync_mode(instance, cmd); 4501 break; 4502 } 4503 4504 /* regular commands */ 4505 acmd = cmd->cmd; 4506 pkt = CMD2PKT(acmd); 4507 4508 if (acmd->cmd_flags & CFLAG_DMAVALID) { 4509 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 4510 (void) ddi_dma_sync(acmd->cmd_dmahandle, 4511 acmd->cmd_dma_offset, 4512 acmd->cmd_dma_len, 4513 DDI_DMA_SYNC_FORCPU); 4514 } 4515 } 4516 4517 pkt->pkt_reason = CMD_CMPLT; 4518 pkt->pkt_statistics = 0; 4519 pkt->pkt_state = STATE_GOT_BUS 4520 | STATE_GOT_TARGET | STATE_SENT_CMD 4521 | STATE_XFERRED_DATA | STATE_GOT_STATUS; 4522 4523 con_log(CL_ANN, (CE_CONT, 4524 "CDB[0] = %x completed for %s: size %lx context %x", 4525 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"), 4526 acmd->cmd_dmacount, hdr->context)); 4527 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0], 4528 uint_t, acmd->cmd_cdblen, ulong_t, 4529 acmd->cmd_dmacount); 4530 4531 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) { 4532 struct scsi_inquiry *inq; 4533 4534 if (acmd->cmd_dmacount != 0) { 4535 bp_mapin(acmd->cmd_buf); 4536 inq = (struct scsi_inquiry *) 4537 acmd->cmd_buf->b_un.b_addr; 4538 4539 #ifdef PDSUPPORT 4540 if (hdr->cmd_status == MFI_STAT_OK) { 4541 display_scsi_inquiry( 4542 (caddr_t)inq); 4543 } 4544 #else 4545 /* don't expose physical drives to OS */ 4546 if (acmd->islogical && 4547 (hdr->cmd_status == MFI_STAT_OK)) { 4548 display_scsi_inquiry( 4549 (caddr_t)inq); 4550 } else if ((hdr->cmd_status == 4551 MFI_STAT_OK) && inq->inq_dtype == 4552 DTYPE_DIRECT) { 4553 4554 display_scsi_inquiry( 4555 (caddr_t)inq); 4556 4557 /* for physical disk */ 4558 hdr->cmd_status = 4559 MFI_STAT_DEVICE_NOT_FOUND; 4560 } 4561 #endif /* PDSUPPORT */ 4562 } 4563 } 4564 4565 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd, 4566 uint8_t, hdr->cmd_status); 4567 4568 switch (hdr->cmd_status) { 4569 case MFI_STAT_OK: 4570 pkt->pkt_scbp[0] = STATUS_GOOD; 4571 break; 4572 case MFI_STAT_LD_CC_IN_PROGRESS: 4573 case MFI_STAT_LD_RECON_IN_PROGRESS: 4574 pkt->pkt_scbp[0] = STATUS_GOOD; 4575 break; 4576 case MFI_STAT_LD_INIT_IN_PROGRESS: 4577 con_log(CL_ANN, 4578 (CE_WARN, "Initialization in Progress")); 4579 pkt->pkt_reason = CMD_TRAN_ERR; 4580 4581 break; 4582 case MFI_STAT_SCSI_DONE_WITH_ERROR: 4583 con_log(CL_ANN, (CE_CONT, "scsi_done error")); 4584 4585 pkt->pkt_reason = CMD_CMPLT; 4586 ((struct scsi_status *) 4587 pkt->pkt_scbp)->sts_chk = 1; 4588 4589 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) { 4590 con_log(CL_ANN, 4591 (CE_WARN, "TEST_UNIT_READY fail")); 4592 } else { 4593 pkt->pkt_state |= STATE_ARQ_DONE; 4594 arqstat = (void *)(pkt->pkt_scbp); 4595 arqstat->sts_rqpkt_reason = CMD_CMPLT; 4596 arqstat->sts_rqpkt_resid = 0; 4597 arqstat->sts_rqpkt_state |= 4598 STATE_GOT_BUS | STATE_GOT_TARGET 4599 | STATE_SENT_CMD 4600 | STATE_XFERRED_DATA; 4601 *(uint8_t *)&arqstat->sts_rqpkt_status = 4602 STATUS_GOOD; 4603 ddi_rep_get8( 4604 cmd->frame_dma_obj.acc_handle, 4605 (uint8_t *) 4606 &(arqstat->sts_sensedata), 4607 cmd->sense, 4608 sizeof (struct scsi_extended_sense), 4609 DDI_DEV_AUTOINCR); 4610 } 4611 break; 4612 case MFI_STAT_LD_OFFLINE: 4613 case MFI_STAT_DEVICE_NOT_FOUND: 4614 con_log(CL_ANN, (CE_CONT, 4615 "mrsas_softintr:device not found error")); 4616 pkt->pkt_reason = CMD_DEV_GONE; 4617 pkt->pkt_statistics = STAT_DISCON; 4618 break; 4619 case MFI_STAT_LD_LBA_OUT_OF_RANGE: 4620 pkt->pkt_state |= STATE_ARQ_DONE; 4621 pkt->pkt_reason = CMD_CMPLT; 4622 ((struct scsi_status *) 4623 pkt->pkt_scbp)->sts_chk = 1; 4624 4625 arqstat = (void *)(pkt->pkt_scbp); 4626 arqstat->sts_rqpkt_reason = CMD_CMPLT; 4627 arqstat->sts_rqpkt_resid = 0; 4628 arqstat->sts_rqpkt_state |= STATE_GOT_BUS 4629 | STATE_GOT_TARGET | STATE_SENT_CMD 4630 | STATE_XFERRED_DATA; 4631 *(uint8_t *)&arqstat->sts_rqpkt_status = 4632 STATUS_GOOD; 4633 4634 arqstat->sts_sensedata.es_valid = 1; 4635 arqstat->sts_sensedata.es_key = 4636 KEY_ILLEGAL_REQUEST; 4637 arqstat->sts_sensedata.es_class = 4638 CLASS_EXTENDED_SENSE; 4639 4640 /* 4641 * LOGICAL BLOCK ADDRESS OUT OF RANGE: 4642 * ASC: 0x21h; ASCQ: 0x00h; 4643 */ 4644 arqstat->sts_sensedata.es_add_code = 0x21; 4645 arqstat->sts_sensedata.es_qual_code = 0x00; 4646 4647 break; 4648 4649 default: 4650 con_log(CL_ANN, (CE_CONT, "Unknown status!")); 4651 pkt->pkt_reason = CMD_TRAN_ERR; 4652 4653 break; 4654 } 4655 4656 atomic_add_16(&instance->fw_outstanding, (-1)); 4657 4658 (void) mrsas_common_check(instance, cmd); 4659 4660 if (acmd->cmd_dmahandle) { 4661 if (mrsas_check_dma_handle( 4662 acmd->cmd_dmahandle) != DDI_SUCCESS) { 4663 ddi_fm_service_impact(instance->dip, 4664 DDI_SERVICE_UNAFFECTED); 4665 pkt->pkt_reason = CMD_TRAN_ERR; 4666 pkt->pkt_statistics = 0; 4667 } 4668 } 4669 4670 mrsas_return_mfi_pkt(instance, cmd); 4671 4672 /* Call the callback routine */ 4673 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 4674 pkt->pkt_comp) { 4675 (*pkt->pkt_comp)(pkt); 4676 } 4677 4678 break; 4679 4680 case MFI_CMD_OP_SMP: 4681 case MFI_CMD_OP_STP: 4682 complete_cmd_in_sync_mode(instance, cmd); 4683 break; 4684 4685 case MFI_CMD_OP_DCMD: 4686 /* see if got an event notification */ 4687 if (ddi_get32(cmd->frame_dma_obj.acc_handle, 4688 &cmd->frame->dcmd.opcode) == 4689 MR_DCMD_CTRL_EVENT_WAIT) { 4690 if ((instance->aen_cmd == cmd) && 4691 (instance->aen_cmd->abort_aen)) { 4692 con_log(CL_ANN, (CE_WARN, 4693 "mrsas_softintr: " 4694 "aborted_aen returned")); 4695 } else { 4696 atomic_add_16(&instance->fw_outstanding, 4697 (-1)); 4698 service_mfi_aen(instance, cmd); 4699 } 4700 } else { 4701 complete_cmd_in_sync_mode(instance, cmd); 4702 } 4703 4704 break; 4705 4706 case MFI_CMD_OP_ABORT: 4707 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete")); 4708 /* 4709 * MFI_CMD_OP_ABORT successfully completed 4710 * in the synchronous mode 4711 */ 4712 complete_cmd_in_sync_mode(instance, cmd); 4713 break; 4714 4715 default: 4716 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE); 4717 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4718 4719 if (cmd->pkt != NULL) { 4720 pkt = cmd->pkt; 4721 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && 4722 pkt->pkt_comp) { 4723 4724 con_log(CL_ANN1, (CE_CONT, "posting to " 4725 "scsa cmd %p index %x pkt %p" 4726 "time %llx, default ", (void *)cmd, 4727 cmd->index, (void *)pkt, 4728 gethrtime())); 4729 4730 (*pkt->pkt_comp)(pkt); 4731 4732 } 4733 } 4734 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !")); 4735 break; 4736 } 4737 } 4738 4739 instance->softint_running = 0; 4740 4741 return (DDI_INTR_CLAIMED); 4742 } 4743 4744 /* 4745 * mrsas_alloc_dma_obj 4746 * 4747 * Allocate the memory and other resources for an dma object. 4748 */ 4749 int 4750 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj, 4751 uchar_t endian_flags) 4752 { 4753 int i; 4754 size_t alen = 0; 4755 uint_t cookie_cnt; 4756 struct ddi_device_acc_attr tmp_endian_attr; 4757 4758 tmp_endian_attr = endian_attr; 4759 tmp_endian_attr.devacc_attr_endian_flags = endian_flags; 4760 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 4761 4762 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr, 4763 DDI_DMA_SLEEP, NULL, &obj->dma_handle); 4764 if (i != DDI_SUCCESS) { 4765 4766 switch (i) { 4767 case DDI_DMA_BADATTR : 4768 con_log(CL_ANN, (CE_WARN, 4769 "Failed ddi_dma_alloc_handle- Bad attribute")); 4770 break; 4771 case DDI_DMA_NORESOURCES : 4772 con_log(CL_ANN, (CE_WARN, 4773 "Failed ddi_dma_alloc_handle- No Resources")); 4774 break; 4775 default : 4776 con_log(CL_ANN, (CE_WARN, 4777 "Failed ddi_dma_alloc_handle: " 4778 "unknown status %d", i)); 4779 break; 4780 } 4781 4782 return (-1); 4783 } 4784 4785 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr, 4786 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 4787 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) || 4788 alen < obj->size) { 4789 4790 ddi_dma_free_handle(&obj->dma_handle); 4791 4792 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc")); 4793 4794 return (-1); 4795 } 4796 4797 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer, 4798 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 4799 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) { 4800 4801 ddi_dma_mem_free(&obj->acc_handle); 4802 ddi_dma_free_handle(&obj->dma_handle); 4803 4804 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle")); 4805 4806 return (-1); 4807 } 4808 4809 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) { 4810 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4811 return (-1); 4812 } 4813 4814 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) { 4815 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 4816 return (-1); 4817 } 4818 4819 return (cookie_cnt); 4820 } 4821 4822 /* 4823 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t) 4824 * 4825 * De-allocate the memory and other resources for an dma object, which must 4826 * have been alloated by a previous call to mrsas_alloc_dma_obj() 4827 */ 4828 int 4829 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj) 4830 { 4831 4832 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) { 4833 return (DDI_SUCCESS); 4834 } 4835 4836 /* 4837 * NOTE: These check-handle functions fail if *_handle == NULL, but 4838 * this function succeeds because of the previous check. 4839 */ 4840 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) { 4841 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4842 return (DDI_FAILURE); 4843 } 4844 4845 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) { 4846 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 4847 return (DDI_FAILURE); 4848 } 4849 4850 (void) ddi_dma_unbind_handle(obj.dma_handle); 4851 ddi_dma_mem_free(&obj.acc_handle); 4852 ddi_dma_free_handle(&obj.dma_handle); 4853 obj.acc_handle = NULL; 4854 return (DDI_SUCCESS); 4855 } 4856 4857 /* 4858 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *, 4859 * int, int (*)()) 4860 * 4861 * Allocate dma resources for a new scsi command 4862 */ 4863 int 4864 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt, 4865 struct buf *bp, int flags, int (*callback)()) 4866 { 4867 int dma_flags; 4868 int (*cb)(caddr_t); 4869 int i; 4870 4871 ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr; 4872 struct scsa_cmd *acmd = PKT2CMD(pkt); 4873 4874 acmd->cmd_buf = bp; 4875 4876 if (bp->b_flags & B_READ) { 4877 acmd->cmd_flags &= ~CFLAG_DMASEND; 4878 dma_flags = DDI_DMA_READ; 4879 } else { 4880 acmd->cmd_flags |= CFLAG_DMASEND; 4881 dma_flags = DDI_DMA_WRITE; 4882 } 4883 4884 if (flags & PKT_CONSISTENT) { 4885 acmd->cmd_flags |= CFLAG_CONSISTENT; 4886 dma_flags |= DDI_DMA_CONSISTENT; 4887 } 4888 4889 if (flags & PKT_DMA_PARTIAL) { 4890 dma_flags |= DDI_DMA_PARTIAL; 4891 } 4892 4893 dma_flags |= DDI_DMA_REDZONE; 4894 4895 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP; 4896 4897 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge; 4898 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull; 4899 if (instance->tbolt) { 4900 /* OCR-RESET FIX */ 4901 tmp_dma_attr.dma_attr_count_max = 4902 (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */ 4903 tmp_dma_attr.dma_attr_maxxfer = 4904 (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */ 4905 } 4906 4907 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr, 4908 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) { 4909 switch (i) { 4910 case DDI_DMA_BADATTR: 4911 bioerror(bp, EFAULT); 4912 return (DDI_FAILURE); 4913 4914 case DDI_DMA_NORESOURCES: 4915 bioerror(bp, 0); 4916 return (DDI_FAILURE); 4917 4918 default: 4919 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: " 4920 "impossible result (0x%x)", i)); 4921 bioerror(bp, EFAULT); 4922 return (DDI_FAILURE); 4923 } 4924 } 4925 4926 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags, 4927 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies); 4928 4929 switch (i) { 4930 case DDI_DMA_PARTIAL_MAP: 4931 if ((dma_flags & DDI_DMA_PARTIAL) == 0) { 4932 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 4933 "DDI_DMA_PARTIAL_MAP impossible")); 4934 goto no_dma_cookies; 4935 } 4936 4937 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) == 4938 DDI_FAILURE) { 4939 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed")); 4940 goto no_dma_cookies; 4941 } 4942 4943 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 4944 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 4945 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 4946 DDI_FAILURE) { 4947 4948 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed")); 4949 goto no_dma_cookies; 4950 } 4951 4952 goto get_dma_cookies; 4953 case DDI_DMA_MAPPED: 4954 acmd->cmd_nwin = 1; 4955 acmd->cmd_dma_len = 0; 4956 acmd->cmd_dma_offset = 0; 4957 4958 get_dma_cookies: 4959 i = 0; 4960 acmd->cmd_dmacount = 0; 4961 for (;;) { 4962 acmd->cmd_dmacount += 4963 acmd->cmd_dmacookies[i++].dmac_size; 4964 4965 if (i == instance->max_num_sge || 4966 i == acmd->cmd_ncookies) 4967 break; 4968 4969 ddi_dma_nextcookie(acmd->cmd_dmahandle, 4970 &acmd->cmd_dmacookies[i]); 4971 } 4972 4973 acmd->cmd_cookie = i; 4974 acmd->cmd_cookiecnt = i; 4975 4976 acmd->cmd_flags |= CFLAG_DMAVALID; 4977 4978 if (bp->b_bcount >= acmd->cmd_dmacount) { 4979 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 4980 } else { 4981 pkt->pkt_resid = 0; 4982 } 4983 4984 return (DDI_SUCCESS); 4985 case DDI_DMA_NORESOURCES: 4986 bioerror(bp, 0); 4987 break; 4988 case DDI_DMA_NOMAPPING: 4989 bioerror(bp, EFAULT); 4990 break; 4991 case DDI_DMA_TOOBIG: 4992 bioerror(bp, EINVAL); 4993 break; 4994 case DDI_DMA_INUSE: 4995 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:" 4996 " DDI_DMA_INUSE impossible")); 4997 break; 4998 default: 4999 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: " 5000 "impossible result (0x%x)", i)); 5001 break; 5002 } 5003 5004 no_dma_cookies: 5005 ddi_dma_free_handle(&acmd->cmd_dmahandle); 5006 acmd->cmd_dmahandle = NULL; 5007 acmd->cmd_flags &= ~CFLAG_DMAVALID; 5008 return (DDI_FAILURE); 5009 } 5010 5011 /* 5012 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *) 5013 * 5014 * move dma resources to next dma window 5015 * 5016 */ 5017 int 5018 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt, 5019 struct buf *bp) 5020 { 5021 int i = 0; 5022 5023 struct scsa_cmd *acmd = PKT2CMD(pkt); 5024 5025 /* 5026 * If there are no more cookies remaining in this window, 5027 * must move to the next window first. 5028 */ 5029 if (acmd->cmd_cookie == acmd->cmd_ncookies) { 5030 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) { 5031 return (DDI_SUCCESS); 5032 } 5033 5034 /* at last window, cannot move */ 5035 if (++acmd->cmd_curwin >= acmd->cmd_nwin) { 5036 return (DDI_FAILURE); 5037 } 5038 5039 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin, 5040 &acmd->cmd_dma_offset, &acmd->cmd_dma_len, 5041 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) == 5042 DDI_FAILURE) { 5043 return (DDI_FAILURE); 5044 } 5045 5046 acmd->cmd_cookie = 0; 5047 } else { 5048 /* still more cookies in this window - get the next one */ 5049 ddi_dma_nextcookie(acmd->cmd_dmahandle, 5050 &acmd->cmd_dmacookies[0]); 5051 } 5052 5053 /* get remaining cookies in this window, up to our maximum */ 5054 for (;;) { 5055 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size; 5056 acmd->cmd_cookie++; 5057 5058 if (i == instance->max_num_sge || 5059 acmd->cmd_cookie == acmd->cmd_ncookies) { 5060 break; 5061 } 5062 5063 ddi_dma_nextcookie(acmd->cmd_dmahandle, 5064 &acmd->cmd_dmacookies[i]); 5065 } 5066 5067 acmd->cmd_cookiecnt = i; 5068 5069 if (bp->b_bcount >= acmd->cmd_dmacount) { 5070 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount; 5071 } else { 5072 pkt->pkt_resid = 0; 5073 } 5074 5075 return (DDI_SUCCESS); 5076 } 5077 5078 /* 5079 * build_cmd 5080 */ 5081 static struct mrsas_cmd * 5082 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap, 5083 struct scsi_pkt *pkt, uchar_t *cmd_done) 5084 { 5085 uint16_t flags = 0; 5086 uint32_t i; 5087 uint32_t context; 5088 uint32_t sge_bytes; 5089 uint32_t tmp_data_xfer_len; 5090 ddi_acc_handle_t acc_handle; 5091 struct mrsas_cmd *cmd; 5092 struct mrsas_sge64 *mfi_sgl; 5093 struct mrsas_sge_ieee *mfi_sgl_ieee; 5094 struct scsa_cmd *acmd = PKT2CMD(pkt); 5095 struct mrsas_pthru_frame *pthru; 5096 struct mrsas_io_frame *ldio; 5097 5098 /* find out if this is logical or physical drive command. */ 5099 acmd->islogical = MRDRV_IS_LOGICAL(ap); 5100 acmd->device_id = MAP_DEVICE_ID(instance, ap); 5101 *cmd_done = 0; 5102 5103 /* get the command packet */ 5104 if (!(cmd = mrsas_get_mfi_pkt(instance))) { 5105 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t, 5106 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 5107 return (NULL); 5108 } 5109 5110 acc_handle = cmd->frame_dma_obj.acc_handle; 5111 5112 /* Clear the frame buffer and assign back the context id */ 5113 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 5114 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index); 5115 5116 cmd->pkt = pkt; 5117 cmd->cmd = acmd; 5118 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0], 5119 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len); 5120 5121 /* lets get the command directions */ 5122 if (acmd->cmd_flags & CFLAG_DMASEND) { 5123 flags = MFI_FRAME_DIR_WRITE; 5124 5125 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 5126 (void) ddi_dma_sync(acmd->cmd_dmahandle, 5127 acmd->cmd_dma_offset, acmd->cmd_dma_len, 5128 DDI_DMA_SYNC_FORDEV); 5129 } 5130 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) { 5131 flags = MFI_FRAME_DIR_READ; 5132 5133 if (acmd->cmd_flags & CFLAG_CONSISTENT) { 5134 (void) ddi_dma_sync(acmd->cmd_dmahandle, 5135 acmd->cmd_dma_offset, acmd->cmd_dma_len, 5136 DDI_DMA_SYNC_FORCPU); 5137 } 5138 } else { 5139 flags = MFI_FRAME_DIR_NONE; 5140 } 5141 5142 if (instance->flag_ieee) { 5143 flags |= MFI_FRAME_IEEE; 5144 } 5145 flags |= MFI_FRAME_SGL64; 5146 5147 switch (pkt->pkt_cdbp[0]) { 5148 5149 /* 5150 * case SCMD_SYNCHRONIZE_CACHE: 5151 * flush_cache(instance); 5152 * mrsas_return_mfi_pkt(instance, cmd); 5153 * *cmd_done = 1; 5154 * 5155 * return (NULL); 5156 */ 5157 5158 case SCMD_READ: 5159 case SCMD_WRITE: 5160 case SCMD_READ_G1: 5161 case SCMD_WRITE_G1: 5162 case SCMD_READ_G4: 5163 case SCMD_WRITE_G4: 5164 case SCMD_READ_G5: 5165 case SCMD_WRITE_G5: 5166 if (acmd->islogical) { 5167 ldio = (struct mrsas_io_frame *)cmd->frame; 5168 5169 /* 5170 * preare the Logical IO frame: 5171 * 2nd bit is zero for all read cmds 5172 */ 5173 ddi_put8(acc_handle, &ldio->cmd, 5174 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE 5175 : MFI_CMD_OP_LD_READ); 5176 ddi_put8(acc_handle, &ldio->cmd_status, 0x0); 5177 ddi_put8(acc_handle, &ldio->scsi_status, 0x0); 5178 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id); 5179 ddi_put16(acc_handle, &ldio->timeout, 0); 5180 ddi_put8(acc_handle, &ldio->reserved_0, 0); 5181 ddi_put16(acc_handle, &ldio->pad_0, 0); 5182 ddi_put16(acc_handle, &ldio->flags, flags); 5183 5184 /* Initialize sense Information */ 5185 bzero(cmd->sense, SENSE_LENGTH); 5186 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH); 5187 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0); 5188 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo, 5189 cmd->sense_phys_addr); 5190 ddi_put32(acc_handle, &ldio->start_lba_hi, 0); 5191 ddi_put8(acc_handle, &ldio->access_byte, 5192 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0); 5193 ddi_put8(acc_handle, &ldio->sge_count, 5194 acmd->cmd_cookiecnt); 5195 if (instance->flag_ieee) { 5196 mfi_sgl_ieee = 5197 (struct mrsas_sge_ieee *)&ldio->sgl; 5198 } else { 5199 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl; 5200 } 5201 5202 context = ddi_get32(acc_handle, &ldio->context); 5203 5204 if (acmd->cmd_cdblen == CDB_GROUP0) { 5205 /* 6-byte cdb */ 5206 ddi_put32(acc_handle, &ldio->lba_count, ( 5207 (uint16_t)(pkt->pkt_cdbp[4]))); 5208 5209 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 5210 ((uint32_t)(pkt->pkt_cdbp[3])) | 5211 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) | 5212 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) 5213 << 16))); 5214 } else if (acmd->cmd_cdblen == CDB_GROUP1) { 5215 /* 10-byte cdb */ 5216 ddi_put32(acc_handle, &ldio->lba_count, ( 5217 ((uint16_t)(pkt->pkt_cdbp[8])) | 5218 ((uint16_t)(pkt->pkt_cdbp[7]) << 8))); 5219 5220 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 5221 ((uint32_t)(pkt->pkt_cdbp[5])) | 5222 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 5223 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 5224 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 5225 } else if (acmd->cmd_cdblen == CDB_GROUP5) { 5226 /* 12-byte cdb */ 5227 ddi_put32(acc_handle, &ldio->lba_count, ( 5228 ((uint32_t)(pkt->pkt_cdbp[9])) | 5229 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 5230 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 5231 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 5232 5233 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 5234 ((uint32_t)(pkt->pkt_cdbp[5])) | 5235 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 5236 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 5237 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 5238 } else if (acmd->cmd_cdblen == CDB_GROUP4) { 5239 /* 16-byte cdb */ 5240 ddi_put32(acc_handle, &ldio->lba_count, ( 5241 ((uint32_t)(pkt->pkt_cdbp[13])) | 5242 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) | 5243 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) | 5244 ((uint32_t)(pkt->pkt_cdbp[10]) << 24))); 5245 5246 ddi_put32(acc_handle, &ldio->start_lba_lo, ( 5247 ((uint32_t)(pkt->pkt_cdbp[9])) | 5248 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) | 5249 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) | 5250 ((uint32_t)(pkt->pkt_cdbp[6]) << 24))); 5251 5252 ddi_put32(acc_handle, &ldio->start_lba_hi, ( 5253 ((uint32_t)(pkt->pkt_cdbp[5])) | 5254 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) | 5255 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) | 5256 ((uint32_t)(pkt->pkt_cdbp[2]) << 24))); 5257 } 5258 5259 break; 5260 } 5261 /* fall through For all non-rd/wr and physical disk cmds */ 5262 default: 5263 5264 switch (pkt->pkt_cdbp[0]) { 5265 case SCMD_MODE_SENSE: 5266 case SCMD_MODE_SENSE_G1: { 5267 union scsi_cdb *cdbp; 5268 uint16_t page_code; 5269 5270 cdbp = (void *)pkt->pkt_cdbp; 5271 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0]; 5272 switch (page_code) { 5273 case 0x3: 5274 case 0x4: 5275 (void) mrsas_mode_sense_build(pkt); 5276 mrsas_return_mfi_pkt(instance, cmd); 5277 *cmd_done = 1; 5278 return (NULL); 5279 } 5280 break; 5281 } 5282 default: 5283 break; 5284 } 5285 5286 pthru = (struct mrsas_pthru_frame *)cmd->frame; 5287 5288 /* prepare the DCDB frame */ 5289 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ? 5290 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI); 5291 ddi_put8(acc_handle, &pthru->cmd_status, 0x0); 5292 ddi_put8(acc_handle, &pthru->scsi_status, 0x0); 5293 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id); 5294 ddi_put8(acc_handle, &pthru->lun, 0); 5295 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen); 5296 ddi_put16(acc_handle, &pthru->timeout, 0); 5297 ddi_put16(acc_handle, &pthru->flags, flags); 5298 tmp_data_xfer_len = 0; 5299 for (i = 0; i < acmd->cmd_cookiecnt; i++) { 5300 tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size; 5301 } 5302 ddi_put32(acc_handle, &pthru->data_xfer_len, 5303 tmp_data_xfer_len); 5304 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt); 5305 if (instance->flag_ieee) { 5306 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl; 5307 } else { 5308 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl; 5309 } 5310 5311 bzero(cmd->sense, SENSE_LENGTH); 5312 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 5313 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 5314 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 5315 cmd->sense_phys_addr); 5316 5317 context = ddi_get32(acc_handle, &pthru->context); 5318 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp, 5319 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR); 5320 5321 break; 5322 } 5323 #ifdef lint 5324 context = context; 5325 #endif 5326 /* prepare the scatter-gather list for the firmware */ 5327 if (instance->flag_ieee) { 5328 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) { 5329 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr, 5330 acmd->cmd_dmacookies[i].dmac_laddress); 5331 ddi_put32(acc_handle, &mfi_sgl_ieee->length, 5332 acmd->cmd_dmacookies[i].dmac_size); 5333 } 5334 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt; 5335 } else { 5336 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) { 5337 ddi_put64(acc_handle, &mfi_sgl->phys_addr, 5338 acmd->cmd_dmacookies[i].dmac_laddress); 5339 ddi_put32(acc_handle, &mfi_sgl->length, 5340 acmd->cmd_dmacookies[i].dmac_size); 5341 } 5342 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt; 5343 } 5344 5345 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) + 5346 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1; 5347 5348 if (cmd->frame_count >= 8) { 5349 cmd->frame_count = 8; 5350 } 5351 5352 return (cmd); 5353 } 5354 5355 /* 5356 * wait_for_outstanding - Wait for all outstanding cmds 5357 * @instance: Adapter soft state 5358 * 5359 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to 5360 * complete all its outstanding commands. Returns error if one or more IOs 5361 * are pending after this time period. 5362 */ 5363 static int 5364 wait_for_outstanding(struct mrsas_instance *instance) 5365 { 5366 int i; 5367 uint32_t wait_time = 90; 5368 5369 for (i = 0; i < wait_time; i++) { 5370 if (!instance->fw_outstanding) { 5371 break; 5372 } 5373 5374 drv_usecwait(MILLISEC); /* wait for 1000 usecs */; 5375 } 5376 5377 if (instance->fw_outstanding) { 5378 return (1); 5379 } 5380 5381 return (0); 5382 } 5383 5384 /* 5385 * issue_mfi_pthru 5386 */ 5387 static int 5388 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 5389 struct mrsas_cmd *cmd, int mode) 5390 { 5391 void *ubuf; 5392 uint32_t kphys_addr = 0; 5393 uint32_t xferlen = 0; 5394 uint32_t new_xfer_length = 0; 5395 uint_t model; 5396 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 5397 dma_obj_t pthru_dma_obj; 5398 struct mrsas_pthru_frame *kpthru; 5399 struct mrsas_pthru_frame *pthru; 5400 int i; 5401 pthru = &cmd->frame->pthru; 5402 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0]; 5403 5404 if (instance->adapterresetinprogress) { 5405 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, " 5406 "returning mfi_pkt and setting TRAN_BUSY\n")); 5407 return (DDI_FAILURE); 5408 } 5409 model = ddi_model_convert_from(mode & FMODELS); 5410 if (model == DDI_MODEL_ILP32) { 5411 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32")); 5412 5413 xferlen = kpthru->sgl.sge32[0].length; 5414 5415 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 5416 } else { 5417 #ifdef _ILP32 5418 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32")); 5419 xferlen = kpthru->sgl.sge32[0].length; 5420 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr; 5421 #else 5422 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64")); 5423 xferlen = kpthru->sgl.sge64[0].length; 5424 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr; 5425 #endif 5426 } 5427 5428 if (xferlen) { 5429 /* means IOCTL requires DMA */ 5430 /* allocate the data transfer buffer */ 5431 /* pthru_dma_obj.size = xferlen; */ 5432 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length, 5433 PAGESIZE); 5434 pthru_dma_obj.size = new_xfer_length; 5435 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr; 5436 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 5437 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 5438 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1; 5439 pthru_dma_obj.dma_attr.dma_attr_align = 1; 5440 5441 /* allocate kernel buffer for DMA */ 5442 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj, 5443 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 5444 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: " 5445 "could not allocate data transfer buffer.")); 5446 return (DDI_FAILURE); 5447 } 5448 (void) memset(pthru_dma_obj.buffer, 0, xferlen); 5449 5450 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 5451 if (kpthru->flags & MFI_FRAME_DIR_WRITE) { 5452 for (i = 0; i < xferlen; i++) { 5453 if (ddi_copyin((uint8_t *)ubuf+i, 5454 (uint8_t *)pthru_dma_obj.buffer+i, 5455 1, mode)) { 5456 con_log(CL_ANN, (CE_WARN, 5457 "issue_mfi_pthru : " 5458 "copy from user space failed")); 5459 return (DDI_FAILURE); 5460 } 5461 } 5462 } 5463 5464 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address; 5465 } 5466 5467 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd); 5468 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH); 5469 ddi_put8(acc_handle, &pthru->cmd_status, 0); 5470 ddi_put8(acc_handle, &pthru->scsi_status, 0); 5471 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id); 5472 ddi_put8(acc_handle, &pthru->lun, kpthru->lun); 5473 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len); 5474 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count); 5475 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout); 5476 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len); 5477 5478 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0); 5479 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 5480 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */ 5481 5482 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb, 5483 pthru->cdb_len, DDI_DEV_AUTOINCR); 5484 5485 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64); 5486 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen); 5487 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr); 5488 5489 cmd->sync_cmd = MRSAS_TRUE; 5490 cmd->frame_count = 1; 5491 5492 if (instance->tbolt) { 5493 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 5494 } 5495 5496 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 5497 con_log(CL_ANN, (CE_WARN, 5498 "issue_mfi_pthru: fw_ioctl failed")); 5499 } else { 5500 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) { 5501 for (i = 0; i < xferlen; i++) { 5502 if (ddi_copyout( 5503 (uint8_t *)pthru_dma_obj.buffer+i, 5504 (uint8_t *)ubuf+i, 1, mode)) { 5505 con_log(CL_ANN, (CE_WARN, 5506 "issue_mfi_pthru : " 5507 "copy to user space failed")); 5508 return (DDI_FAILURE); 5509 } 5510 } 5511 } 5512 } 5513 5514 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status); 5515 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status); 5516 5517 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, " 5518 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status)); 5519 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t, 5520 kpthru->cmd_status, uint8_t, kpthru->scsi_status); 5521 5522 if (kpthru->sense_len) { 5523 uint_t sense_len = SENSE_LENGTH; 5524 void *sense_ubuf = 5525 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo; 5526 if (kpthru->sense_len <= SENSE_LENGTH) { 5527 sense_len = kpthru->sense_len; 5528 } 5529 5530 for (i = 0; i < sense_len; i++) { 5531 if (ddi_copyout( 5532 (uint8_t *)cmd->sense+i, 5533 (uint8_t *)sense_ubuf+i, 1, mode)) { 5534 con_log(CL_ANN, (CE_WARN, 5535 "issue_mfi_pthru : " 5536 "copy to user space failed")); 5537 } 5538 con_log(CL_DLEVEL1, (CE_WARN, 5539 "Copying Sense info sense_buff[%d] = 0x%X", 5540 i, *((uint8_t *)cmd->sense + i))); 5541 } 5542 } 5543 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0, 5544 DDI_DMA_SYNC_FORDEV); 5545 5546 if (xferlen) { 5547 /* free kernel buffer */ 5548 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS) 5549 return (DDI_FAILURE); 5550 } 5551 5552 return (DDI_SUCCESS); 5553 } 5554 5555 /* 5556 * issue_mfi_dcmd 5557 */ 5558 static int 5559 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 5560 struct mrsas_cmd *cmd, int mode) 5561 { 5562 void *ubuf; 5563 uint32_t kphys_addr = 0; 5564 uint32_t xferlen = 0; 5565 uint32_t new_xfer_length = 0; 5566 uint32_t model; 5567 dma_obj_t dcmd_dma_obj; 5568 struct mrsas_dcmd_frame *kdcmd; 5569 struct mrsas_dcmd_frame *dcmd; 5570 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 5571 int i; 5572 dcmd = &cmd->frame->dcmd; 5573 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 5574 5575 if (instance->adapterresetinprogress) { 5576 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, " 5577 "returning mfi_pkt and setting TRAN_BUSY")); 5578 return (DDI_FAILURE); 5579 } 5580 model = ddi_model_convert_from(mode & FMODELS); 5581 if (model == DDI_MODEL_ILP32) { 5582 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 5583 5584 xferlen = kdcmd->sgl.sge32[0].length; 5585 5586 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 5587 } else { 5588 #ifdef _ILP32 5589 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32")); 5590 xferlen = kdcmd->sgl.sge32[0].length; 5591 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 5592 #else 5593 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64")); 5594 xferlen = kdcmd->sgl.sge64[0].length; 5595 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 5596 #endif 5597 } 5598 if (xferlen) { 5599 /* means IOCTL requires DMA */ 5600 /* allocate the data transfer buffer */ 5601 /* dcmd_dma_obj.size = xferlen; */ 5602 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length, 5603 PAGESIZE); 5604 dcmd_dma_obj.size = new_xfer_length; 5605 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr; 5606 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 5607 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 5608 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1; 5609 dcmd_dma_obj.dma_attr.dma_attr_align = 1; 5610 5611 /* allocate kernel buffer for DMA */ 5612 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj, 5613 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 5614 con_log(CL_ANN, 5615 (CE_WARN, "issue_mfi_dcmd: could not " 5616 "allocate data transfer buffer.")); 5617 return (DDI_FAILURE); 5618 } 5619 (void) memset(dcmd_dma_obj.buffer, 0, xferlen); 5620 5621 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 5622 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) { 5623 for (i = 0; i < xferlen; i++) { 5624 if (ddi_copyin((uint8_t *)ubuf + i, 5625 (uint8_t *)dcmd_dma_obj.buffer + i, 5626 1, mode)) { 5627 con_log(CL_ANN, (CE_WARN, 5628 "issue_mfi_dcmd : " 5629 "copy from user space failed")); 5630 return (DDI_FAILURE); 5631 } 5632 } 5633 } 5634 5635 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address; 5636 } 5637 5638 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd); 5639 ddi_put8(acc_handle, &dcmd->cmd_status, 0); 5640 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count); 5641 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout); 5642 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len); 5643 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode); 5644 5645 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b, 5646 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR); 5647 5648 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64); 5649 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen); 5650 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr); 5651 5652 cmd->sync_cmd = MRSAS_TRUE; 5653 cmd->frame_count = 1; 5654 5655 if (instance->tbolt) { 5656 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 5657 } 5658 5659 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 5660 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed")); 5661 } else { 5662 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) { 5663 for (i = 0; i < xferlen; i++) { 5664 if (ddi_copyout( 5665 (uint8_t *)dcmd_dma_obj.buffer + i, 5666 (uint8_t *)ubuf + i, 5667 1, mode)) { 5668 con_log(CL_ANN, (CE_WARN, 5669 "issue_mfi_dcmd : " 5670 "copy to user space failed")); 5671 return (DDI_FAILURE); 5672 } 5673 } 5674 } 5675 } 5676 5677 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status); 5678 con_log(CL_ANN, 5679 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status)); 5680 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t, 5681 kdcmd->cmd, uint8_t, kdcmd->cmd_status); 5682 5683 if (xferlen) { 5684 /* free kernel buffer */ 5685 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS) 5686 return (DDI_FAILURE); 5687 } 5688 5689 return (DDI_SUCCESS); 5690 } 5691 5692 /* 5693 * issue_mfi_smp 5694 */ 5695 static int 5696 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 5697 struct mrsas_cmd *cmd, int mode) 5698 { 5699 void *request_ubuf; 5700 void *response_ubuf; 5701 uint32_t request_xferlen = 0; 5702 uint32_t response_xferlen = 0; 5703 uint32_t new_xfer_length1 = 0; 5704 uint32_t new_xfer_length2 = 0; 5705 uint_t model; 5706 dma_obj_t request_dma_obj; 5707 dma_obj_t response_dma_obj; 5708 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 5709 struct mrsas_smp_frame *ksmp; 5710 struct mrsas_smp_frame *smp; 5711 struct mrsas_sge32 *sge32; 5712 #ifndef _ILP32 5713 struct mrsas_sge64 *sge64; 5714 #endif 5715 int i; 5716 uint64_t tmp_sas_addr; 5717 5718 smp = &cmd->frame->smp; 5719 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0]; 5720 5721 if (instance->adapterresetinprogress) { 5722 con_log(CL_ANN1, (CE_WARN, "Reset flag set, " 5723 "returning mfi_pkt and setting TRAN_BUSY\n")); 5724 return (DDI_FAILURE); 5725 } 5726 model = ddi_model_convert_from(mode & FMODELS); 5727 if (model == DDI_MODEL_ILP32) { 5728 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); 5729 5730 sge32 = &ksmp->sgl[0].sge32[0]; 5731 response_xferlen = sge32[0].length; 5732 request_xferlen = sge32[1].length; 5733 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: " 5734 "response_xferlen = %x, request_xferlen = %x", 5735 response_xferlen, request_xferlen)); 5736 5737 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 5738 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 5739 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: " 5740 "response_ubuf = %p, request_ubuf = %p", 5741 response_ubuf, request_ubuf)); 5742 } else { 5743 #ifdef _ILP32 5744 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32")); 5745 5746 sge32 = &ksmp->sgl[0].sge32[0]; 5747 response_xferlen = sge32[0].length; 5748 request_xferlen = sge32[1].length; 5749 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: " 5750 "response_xferlen = %x, request_xferlen = %x", 5751 response_xferlen, request_xferlen)); 5752 5753 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr; 5754 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr; 5755 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: " 5756 "response_ubuf = %p, request_ubuf = %p", 5757 response_ubuf, request_ubuf)); 5758 #else 5759 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64")); 5760 5761 sge64 = &ksmp->sgl[0].sge64[0]; 5762 response_xferlen = sge64[0].length; 5763 request_xferlen = sge64[1].length; 5764 5765 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr; 5766 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr; 5767 #endif 5768 } 5769 if (request_xferlen) { 5770 /* means IOCTL requires DMA */ 5771 /* allocate the data transfer buffer */ 5772 /* request_dma_obj.size = request_xferlen; */ 5773 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen, 5774 new_xfer_length1, PAGESIZE); 5775 request_dma_obj.size = new_xfer_length1; 5776 request_dma_obj.dma_attr = mrsas_generic_dma_attr; 5777 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 5778 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 5779 request_dma_obj.dma_attr.dma_attr_sgllen = 1; 5780 request_dma_obj.dma_attr.dma_attr_align = 1; 5781 5782 /* allocate kernel buffer for DMA */ 5783 if (mrsas_alloc_dma_obj(instance, &request_dma_obj, 5784 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 5785 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 5786 "could not allocate data transfer buffer.")); 5787 return (DDI_FAILURE); 5788 } 5789 (void) memset(request_dma_obj.buffer, 0, request_xferlen); 5790 5791 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 5792 for (i = 0; i < request_xferlen; i++) { 5793 if (ddi_copyin((uint8_t *)request_ubuf + i, 5794 (uint8_t *)request_dma_obj.buffer + i, 5795 1, mode)) { 5796 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 5797 "copy from user space failed")); 5798 return (DDI_FAILURE); 5799 } 5800 } 5801 } 5802 5803 if (response_xferlen) { 5804 /* means IOCTL requires DMA */ 5805 /* allocate the data transfer buffer */ 5806 /* response_dma_obj.size = response_xferlen; */ 5807 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen, 5808 new_xfer_length2, PAGESIZE); 5809 response_dma_obj.size = new_xfer_length2; 5810 response_dma_obj.dma_attr = mrsas_generic_dma_attr; 5811 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 5812 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 5813 response_dma_obj.dma_attr.dma_attr_sgllen = 1; 5814 response_dma_obj.dma_attr.dma_attr_align = 1; 5815 5816 /* allocate kernel buffer for DMA */ 5817 if (mrsas_alloc_dma_obj(instance, &response_dma_obj, 5818 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 5819 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 5820 "could not allocate data transfer buffer.")); 5821 return (DDI_FAILURE); 5822 } 5823 (void) memset(response_dma_obj.buffer, 0, response_xferlen); 5824 5825 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 5826 for (i = 0; i < response_xferlen; i++) { 5827 if (ddi_copyin((uint8_t *)response_ubuf + i, 5828 (uint8_t *)response_dma_obj.buffer + i, 5829 1, mode)) { 5830 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: " 5831 "copy from user space failed")); 5832 return (DDI_FAILURE); 5833 } 5834 } 5835 } 5836 5837 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd); 5838 ddi_put8(acc_handle, &smp->cmd_status, 0); 5839 ddi_put8(acc_handle, &smp->connection_status, 0); 5840 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count); 5841 /* smp->context = ksmp->context; */ 5842 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout); 5843 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len); 5844 5845 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr, 5846 sizeof (uint64_t)); 5847 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr); 5848 5849 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64); 5850 5851 model = ddi_model_convert_from(mode & FMODELS); 5852 if (model == DDI_MODEL_ILP32) { 5853 con_log(CL_ANN1, (CE_CONT, 5854 "issue_mfi_smp: DDI_MODEL_ILP32")); 5855 5856 sge32 = &smp->sgl[0].sge32[0]; 5857 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 5858 ddi_put32(acc_handle, &sge32[0].phys_addr, 5859 response_dma_obj.dma_cookie[0].dmac_address); 5860 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 5861 ddi_put32(acc_handle, &sge32[1].phys_addr, 5862 request_dma_obj.dma_cookie[0].dmac_address); 5863 } else { 5864 #ifdef _ILP32 5865 con_log(CL_ANN1, (CE_CONT, 5866 "issue_mfi_smp: DDI_MODEL_ILP32")); 5867 sge32 = &smp->sgl[0].sge32[0]; 5868 ddi_put32(acc_handle, &sge32[0].length, response_xferlen); 5869 ddi_put32(acc_handle, &sge32[0].phys_addr, 5870 response_dma_obj.dma_cookie[0].dmac_address); 5871 ddi_put32(acc_handle, &sge32[1].length, request_xferlen); 5872 ddi_put32(acc_handle, &sge32[1].phys_addr, 5873 request_dma_obj.dma_cookie[0].dmac_address); 5874 #else 5875 con_log(CL_ANN1, (CE_CONT, 5876 "issue_mfi_smp: DDI_MODEL_LP64")); 5877 sge64 = &smp->sgl[0].sge64[0]; 5878 ddi_put32(acc_handle, &sge64[0].length, response_xferlen); 5879 ddi_put64(acc_handle, &sge64[0].phys_addr, 5880 response_dma_obj.dma_cookie[0].dmac_address); 5881 ddi_put32(acc_handle, &sge64[1].length, request_xferlen); 5882 ddi_put64(acc_handle, &sge64[1].phys_addr, 5883 request_dma_obj.dma_cookie[0].dmac_address); 5884 #endif 5885 } 5886 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : " 5887 "smp->response_xferlen = %d, smp->request_xferlen = %d " 5888 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length), 5889 ddi_get32(acc_handle, &sge32[1].length), 5890 ddi_get32(acc_handle, &smp->data_xfer_len))); 5891 5892 cmd->sync_cmd = MRSAS_TRUE; 5893 cmd->frame_count = 1; 5894 5895 if (instance->tbolt) { 5896 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 5897 } 5898 5899 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 5900 con_log(CL_ANN, (CE_WARN, 5901 "issue_mfi_smp: fw_ioctl failed")); 5902 } else { 5903 con_log(CL_ANN1, (CE_CONT, 5904 "issue_mfi_smp: copy to user space")); 5905 5906 if (request_xferlen) { 5907 for (i = 0; i < request_xferlen; i++) { 5908 if (ddi_copyout( 5909 (uint8_t *)request_dma_obj.buffer + 5910 i, (uint8_t *)request_ubuf + i, 5911 1, mode)) { 5912 con_log(CL_ANN, (CE_WARN, 5913 "issue_mfi_smp : copy to user space" 5914 " failed")); 5915 return (DDI_FAILURE); 5916 } 5917 } 5918 } 5919 5920 if (response_xferlen) { 5921 for (i = 0; i < response_xferlen; i++) { 5922 if (ddi_copyout( 5923 (uint8_t *)response_dma_obj.buffer 5924 + i, (uint8_t *)response_ubuf 5925 + i, 1, mode)) { 5926 con_log(CL_ANN, (CE_WARN, 5927 "issue_mfi_smp : copy to " 5928 "user space failed")); 5929 return (DDI_FAILURE); 5930 } 5931 } 5932 } 5933 } 5934 5935 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status); 5936 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d", 5937 ksmp->cmd_status)); 5938 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status); 5939 5940 if (request_xferlen) { 5941 /* free kernel buffer */ 5942 if (mrsas_free_dma_obj(instance, request_dma_obj) != 5943 DDI_SUCCESS) 5944 return (DDI_FAILURE); 5945 } 5946 5947 if (response_xferlen) { 5948 /* free kernel buffer */ 5949 if (mrsas_free_dma_obj(instance, response_dma_obj) != 5950 DDI_SUCCESS) 5951 return (DDI_FAILURE); 5952 } 5953 5954 return (DDI_SUCCESS); 5955 } 5956 5957 /* 5958 * issue_mfi_stp 5959 */ 5960 static int 5961 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 5962 struct mrsas_cmd *cmd, int mode) 5963 { 5964 void *fis_ubuf; 5965 void *data_ubuf; 5966 uint32_t fis_xferlen = 0; 5967 uint32_t new_xfer_length1 = 0; 5968 uint32_t new_xfer_length2 = 0; 5969 uint32_t data_xferlen = 0; 5970 uint_t model; 5971 dma_obj_t fis_dma_obj; 5972 dma_obj_t data_dma_obj; 5973 struct mrsas_stp_frame *kstp; 5974 struct mrsas_stp_frame *stp; 5975 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle; 5976 int i; 5977 5978 stp = &cmd->frame->stp; 5979 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0]; 5980 5981 if (instance->adapterresetinprogress) { 5982 con_log(CL_ANN1, (CE_WARN, "Reset flag set, " 5983 "returning mfi_pkt and setting TRAN_BUSY\n")); 5984 return (DDI_FAILURE); 5985 } 5986 model = ddi_model_convert_from(mode & FMODELS); 5987 if (model == DDI_MODEL_ILP32) { 5988 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32")); 5989 5990 fis_xferlen = kstp->sgl.sge32[0].length; 5991 data_xferlen = kstp->sgl.sge32[1].length; 5992 5993 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 5994 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 5995 } else { 5996 #ifdef _ILP32 5997 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32")); 5998 5999 fis_xferlen = kstp->sgl.sge32[0].length; 6000 data_xferlen = kstp->sgl.sge32[1].length; 6001 6002 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr; 6003 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr; 6004 #else 6005 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64")); 6006 6007 fis_xferlen = kstp->sgl.sge64[0].length; 6008 data_xferlen = kstp->sgl.sge64[1].length; 6009 6010 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr; 6011 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr; 6012 #endif 6013 } 6014 6015 6016 if (fis_xferlen) { 6017 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: " 6018 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen)); 6019 6020 /* means IOCTL requires DMA */ 6021 /* allocate the data transfer buffer */ 6022 /* fis_dma_obj.size = fis_xferlen; */ 6023 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen, 6024 new_xfer_length1, PAGESIZE); 6025 fis_dma_obj.size = new_xfer_length1; 6026 fis_dma_obj.dma_attr = mrsas_generic_dma_attr; 6027 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 6028 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 6029 fis_dma_obj.dma_attr.dma_attr_sgllen = 1; 6030 fis_dma_obj.dma_attr.dma_attr_align = 1; 6031 6032 /* allocate kernel buffer for DMA */ 6033 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj, 6034 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 6035 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : " 6036 "could not allocate data transfer buffer.")); 6037 return (DDI_FAILURE); 6038 } 6039 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen); 6040 6041 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 6042 for (i = 0; i < fis_xferlen; i++) { 6043 if (ddi_copyin((uint8_t *)fis_ubuf + i, 6044 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) { 6045 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 6046 "copy from user space failed")); 6047 return (DDI_FAILURE); 6048 } 6049 } 6050 } 6051 6052 if (data_xferlen) { 6053 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p " 6054 "data_xferlen = %x", data_ubuf, data_xferlen)); 6055 6056 /* means IOCTL requires DMA */ 6057 /* allocate the data transfer buffer */ 6058 /* data_dma_obj.size = data_xferlen; */ 6059 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2, 6060 PAGESIZE); 6061 data_dma_obj.size = new_xfer_length2; 6062 data_dma_obj.dma_attr = mrsas_generic_dma_attr; 6063 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU; 6064 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU; 6065 data_dma_obj.dma_attr.dma_attr_sgllen = 1; 6066 data_dma_obj.dma_attr.dma_attr_align = 1; 6067 6068 /* allocate kernel buffer for DMA */ 6069 if (mrsas_alloc_dma_obj(instance, &data_dma_obj, 6070 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) { 6071 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 6072 "could not allocate data transfer buffer.")); 6073 return (DDI_FAILURE); 6074 } 6075 (void) memset(data_dma_obj.buffer, 0, data_xferlen); 6076 6077 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */ 6078 for (i = 0; i < data_xferlen; i++) { 6079 if (ddi_copyin((uint8_t *)data_ubuf + i, 6080 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) { 6081 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: " 6082 "copy from user space failed")); 6083 return (DDI_FAILURE); 6084 } 6085 } 6086 } 6087 6088 ddi_put8(acc_handle, &stp->cmd, kstp->cmd); 6089 ddi_put8(acc_handle, &stp->cmd_status, 0); 6090 ddi_put8(acc_handle, &stp->connection_status, 0); 6091 ddi_put8(acc_handle, &stp->target_id, kstp->target_id); 6092 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count); 6093 6094 ddi_put16(acc_handle, &stp->timeout, kstp->timeout); 6095 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len); 6096 6097 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10, 6098 DDI_DEV_AUTOINCR); 6099 6100 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64); 6101 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags); 6102 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen); 6103 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr, 6104 fis_dma_obj.dma_cookie[0].dmac_address); 6105 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen); 6106 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr, 6107 data_dma_obj.dma_cookie[0].dmac_address); 6108 6109 cmd->sync_cmd = MRSAS_TRUE; 6110 cmd->frame_count = 1; 6111 6112 if (instance->tbolt) { 6113 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 6114 } 6115 6116 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) { 6117 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed")); 6118 } else { 6119 6120 if (fis_xferlen) { 6121 for (i = 0; i < fis_xferlen; i++) { 6122 if (ddi_copyout( 6123 (uint8_t *)fis_dma_obj.buffer + i, 6124 (uint8_t *)fis_ubuf + i, 1, mode)) { 6125 con_log(CL_ANN, (CE_WARN, 6126 "issue_mfi_stp : copy to " 6127 "user space failed")); 6128 return (DDI_FAILURE); 6129 } 6130 } 6131 } 6132 } 6133 if (data_xferlen) { 6134 for (i = 0; i < data_xferlen; i++) { 6135 if (ddi_copyout( 6136 (uint8_t *)data_dma_obj.buffer + i, 6137 (uint8_t *)data_ubuf + i, 1, mode)) { 6138 con_log(CL_ANN, (CE_WARN, 6139 "issue_mfi_stp : copy to" 6140 " user space failed")); 6141 return (DDI_FAILURE); 6142 } 6143 } 6144 } 6145 6146 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status); 6147 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d", 6148 kstp->cmd_status)); 6149 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status); 6150 6151 if (fis_xferlen) { 6152 /* free kernel buffer */ 6153 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS) 6154 return (DDI_FAILURE); 6155 } 6156 6157 if (data_xferlen) { 6158 /* free kernel buffer */ 6159 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS) 6160 return (DDI_FAILURE); 6161 } 6162 6163 return (DDI_SUCCESS); 6164 } 6165 6166 /* 6167 * fill_up_drv_ver 6168 */ 6169 void 6170 fill_up_drv_ver(struct mrsas_drv_ver *dv) 6171 { 6172 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver)); 6173 6174 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$")); 6175 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris")); 6176 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas")); 6177 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION)); 6178 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE, 6179 strlen(MRSAS_RELDATE)); 6180 6181 } 6182 6183 /* 6184 * handle_drv_ioctl 6185 */ 6186 static int 6187 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 6188 int mode) 6189 { 6190 int i; 6191 int rval = DDI_SUCCESS; 6192 int *props = NULL; 6193 void *ubuf; 6194 6195 uint8_t *pci_conf_buf; 6196 uint32_t xferlen; 6197 uint32_t num_props; 6198 uint_t model; 6199 struct mrsas_dcmd_frame *kdcmd; 6200 struct mrsas_drv_ver dv; 6201 struct mrsas_pci_information pi; 6202 6203 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0]; 6204 6205 model = ddi_model_convert_from(mode & FMODELS); 6206 if (model == DDI_MODEL_ILP32) { 6207 con_log(CL_ANN1, (CE_CONT, 6208 "handle_drv_ioctl: DDI_MODEL_ILP32")); 6209 6210 xferlen = kdcmd->sgl.sge32[0].length; 6211 6212 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 6213 } else { 6214 #ifdef _ILP32 6215 con_log(CL_ANN1, (CE_CONT, 6216 "handle_drv_ioctl: DDI_MODEL_ILP32")); 6217 xferlen = kdcmd->sgl.sge32[0].length; 6218 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr; 6219 #else 6220 con_log(CL_ANN1, (CE_CONT, 6221 "handle_drv_ioctl: DDI_MODEL_LP64")); 6222 xferlen = kdcmd->sgl.sge64[0].length; 6223 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr; 6224 #endif 6225 } 6226 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: " 6227 "dataBuf=%p size=%d bytes", ubuf, xferlen)); 6228 6229 switch (kdcmd->opcode) { 6230 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION: 6231 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: " 6232 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION")); 6233 6234 fill_up_drv_ver(&dv); 6235 6236 if (ddi_copyout(&dv, ubuf, xferlen, mode)) { 6237 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 6238 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : " 6239 "copy to user space failed")); 6240 kdcmd->cmd_status = 1; 6241 rval = 1; 6242 } else { 6243 kdcmd->cmd_status = 0; 6244 } 6245 break; 6246 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION: 6247 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: " 6248 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON")); 6249 6250 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip, 6251 0, "reg", &props, &num_props)) { 6252 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 6253 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 6254 "ddi_prop_look_int_array failed")); 6255 rval = DDI_FAILURE; 6256 } else { 6257 6258 pi.busNumber = (props[0] >> 16) & 0xFF; 6259 pi.deviceNumber = (props[0] >> 11) & 0x1f; 6260 pi.functionNumber = (props[0] >> 8) & 0x7; 6261 ddi_prop_free((void *)props); 6262 } 6263 6264 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo; 6265 6266 for (i = 0; i < (sizeof (struct mrsas_pci_information) - 6267 offsetof(struct mrsas_pci_information, pciHeaderInfo)); 6268 i++) { 6269 pci_conf_buf[i] = 6270 pci_config_get8(instance->pci_handle, i); 6271 } 6272 6273 if (ddi_copyout(&pi, ubuf, xferlen, mode)) { 6274 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 6275 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : " 6276 "copy to user space failed")); 6277 kdcmd->cmd_status = 1; 6278 rval = 1; 6279 } else { 6280 kdcmd->cmd_status = 0; 6281 } 6282 break; 6283 default: 6284 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: " 6285 "invalid driver specific IOCTL opcode = 0x%x", 6286 kdcmd->opcode)); 6287 kdcmd->cmd_status = 1; 6288 rval = DDI_FAILURE; 6289 break; 6290 } 6291 6292 return (rval); 6293 } 6294 6295 /* 6296 * handle_mfi_ioctl 6297 */ 6298 static int 6299 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl, 6300 int mode) 6301 { 6302 int rval = DDI_SUCCESS; 6303 6304 struct mrsas_header *hdr; 6305 struct mrsas_cmd *cmd; 6306 6307 if (instance->tbolt) { 6308 cmd = get_raid_msg_mfi_pkt(instance); 6309 } else { 6310 cmd = mrsas_get_mfi_pkt(instance); 6311 } 6312 if (!cmd) { 6313 con_log(CL_ANN, (CE_WARN, "mr_sas: " 6314 "failed to get a cmd packet")); 6315 DTRACE_PROBE2(mfi_ioctl_err, uint16_t, 6316 instance->fw_outstanding, uint16_t, instance->max_fw_cmds); 6317 return (DDI_FAILURE); 6318 } 6319 6320 /* Clear the frame buffer and assign back the context id */ 6321 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 6322 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 6323 cmd->index); 6324 6325 hdr = (struct mrsas_header *)&ioctl->frame[0]; 6326 6327 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) { 6328 case MFI_CMD_OP_DCMD: 6329 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode); 6330 break; 6331 case MFI_CMD_OP_SMP: 6332 rval = issue_mfi_smp(instance, ioctl, cmd, mode); 6333 break; 6334 case MFI_CMD_OP_STP: 6335 rval = issue_mfi_stp(instance, ioctl, cmd, mode); 6336 break; 6337 case MFI_CMD_OP_LD_SCSI: 6338 case MFI_CMD_OP_PD_SCSI: 6339 rval = issue_mfi_pthru(instance, ioctl, cmd, mode); 6340 break; 6341 default: 6342 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: " 6343 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd)); 6344 rval = DDI_FAILURE; 6345 break; 6346 } 6347 6348 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) 6349 rval = DDI_FAILURE; 6350 6351 if (instance->tbolt) { 6352 return_raid_msg_mfi_pkt(instance, cmd); 6353 } else { 6354 mrsas_return_mfi_pkt(instance, cmd); 6355 } 6356 6357 return (rval); 6358 } 6359 6360 /* 6361 * AEN 6362 */ 6363 static int 6364 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen) 6365 { 6366 int rval = 0; 6367 6368 rval = register_mfi_aen(instance, instance->aen_seq_num, 6369 aen->class_locale_word); 6370 6371 aen->cmd_status = (uint8_t)rval; 6372 6373 return (rval); 6374 } 6375 6376 static int 6377 register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num, 6378 uint32_t class_locale_word) 6379 { 6380 int ret_val; 6381 6382 struct mrsas_cmd *cmd, *aen_cmd; 6383 struct mrsas_dcmd_frame *dcmd; 6384 union mrsas_evt_class_locale curr_aen; 6385 union mrsas_evt_class_locale prev_aen; 6386 6387 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 6388 /* 6389 * If there an AEN pending already (aen_cmd), check if the 6390 * class_locale of that pending AEN is inclusive of the new 6391 * AEN request we currently have. If it is, then we don't have 6392 * to do anything. In other words, whichever events the current 6393 * AEN request is subscribing to, have already been subscribed 6394 * to. 6395 * 6396 * If the old_cmd is _not_ inclusive, then we have to abort 6397 * that command, form a class_locale that is superset of both 6398 * old and current and re-issue to the FW 6399 */ 6400 6401 curr_aen.word = LE_32(class_locale_word); 6402 curr_aen.members.locale = LE_16(curr_aen.members.locale); 6403 aen_cmd = instance->aen_cmd; 6404 if (aen_cmd) { 6405 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle, 6406 &aen_cmd->frame->dcmd.mbox.w[1]); 6407 prev_aen.word = LE_32(prev_aen.word); 6408 prev_aen.members.locale = LE_16(prev_aen.members.locale); 6409 /* 6410 * A class whose enum value is smaller is inclusive of all 6411 * higher values. If a PROGRESS (= -1) was previously 6412 * registered, then a new registration requests for higher 6413 * classes need not be sent to FW. They are automatically 6414 * included. 6415 * 6416 * Locale numbers don't have such hierarchy. They are bitmap 6417 * values 6418 */ 6419 if ((prev_aen.members.class <= curr_aen.members.class) && 6420 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6421 curr_aen.members.locale)) { 6422 /* 6423 * Previously issued event registration includes 6424 * current request. Nothing to do. 6425 */ 6426 6427 return (0); 6428 } else { 6429 curr_aen.members.locale |= prev_aen.members.locale; 6430 6431 if (prev_aen.members.class < curr_aen.members.class) 6432 curr_aen.members.class = prev_aen.members.class; 6433 6434 ret_val = abort_aen_cmd(instance, aen_cmd); 6435 6436 if (ret_val) { 6437 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: " 6438 "failed to abort prevous AEN command")); 6439 6440 return (ret_val); 6441 } 6442 } 6443 } else { 6444 curr_aen.word = LE_32(class_locale_word); 6445 curr_aen.members.locale = LE_16(curr_aen.members.locale); 6446 } 6447 6448 if (instance->tbolt) { 6449 cmd = get_raid_msg_mfi_pkt(instance); 6450 } else { 6451 cmd = mrsas_get_mfi_pkt(instance); 6452 } 6453 6454 if (!cmd) { 6455 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding, 6456 uint16_t, instance->max_fw_cmds); 6457 return (ENOMEM); 6458 } 6459 6460 /* Clear the frame buffer and assign back the context id */ 6461 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame)); 6462 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context, 6463 cmd->index); 6464 6465 dcmd = &cmd->frame->dcmd; 6466 6467 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */ 6468 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ); 6469 6470 (void) memset(instance->mfi_evt_detail_obj.buffer, 0, 6471 sizeof (struct mrsas_evt_detail)); 6472 6473 /* Prepare DCMD for aen registration */ 6474 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD); 6475 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0); 6476 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1); 6477 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, 6478 MFI_FRAME_DIR_READ); 6479 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0); 6480 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 6481 sizeof (struct mrsas_evt_detail)); 6482 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode, 6483 MR_DCMD_CTRL_EVENT_WAIT); 6484 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num); 6485 curr_aen.members.locale = LE_16(curr_aen.members.locale); 6486 curr_aen.word = LE_32(curr_aen.word); 6487 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1], 6488 curr_aen.word); 6489 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr, 6490 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address); 6491 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length, 6492 sizeof (struct mrsas_evt_detail)); 6493 6494 instance->aen_seq_num = seq_num; 6495 6496 6497 /* 6498 * Store reference to the cmd used to register for AEN. When an 6499 * application wants us to register for AEN, we have to abort this 6500 * cmd and re-register with a new EVENT LOCALE supplied by that app 6501 */ 6502 instance->aen_cmd = cmd; 6503 6504 cmd->frame_count = 1; 6505 6506 /* Issue the aen registration frame */ 6507 /* atomic_add_16 (&instance->fw_outstanding, 1); */ 6508 if (instance->tbolt) { 6509 mr_sas_tbolt_build_mfi_cmd(instance, cmd); 6510 } 6511 instance->func_ptr->issue_cmd(cmd, instance); 6512 6513 return (0); 6514 } 6515 6516 void 6517 display_scsi_inquiry(caddr_t scsi_inq) 6518 { 6519 #define MAX_SCSI_DEVICE_CODE 14 6520 int i; 6521 char inquiry_buf[256] = {0}; 6522 int len; 6523 const char *const scsi_device_types[] = { 6524 "Direct-Access ", 6525 "Sequential-Access", 6526 "Printer ", 6527 "Processor ", 6528 "WORM ", 6529 "CD-ROM ", 6530 "Scanner ", 6531 "Optical Device ", 6532 "Medium Changer ", 6533 "Communications ", 6534 "Unknown ", 6535 "Unknown ", 6536 "Unknown ", 6537 "Enclosure ", 6538 }; 6539 6540 len = 0; 6541 6542 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: "); 6543 for (i = 8; i < 16; i++) { 6544 len += snprintf(inquiry_buf + len, 265 - len, "%c", 6545 scsi_inq[i]); 6546 } 6547 6548 len += snprintf(inquiry_buf + len, 265 - len, " Model: "); 6549 6550 for (i = 16; i < 32; i++) { 6551 len += snprintf(inquiry_buf + len, 265 - len, "%c", 6552 scsi_inq[i]); 6553 } 6554 6555 len += snprintf(inquiry_buf + len, 265 - len, " Rev: "); 6556 6557 for (i = 32; i < 36; i++) { 6558 len += snprintf(inquiry_buf + len, 265 - len, "%c", 6559 scsi_inq[i]); 6560 } 6561 6562 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 6563 6564 6565 i = scsi_inq[0] & 0x1f; 6566 6567 6568 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ", 6569 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : 6570 "Unknown "); 6571 6572 6573 len += snprintf(inquiry_buf + len, 265 - len, 6574 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 6575 6576 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) { 6577 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n"); 6578 } else { 6579 len += snprintf(inquiry_buf + len, 265 - len, "\n"); 6580 } 6581 6582 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf)); 6583 } 6584 6585 static void 6586 io_timeout_checker(void *arg) 6587 { 6588 struct scsi_pkt *pkt; 6589 struct mrsas_instance *instance = arg; 6590 struct mrsas_cmd *cmd = NULL; 6591 struct mrsas_header *hdr; 6592 int time = 0; 6593 int counter = 0; 6594 struct mlist_head *pos, *next; 6595 mlist_t process_list; 6596 6597 if (instance->adapterresetinprogress == 1) { 6598 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:" 6599 " reset in progress")); 6600 6601 instance->timeout_id = timeout(io_timeout_checker, 6602 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 6603 return; 6604 } 6605 6606 /* See if this check needs to be in the beginning or last in ISR */ 6607 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) { 6608 dev_err(instance->dip, CE_WARN, "io_timeout_checker: " 6609 "FW Fault, calling reset adapter"); 6610 dev_err(instance->dip, CE_CONT, "io_timeout_checker: " 6611 "fw_outstanding 0x%X max_fw_cmds 0x%X", 6612 instance->fw_outstanding, instance->max_fw_cmds); 6613 if (instance->adapterresetinprogress == 0) { 6614 instance->adapterresetinprogress = 1; 6615 if (instance->tbolt) 6616 (void) mrsas_tbolt_reset_ppc(instance); 6617 else 6618 (void) mrsas_reset_ppc(instance); 6619 instance->adapterresetinprogress = 0; 6620 } 6621 instance->timeout_id = timeout(io_timeout_checker, 6622 (void *) instance, drv_usectohz(MRSAS_1_SECOND)); 6623 return; 6624 } 6625 6626 INIT_LIST_HEAD(&process_list); 6627 6628 mutex_enter(&instance->cmd_pend_mtx); 6629 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) { 6630 cmd = mlist_entry(pos, struct mrsas_cmd, list); 6631 6632 if (cmd == NULL) { 6633 continue; 6634 } 6635 6636 if (cmd->sync_cmd == MRSAS_TRUE) { 6637 hdr = (struct mrsas_header *)&cmd->frame->hdr; 6638 if (hdr == NULL) { 6639 continue; 6640 } 6641 time = --cmd->drv_pkt_time; 6642 } else { 6643 pkt = cmd->pkt; 6644 if (pkt == NULL) { 6645 continue; 6646 } 6647 time = --cmd->drv_pkt_time; 6648 } 6649 if (time <= 0) { 6650 dev_err(instance->dip, CE_WARN, "%llx: " 6651 "io_timeout_checker: TIMING OUT: pkt: %p, " 6652 "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X", 6653 gethrtime(), (void *)pkt, (void *)cmd, 6654 instance->fw_outstanding, instance->max_fw_cmds); 6655 6656 counter++; 6657 break; 6658 } 6659 } 6660 mutex_exit(&instance->cmd_pend_mtx); 6661 6662 if (counter) { 6663 if (instance->disable_online_ctrl_reset == 1) { 6664 dev_err(instance->dip, CE_WARN, "%s(): OCR is NOT " 6665 "supported by Firmware, KILL adapter!!!", 6666 __func__); 6667 6668 if (instance->tbolt) 6669 mrsas_tbolt_kill_adapter(instance); 6670 else 6671 (void) mrsas_kill_adapter(instance); 6672 6673 return; 6674 } else { 6675 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) { 6676 if (instance->adapterresetinprogress == 0) { 6677 if (instance->tbolt) { 6678 (void) mrsas_tbolt_reset_ppc( 6679 instance); 6680 } else { 6681 (void) mrsas_reset_ppc( 6682 instance); 6683 } 6684 } 6685 } else { 6686 dev_err(instance->dip, CE_WARN, 6687 "io_timeout_checker: " 6688 "cmd %p cmd->index %d " 6689 "timed out even after 3 resets: " 6690 "so KILL adapter", (void *)cmd, cmd->index); 6691 6692 mrsas_print_cmd_details(instance, cmd, 0xDD); 6693 6694 if (instance->tbolt) 6695 mrsas_tbolt_kill_adapter(instance); 6696 else 6697 (void) mrsas_kill_adapter(instance); 6698 return; 6699 } 6700 } 6701 } 6702 con_log(CL_ANN, (CE_NOTE, "mrsas: " 6703 "schedule next timeout check: " 6704 "do timeout \n")); 6705 instance->timeout_id = 6706 timeout(io_timeout_checker, (void *)instance, 6707 drv_usectohz(MRSAS_1_SECOND)); 6708 } 6709 6710 static uint32_t 6711 read_fw_status_reg_ppc(struct mrsas_instance *instance) 6712 { 6713 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance)); 6714 } 6715 6716 static void 6717 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance) 6718 { 6719 struct scsi_pkt *pkt; 6720 atomic_inc_16(&instance->fw_outstanding); 6721 6722 pkt = cmd->pkt; 6723 if (pkt) { 6724 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:" 6725 "ISSUED CMD TO FW : called : cmd:" 6726 ": %p instance : %p pkt : %p pkt_time : %x\n", 6727 gethrtime(), (void *)cmd, (void *)instance, 6728 (void *)pkt, cmd->drv_pkt_time)); 6729 if (instance->adapterresetinprogress) { 6730 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; 6731 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer")); 6732 } else { 6733 push_pending_mfi_pkt(instance, cmd); 6734 } 6735 6736 } else { 6737 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:" 6738 "ISSUED CMD TO FW : called : cmd : %p, instance: %p" 6739 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance)); 6740 } 6741 6742 mutex_enter(&instance->reg_write_mtx); 6743 /* Issue the command to the FW */ 6744 WR_IB_PICK_QPORT((cmd->frame_phys_addr) | 6745 (((cmd->frame_count - 1) << 1) | 1), instance); 6746 mutex_exit(&instance->reg_write_mtx); 6747 6748 } 6749 6750 /* 6751 * issue_cmd_in_sync_mode 6752 */ 6753 static int 6754 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance, 6755 struct mrsas_cmd *cmd) 6756 { 6757 int i; 6758 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; 6759 struct mrsas_header *hdr = &cmd->frame->hdr; 6760 6761 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called")); 6762 6763 if (instance->adapterresetinprogress) { 6764 cmd->drv_pkt_time = ddi_get16( 6765 cmd->frame_dma_obj.acc_handle, &hdr->timeout); 6766 if (cmd->drv_pkt_time < debug_timeout_g) 6767 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; 6768 6769 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: " 6770 "issue and return in reset case\n")); 6771 WR_IB_PICK_QPORT((cmd->frame_phys_addr) | 6772 (((cmd->frame_count - 1) << 1) | 1), instance); 6773 6774 return (DDI_SUCCESS); 6775 } else { 6776 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n")); 6777 push_pending_mfi_pkt(instance, cmd); 6778 } 6779 6780 cmd->cmd_status = ENODATA; 6781 6782 mutex_enter(&instance->reg_write_mtx); 6783 /* Issue the command to the FW */ 6784 WR_IB_PICK_QPORT((cmd->frame_phys_addr) | 6785 (((cmd->frame_count - 1) << 1) | 1), instance); 6786 mutex_exit(&instance->reg_write_mtx); 6787 6788 mutex_enter(&instance->int_cmd_mtx); 6789 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) { 6790 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx); 6791 } 6792 mutex_exit(&instance->int_cmd_mtx); 6793 6794 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done")); 6795 6796 if (i < (msecs -1)) { 6797 return (DDI_SUCCESS); 6798 } else { 6799 return (DDI_FAILURE); 6800 } 6801 } 6802 6803 /* 6804 * issue_cmd_in_poll_mode 6805 */ 6806 static int 6807 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance, 6808 struct mrsas_cmd *cmd) 6809 { 6810 int i; 6811 uint16_t flags; 6812 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC; 6813 struct mrsas_header *frame_hdr; 6814 6815 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called")); 6816 6817 frame_hdr = (struct mrsas_header *)cmd->frame; 6818 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status, 6819 MFI_CMD_STATUS_POLL_MODE); 6820 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags); 6821 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 6822 6823 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags); 6824 6825 /* issue the frame using inbound queue port */ 6826 WR_IB_PICK_QPORT((cmd->frame_phys_addr) | 6827 (((cmd->frame_count - 1) << 1) | 1), instance); 6828 6829 /* wait for cmd_status to change from 0xFF */ 6830 for (i = 0; i < msecs && ( 6831 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 6832 == MFI_CMD_STATUS_POLL_MODE); i++) { 6833 drv_usecwait(MILLISEC); /* wait for 1000 usecs */ 6834 } 6835 6836 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status) 6837 == MFI_CMD_STATUS_POLL_MODE) { 6838 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: " 6839 "cmd polling timed out")); 6840 return (DDI_FAILURE); 6841 } 6842 6843 return (DDI_SUCCESS); 6844 } 6845 6846 static void 6847 enable_intr_ppc(struct mrsas_instance *instance) 6848 { 6849 uint32_t mask; 6850 6851 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called")); 6852 6853 if (instance->skinny) { 6854 /* For SKINNY, write ~0x1, from BSD's mfi driver. */ 6855 WR_OB_INTR_MASK(0xfffffffe, instance); 6856 } else { 6857 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */ 6858 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance); 6859 6860 /* WR_OB_INTR_MASK(~0x80000000, instance); */ 6861 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance); 6862 } 6863 6864 /* dummy read to force PCI flush */ 6865 mask = RD_OB_INTR_MASK(instance); 6866 6867 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: " 6868 "outbound_intr_mask = 0x%x", mask)); 6869 } 6870 6871 static void 6872 disable_intr_ppc(struct mrsas_instance *instance) 6873 { 6874 uint32_t mask; 6875 6876 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called")); 6877 6878 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : " 6879 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 6880 6881 /* For now, assume there are no extras needed for Skinny support. */ 6882 6883 WR_OB_INTR_MASK(OB_INTR_MASK, instance); 6884 6885 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : " 6886 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance))); 6887 6888 /* dummy read to force PCI flush */ 6889 mask = RD_OB_INTR_MASK(instance); 6890 #ifdef lint 6891 mask = mask; 6892 #endif 6893 } 6894 6895 static int 6896 intr_ack_ppc(struct mrsas_instance *instance) 6897 { 6898 uint32_t status; 6899 int ret = DDI_INTR_CLAIMED; 6900 6901 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called")); 6902 6903 /* check if it is our interrupt */ 6904 status = RD_OB_INTR_STATUS(instance); 6905 6906 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status)); 6907 6908 /* 6909 * NOTE: Some drivers call out SKINNY here, but the return is the same 6910 * for SKINNY and 2108. 6911 */ 6912 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) { 6913 ret = DDI_INTR_UNCLAIMED; 6914 } 6915 6916 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 6917 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST); 6918 ret = DDI_INTR_UNCLAIMED; 6919 } 6920 6921 if (ret == DDI_INTR_UNCLAIMED) { 6922 return (ret); 6923 } 6924 6925 /* 6926 * Clear the interrupt by writing back the same value. 6927 * Another case where SKINNY is slightly different. 6928 */ 6929 if (instance->skinny) { 6930 WR_OB_INTR_STATUS(status, instance); 6931 } else { 6932 WR_OB_DOORBELL_CLEAR(status, instance); 6933 } 6934 6935 /* dummy READ */ 6936 status = RD_OB_INTR_STATUS(instance); 6937 6938 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared")); 6939 6940 return (ret); 6941 } 6942 6943 /* 6944 * Marks HBA as bad. This will be called either when an 6945 * IO packet times out even after 3 FW resets 6946 * or FW is found to be fault even after 3 continuous resets. 6947 */ 6948 6949 static int 6950 mrsas_kill_adapter(struct mrsas_instance *instance) 6951 { 6952 if (instance->deadadapter == 1) 6953 return (DDI_FAILURE); 6954 6955 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: " 6956 "Writing to doorbell with MFI_STOP_ADP ")); 6957 mutex_enter(&instance->ocr_flags_mtx); 6958 instance->deadadapter = 1; 6959 mutex_exit(&instance->ocr_flags_mtx); 6960 instance->func_ptr->disable_intr(instance); 6961 WR_IB_DOORBELL(MFI_STOP_ADP, instance); 6962 (void) mrsas_complete_pending_cmds(instance); 6963 return (DDI_SUCCESS); 6964 } 6965 6966 6967 static int 6968 mrsas_reset_ppc(struct mrsas_instance *instance) 6969 { 6970 uint32_t status; 6971 uint32_t retry = 0; 6972 uint32_t cur_abs_reg_val; 6973 uint32_t fw_state; 6974 6975 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__)); 6976 6977 if (instance->deadadapter == 1) { 6978 dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: " 6979 "no more resets as HBA has been marked dead "); 6980 return (DDI_FAILURE); 6981 } 6982 mutex_enter(&instance->ocr_flags_mtx); 6983 instance->adapterresetinprogress = 1; 6984 mutex_exit(&instance->ocr_flags_mtx); 6985 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress " 6986 "flag set, time %llx", gethrtime())); 6987 6988 instance->func_ptr->disable_intr(instance); 6989 retry_reset: 6990 WR_IB_WRITE_SEQ(0, instance); 6991 WR_IB_WRITE_SEQ(4, instance); 6992 WR_IB_WRITE_SEQ(0xb, instance); 6993 WR_IB_WRITE_SEQ(2, instance); 6994 WR_IB_WRITE_SEQ(7, instance); 6995 WR_IB_WRITE_SEQ(0xd, instance); 6996 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written " 6997 "to write sequence register\n")); 6998 delay(100 * drv_usectohz(MILLISEC)); 6999 status = RD_OB_DRWE(instance); 7000 7001 while (!(status & DIAG_WRITE_ENABLE)) { 7002 delay(100 * drv_usectohz(MILLISEC)); 7003 status = RD_OB_DRWE(instance); 7004 if (retry++ == 100) { 7005 dev_err(instance->dip, CE_WARN, 7006 "mrsas_reset_ppc: DRWE bit " 7007 "check retry count %d", retry); 7008 return (DDI_FAILURE); 7009 } 7010 } 7011 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance); 7012 delay(100 * drv_usectohz(MILLISEC)); 7013 status = RD_OB_DRWE(instance); 7014 while (status & DIAG_RESET_ADAPTER) { 7015 delay(100 * drv_usectohz(MILLISEC)); 7016 status = RD_OB_DRWE(instance); 7017 if (retry++ == 100) { 7018 dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: " 7019 "RESET FAILED. KILL adapter called."); 7020 7021 (void) mrsas_kill_adapter(instance); 7022 return (DDI_FAILURE); 7023 } 7024 } 7025 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete")); 7026 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7027 "Calling mfi_state_transition_to_ready")); 7028 7029 /* Mark HBA as bad, if FW is fault after 3 continuous resets */ 7030 if (mfi_state_transition_to_ready(instance) || 7031 debug_fw_faults_after_ocr_g == 1) { 7032 cur_abs_reg_val = 7033 instance->func_ptr->read_fw_status_reg(instance); 7034 fw_state = cur_abs_reg_val & MFI_STATE_MASK; 7035 7036 #ifdef OCRDEBUG 7037 con_log(CL_ANN1, (CE_NOTE, 7038 "mrsas_reset_ppc :before fake: FW is not ready " 7039 "FW state = 0x%x", fw_state)); 7040 if (debug_fw_faults_after_ocr_g == 1) 7041 fw_state = MFI_STATE_FAULT; 7042 #endif 7043 7044 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready " 7045 "FW state = 0x%x", fw_state)); 7046 7047 if (fw_state == MFI_STATE_FAULT) { 7048 /* increment the count */ 7049 instance->fw_fault_count_after_ocr++; 7050 if (instance->fw_fault_count_after_ocr 7051 < MAX_FW_RESET_COUNT) { 7052 dev_err(instance->dip, CE_WARN, 7053 "mrsas_reset_ppc: " 7054 "FW is in fault after OCR count %d " 7055 "Retry Reset", 7056 instance->fw_fault_count_after_ocr); 7057 goto retry_reset; 7058 7059 } else { 7060 dev_err(instance->dip, CE_WARN, 7061 "mrsas_reset_ppc: " 7062 "Max Reset Count exceeded >%d" 7063 "Mark HBA as bad, KILL adapter", 7064 MAX_FW_RESET_COUNT); 7065 7066 (void) mrsas_kill_adapter(instance); 7067 return (DDI_FAILURE); 7068 } 7069 } 7070 } 7071 /* reset the counter as FW is up after OCR */ 7072 instance->fw_fault_count_after_ocr = 0; 7073 7074 7075 ddi_put32(instance->mfi_internal_dma_obj.acc_handle, 7076 instance->producer, 0); 7077 7078 ddi_put32(instance->mfi_internal_dma_obj.acc_handle, 7079 instance->consumer, 0); 7080 7081 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7082 " after resetting produconsumer chck indexs:" 7083 "producer %x consumer %x", *instance->producer, 7084 *instance->consumer)); 7085 7086 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7087 "Calling mrsas_issue_init_mfi")); 7088 (void) mrsas_issue_init_mfi(instance); 7089 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7090 "mrsas_issue_init_mfi Done")); 7091 7092 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7093 "Calling mrsas_print_pending_cmd\n")); 7094 (void) mrsas_print_pending_cmds(instance); 7095 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7096 "mrsas_print_pending_cmd done\n")); 7097 7098 instance->func_ptr->enable_intr(instance); 7099 instance->fw_outstanding = 0; 7100 7101 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7102 "Calling mrsas_issue_pending_cmds")); 7103 (void) mrsas_issue_pending_cmds(instance); 7104 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7105 "issue_pending_cmds done.\n")); 7106 7107 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7108 "Calling aen registration")); 7109 7110 7111 instance->aen_cmd->retry_count_for_ocr = 0; 7112 instance->aen_cmd->drv_pkt_time = 0; 7113 7114 instance->func_ptr->issue_cmd(instance->aen_cmd, instance); 7115 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n")); 7116 7117 mutex_enter(&instance->ocr_flags_mtx); 7118 instance->adapterresetinprogress = 0; 7119 mutex_exit(&instance->ocr_flags_mtx); 7120 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: " 7121 "adpterresetinprogress flag unset")); 7122 7123 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n")); 7124 return (DDI_SUCCESS); 7125 } 7126 7127 /* 7128 * FMA functions. 7129 */ 7130 int 7131 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd) 7132 { 7133 int ret = DDI_SUCCESS; 7134 7135 if (cmd != NULL && 7136 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) != 7137 DDI_SUCCESS) { 7138 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 7139 if (cmd->pkt != NULL) { 7140 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 7141 cmd->pkt->pkt_statistics = 0; 7142 } 7143 ret = DDI_FAILURE; 7144 } 7145 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle) 7146 != DDI_SUCCESS) { 7147 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 7148 if (cmd != NULL && cmd->pkt != NULL) { 7149 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 7150 cmd->pkt->pkt_statistics = 0; 7151 } 7152 ret = DDI_FAILURE; 7153 } 7154 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) != 7155 DDI_SUCCESS) { 7156 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 7157 if (cmd != NULL && cmd->pkt != NULL) { 7158 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 7159 cmd->pkt->pkt_statistics = 0; 7160 } 7161 ret = DDI_FAILURE; 7162 } 7163 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) { 7164 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED); 7165 7166 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0); 7167 7168 if (cmd != NULL && cmd->pkt != NULL) { 7169 cmd->pkt->pkt_reason = CMD_TRAN_ERR; 7170 cmd->pkt->pkt_statistics = 0; 7171 } 7172 ret = DDI_FAILURE; 7173 } 7174 7175 return (ret); 7176 } 7177 7178 /*ARGSUSED*/ 7179 static int 7180 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 7181 { 7182 /* 7183 * as the driver can always deal with an error in any dma or 7184 * access handle, we can just return the fme_status value. 7185 */ 7186 pci_ereport_post(dip, err, NULL); 7187 return (err->fme_status); 7188 } 7189 7190 static void 7191 mrsas_fm_init(struct mrsas_instance *instance) 7192 { 7193 /* Need to change iblock to priority for new MSI intr */ 7194 ddi_iblock_cookie_t fm_ibc; 7195 7196 /* Only register with IO Fault Services if we have some capability */ 7197 if (instance->fm_capabilities) { 7198 /* Adjust access and dma attributes for FMA */ 7199 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC; 7200 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 7201 7202 /* 7203 * Register capabilities with IO Fault Services. 7204 * fm_capabilities will be updated to indicate 7205 * capabilities actually supported (not requested.) 7206 */ 7207 7208 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc); 7209 7210 /* 7211 * Initialize pci ereport capabilities if ereport 7212 * capable (should always be.) 7213 */ 7214 7215 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 7216 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 7217 pci_ereport_setup(instance->dip); 7218 } 7219 7220 /* 7221 * Register error callback if error callback capable. 7222 */ 7223 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 7224 ddi_fm_handler_register(instance->dip, 7225 mrsas_fm_error_cb, (void*) instance); 7226 } 7227 } else { 7228 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 7229 mrsas_generic_dma_attr.dma_attr_flags = 0; 7230 } 7231 } 7232 7233 static void 7234 mrsas_fm_fini(struct mrsas_instance *instance) 7235 { 7236 /* Only unregister FMA capabilities if registered */ 7237 if (instance->fm_capabilities) { 7238 /* 7239 * Un-register error callback if error callback capable. 7240 */ 7241 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 7242 ddi_fm_handler_unregister(instance->dip); 7243 } 7244 7245 /* 7246 * Release any resources allocated by pci_ereport_setup() 7247 */ 7248 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) || 7249 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) { 7250 pci_ereport_teardown(instance->dip); 7251 } 7252 7253 /* Unregister from IO Fault Services */ 7254 ddi_fm_fini(instance->dip); 7255 7256 /* Adjust access and dma attributes for FMA */ 7257 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC; 7258 mrsas_generic_dma_attr.dma_attr_flags = 0; 7259 } 7260 } 7261 7262 int 7263 mrsas_check_acc_handle(ddi_acc_handle_t handle) 7264 { 7265 ddi_fm_error_t de; 7266 7267 if (handle == NULL) { 7268 return (DDI_FAILURE); 7269 } 7270 7271 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 7272 7273 return (de.fme_status); 7274 } 7275 7276 int 7277 mrsas_check_dma_handle(ddi_dma_handle_t handle) 7278 { 7279 ddi_fm_error_t de; 7280 7281 if (handle == NULL) { 7282 return (DDI_FAILURE); 7283 } 7284 7285 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 7286 7287 return (de.fme_status); 7288 } 7289 7290 void 7291 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail) 7292 { 7293 uint64_t ena; 7294 char buf[FM_MAX_CLASS]; 7295 7296 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 7297 ena = fm_ena_generate(0, FM_ENA_FMT1); 7298 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) { 7299 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP, 7300 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL); 7301 } 7302 } 7303 7304 static int 7305 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type) 7306 { 7307 7308 dev_info_t *dip = instance->dip; 7309 int avail, actual, count; 7310 int i, flag, ret; 7311 7312 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x", 7313 intr_type)); 7314 7315 /* Get number of interrupts */ 7316 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 7317 if ((ret != DDI_SUCCESS) || (count == 0)) { 7318 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:" 7319 "ret %d count %d", ret, count)); 7320 7321 return (DDI_FAILURE); 7322 } 7323 7324 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count)); 7325 7326 /* Get number of available interrupts */ 7327 ret = ddi_intr_get_navail(dip, intr_type, &avail); 7328 if ((ret != DDI_SUCCESS) || (avail == 0)) { 7329 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:" 7330 "ret %d avail %d", ret, avail)); 7331 7332 return (DDI_FAILURE); 7333 } 7334 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail)); 7335 7336 /* Only one interrupt routine. So limit the count to 1 */ 7337 if (count > 1) { 7338 count = 1; 7339 } 7340 7341 /* 7342 * Allocate an array of interrupt handlers. Currently we support 7343 * only one interrupt. The framework can be extended later. 7344 */ 7345 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t); 7346 instance->intr_htable = kmem_zalloc(instance->intr_htable_size, 7347 KM_SLEEP); 7348 ASSERT(instance->intr_htable); 7349 7350 flag = ((intr_type == DDI_INTR_TYPE_MSI) || 7351 (intr_type == DDI_INTR_TYPE_MSIX)) ? 7352 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL; 7353 7354 /* Allocate interrupt */ 7355 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0, 7356 count, &actual, flag); 7357 7358 if ((ret != DDI_SUCCESS) || (actual == 0)) { 7359 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 7360 "avail = %d", avail)); 7361 goto mrsas_free_htable; 7362 } 7363 7364 if (actual < count) { 7365 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 7366 "Requested = %d Received = %d", count, actual)); 7367 } 7368 instance->intr_cnt = actual; 7369 7370 /* 7371 * Get the priority of the interrupt allocated. 7372 */ 7373 if ((ret = ddi_intr_get_pri(instance->intr_htable[0], 7374 &instance->intr_pri)) != DDI_SUCCESS) { 7375 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 7376 "get priority call failed")); 7377 goto mrsas_free_handles; 7378 } 7379 7380 /* 7381 * Test for high level mutex. we don't support them. 7382 */ 7383 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) { 7384 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: " 7385 "High level interrupts not supported.")); 7386 goto mrsas_free_handles; 7387 } 7388 7389 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ", 7390 instance->intr_pri)); 7391 7392 /* Call ddi_intr_add_handler() */ 7393 for (i = 0; i < actual; i++) { 7394 ret = ddi_intr_add_handler(instance->intr_htable[i], 7395 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance, 7396 (caddr_t)(uintptr_t)i); 7397 7398 if (ret != DDI_SUCCESS) { 7399 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:" 7400 "failed %d", ret)); 7401 goto mrsas_free_handles; 7402 } 7403 7404 } 7405 7406 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done")); 7407 7408 if ((ret = ddi_intr_get_cap(instance->intr_htable[0], 7409 &instance->intr_cap)) != DDI_SUCCESS) { 7410 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d", 7411 ret)); 7412 goto mrsas_free_handlers; 7413 } 7414 7415 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 7416 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable")); 7417 7418 (void) ddi_intr_block_enable(instance->intr_htable, 7419 instance->intr_cnt); 7420 } else { 7421 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable")); 7422 7423 for (i = 0; i < instance->intr_cnt; i++) { 7424 (void) ddi_intr_enable(instance->intr_htable[i]); 7425 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns " 7426 "%d", i)); 7427 } 7428 } 7429 7430 return (DDI_SUCCESS); 7431 7432 mrsas_free_handlers: 7433 for (i = 0; i < actual; i++) 7434 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 7435 7436 mrsas_free_handles: 7437 for (i = 0; i < actual; i++) 7438 (void) ddi_intr_free(instance->intr_htable[i]); 7439 7440 mrsas_free_htable: 7441 if (instance->intr_htable != NULL) 7442 kmem_free(instance->intr_htable, instance->intr_htable_size); 7443 7444 instance->intr_htable = NULL; 7445 instance->intr_htable_size = 0; 7446 7447 return (DDI_FAILURE); 7448 7449 } 7450 7451 7452 static void 7453 mrsas_rem_intrs(struct mrsas_instance *instance) 7454 { 7455 int i; 7456 7457 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called")); 7458 7459 /* Disable all interrupts first */ 7460 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) { 7461 (void) ddi_intr_block_disable(instance->intr_htable, 7462 instance->intr_cnt); 7463 } else { 7464 for (i = 0; i < instance->intr_cnt; i++) { 7465 (void) ddi_intr_disable(instance->intr_htable[i]); 7466 } 7467 } 7468 7469 /* Remove all the handlers */ 7470 7471 for (i = 0; i < instance->intr_cnt; i++) { 7472 (void) ddi_intr_remove_handler(instance->intr_htable[i]); 7473 (void) ddi_intr_free(instance->intr_htable[i]); 7474 } 7475 7476 if (instance->intr_htable != NULL) 7477 kmem_free(instance->intr_htable, instance->intr_htable_size); 7478 7479 instance->intr_htable = NULL; 7480 instance->intr_htable_size = 0; 7481 7482 } 7483 7484 static int 7485 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags, 7486 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 7487 { 7488 struct mrsas_instance *instance; 7489 int config; 7490 int rval = NDI_SUCCESS; 7491 7492 char *ptr = NULL; 7493 int tgt, lun; 7494 7495 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op)); 7496 7497 if ((instance = ddi_get_soft_state(mrsas_state, 7498 ddi_get_instance(parent))) == NULL) { 7499 return (NDI_FAILURE); 7500 } 7501 7502 /* Hold nexus during bus_config */ 7503 ndi_devi_enter(parent, &config); 7504 switch (op) { 7505 case BUS_CONFIG_ONE: { 7506 7507 /* parse wwid/target name out of name given */ 7508 if ((ptr = strchr((char *)arg, '@')) == NULL) { 7509 rval = NDI_FAILURE; 7510 break; 7511 } 7512 ptr++; 7513 7514 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) { 7515 rval = NDI_FAILURE; 7516 break; 7517 } 7518 7519 if (lun == 0) { 7520 rval = mrsas_config_ld(instance, tgt, lun, childp); 7521 #ifdef PDSUPPORT 7522 } else if ((instance->tbolt || instance->skinny) && lun != 0) { 7523 rval = mrsas_tbolt_config_pd(instance, 7524 tgt, lun, childp); 7525 #endif 7526 } else { 7527 rval = NDI_FAILURE; 7528 } 7529 7530 break; 7531 } 7532 case BUS_CONFIG_DRIVER: 7533 case BUS_CONFIG_ALL: { 7534 7535 rval = mrsas_config_all_devices(instance); 7536 7537 rval = NDI_SUCCESS; 7538 break; 7539 } 7540 } 7541 7542 if (rval == NDI_SUCCESS) { 7543 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0); 7544 7545 } 7546 ndi_devi_exit(parent, config); 7547 7548 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x", 7549 rval)); 7550 return (rval); 7551 } 7552 7553 static int 7554 mrsas_config_all_devices(struct mrsas_instance *instance) 7555 { 7556 int rval, tgt; 7557 7558 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) { 7559 (void) mrsas_config_ld(instance, tgt, 0, NULL); 7560 7561 } 7562 7563 #ifdef PDSUPPORT 7564 /* Config PD devices connected to the card */ 7565 if (instance->tbolt || instance->skinny) { 7566 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) { 7567 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL); 7568 } 7569 } 7570 #endif 7571 7572 rval = NDI_SUCCESS; 7573 return (rval); 7574 } 7575 7576 static int 7577 mrsas_parse_devname(char *devnm, int *tgt, int *lun) 7578 { 7579 char devbuf[SCSI_MAXNAMELEN]; 7580 char *addr; 7581 char *p, *tp, *lp; 7582 long num; 7583 7584 /* Parse dev name and address */ 7585 (void) strcpy(devbuf, devnm); 7586 addr = ""; 7587 for (p = devbuf; *p != '\0'; p++) { 7588 if (*p == '@') { 7589 addr = p + 1; 7590 *p = '\0'; 7591 } else if (*p == ':') { 7592 *p = '\0'; 7593 break; 7594 } 7595 } 7596 7597 /* Parse target and lun */ 7598 for (p = tp = addr, lp = NULL; *p != '\0'; p++) { 7599 if (*p == ',') { 7600 lp = p + 1; 7601 *p = '\0'; 7602 break; 7603 } 7604 } 7605 if (tgt && tp) { 7606 if (ddi_strtol(tp, NULL, 0x10, &num)) { 7607 return (DDI_FAILURE); /* Can declare this as constant */ 7608 } 7609 *tgt = (int)num; 7610 } 7611 if (lun && lp) { 7612 if (ddi_strtol(lp, NULL, 0x10, &num)) { 7613 return (DDI_FAILURE); 7614 } 7615 *lun = (int)num; 7616 } 7617 return (DDI_SUCCESS); /* Success case */ 7618 } 7619 7620 static int 7621 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt, 7622 uint8_t lun, dev_info_t **ldip) 7623 { 7624 struct scsi_device *sd; 7625 dev_info_t *child; 7626 int rval; 7627 7628 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d", 7629 tgt, lun)); 7630 7631 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) { 7632 if (ldip) { 7633 *ldip = child; 7634 } 7635 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) { 7636 rval = mrsas_service_evt(instance, tgt, 0, 7637 MRSAS_EVT_UNCONFIG_TGT, NULL); 7638 con_log(CL_ANN1, (CE_WARN, 7639 "mr_sas: DELETING STALE ENTRY rval = %d " 7640 "tgt id = %d ", rval, tgt)); 7641 return (NDI_FAILURE); 7642 } 7643 return (NDI_SUCCESS); 7644 } 7645 7646 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP); 7647 sd->sd_address.a_hba_tran = instance->tran; 7648 sd->sd_address.a_target = (uint16_t)tgt; 7649 sd->sd_address.a_lun = (uint8_t)lun; 7650 7651 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) 7652 rval = mrsas_config_scsi_device(instance, sd, ldip); 7653 else 7654 rval = NDI_FAILURE; 7655 7656 /* sd_unprobe is blank now. Free buffer manually */ 7657 if (sd->sd_inq) { 7658 kmem_free(sd->sd_inq, SUN_INQSIZE); 7659 sd->sd_inq = (struct scsi_inquiry *)NULL; 7660 } 7661 7662 kmem_free(sd, sizeof (struct scsi_device)); 7663 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d", 7664 rval)); 7665 return (rval); 7666 } 7667 7668 int 7669 mrsas_config_scsi_device(struct mrsas_instance *instance, 7670 struct scsi_device *sd, dev_info_t **dipp) 7671 { 7672 char *nodename = NULL; 7673 char **compatible = NULL; 7674 int ncompatible = 0; 7675 char *childname; 7676 dev_info_t *ldip = NULL; 7677 int tgt = sd->sd_address.a_target; 7678 int lun = sd->sd_address.a_lun; 7679 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK; 7680 int rval; 7681 7682 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun)); 7683 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype, 7684 NULL, &nodename, &compatible, &ncompatible); 7685 7686 if (nodename == NULL) { 7687 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver " 7688 "for t%dL%d", tgt, lun)); 7689 rval = NDI_FAILURE; 7690 goto finish; 7691 } 7692 7693 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename; 7694 con_log(CL_DLEVEL1, (CE_NOTE, 7695 "mr_sas: Childname = %2s nodename = %s", childname, nodename)); 7696 7697 /* Create a dev node */ 7698 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip); 7699 con_log(CL_DLEVEL1, (CE_NOTE, 7700 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval)); 7701 if (rval == NDI_SUCCESS) { 7702 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) != 7703 DDI_PROP_SUCCESS) { 7704 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 7705 "property for t%dl%d target", tgt, lun)); 7706 rval = NDI_FAILURE; 7707 goto finish; 7708 } 7709 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) != 7710 DDI_PROP_SUCCESS) { 7711 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 7712 "property for t%dl%d lun", tgt, lun)); 7713 rval = NDI_FAILURE; 7714 goto finish; 7715 } 7716 7717 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip, 7718 "compatible", compatible, ncompatible) != 7719 DDI_PROP_SUCCESS) { 7720 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create " 7721 "property for t%dl%d compatible", tgt, lun)); 7722 rval = NDI_FAILURE; 7723 goto finish; 7724 } 7725 7726 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH); 7727 if (rval != NDI_SUCCESS) { 7728 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online " 7729 "t%dl%d", tgt, lun)); 7730 ndi_prop_remove_all(ldip); 7731 (void) ndi_devi_free(ldip); 7732 } else { 7733 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :" 7734 "0 t%dl%d", tgt, lun)); 7735 } 7736 7737 } 7738 finish: 7739 if (dipp) { 7740 *dipp = ldip; 7741 } 7742 7743 con_log(CL_DLEVEL1, (CE_NOTE, 7744 "mr_sas: config_scsi_device rval = %d t%dL%d", 7745 rval, tgt, lun)); 7746 scsi_hba_nodename_compatible_free(nodename, compatible); 7747 return (rval); 7748 } 7749 7750 /*ARGSUSED*/ 7751 int 7752 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event, 7753 uint64_t wwn) 7754 { 7755 struct mrsas_eventinfo *mrevt = NULL; 7756 7757 con_log(CL_ANN1, (CE_NOTE, 7758 "mrsas_service_evt called for t%dl%d event = %d", 7759 tgt, lun, event)); 7760 7761 if ((instance->taskq == NULL) || (mrevt = 7762 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) { 7763 return (ENOMEM); 7764 } 7765 7766 mrevt->instance = instance; 7767 mrevt->tgt = tgt; 7768 mrevt->lun = lun; 7769 mrevt->event = event; 7770 mrevt->wwn = wwn; 7771 7772 if ((ddi_taskq_dispatch(instance->taskq, 7773 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) != 7774 DDI_SUCCESS) { 7775 con_log(CL_ANN1, (CE_NOTE, 7776 "mr_sas: Event task failed for t%dl%d event = %d", 7777 tgt, lun, event)); 7778 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 7779 return (DDI_FAILURE); 7780 } 7781 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event); 7782 return (DDI_SUCCESS); 7783 } 7784 7785 static void 7786 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt) 7787 { 7788 struct mrsas_instance *instance = mrevt->instance; 7789 dev_info_t *dip, *pdip; 7790 int circ1 = 0; 7791 char *devname; 7792 7793 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for" 7794 " tgt %d lun %d event %d", 7795 mrevt->tgt, mrevt->lun, mrevt->event)); 7796 7797 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) { 7798 mutex_enter(&instance->config_dev_mtx); 7799 dip = instance->mr_ld_list[mrevt->tgt].dip; 7800 mutex_exit(&instance->config_dev_mtx); 7801 #ifdef PDSUPPORT 7802 } else { 7803 mutex_enter(&instance->config_dev_mtx); 7804 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip; 7805 mutex_exit(&instance->config_dev_mtx); 7806 #endif 7807 } 7808 7809 7810 ndi_devi_enter(instance->dip, &circ1); 7811 switch (mrevt->event) { 7812 case MRSAS_EVT_CONFIG_TGT: 7813 if (dip == NULL) { 7814 7815 if (mrevt->lun == 0) { 7816 (void) mrsas_config_ld(instance, mrevt->tgt, 7817 0, NULL); 7818 #ifdef PDSUPPORT 7819 } else if (instance->tbolt || instance->skinny) { 7820 (void) mrsas_tbolt_config_pd(instance, 7821 mrevt->tgt, 7822 1, NULL); 7823 #endif 7824 } 7825 con_log(CL_ANN1, (CE_NOTE, 7826 "mr_sas: EVT_CONFIG_TGT called:" 7827 " for tgt %d lun %d event %d", 7828 mrevt->tgt, mrevt->lun, mrevt->event)); 7829 7830 } else { 7831 con_log(CL_ANN1, (CE_NOTE, 7832 "mr_sas: EVT_CONFIG_TGT dip != NULL:" 7833 " for tgt %d lun %d event %d", 7834 mrevt->tgt, mrevt->lun, mrevt->event)); 7835 } 7836 break; 7837 case MRSAS_EVT_UNCONFIG_TGT: 7838 if (dip) { 7839 if (i_ddi_devi_attached(dip)) { 7840 7841 pdip = ddi_get_parent(dip); 7842 7843 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP); 7844 (void) ddi_deviname(dip, devname); 7845 7846 (void) devfs_clean(pdip, devname + 1, 7847 DV_CLEAN_FORCE); 7848 kmem_free(devname, MAXNAMELEN + 1); 7849 } 7850 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE); 7851 con_log(CL_ANN1, (CE_NOTE, 7852 "mr_sas: EVT_UNCONFIG_TGT called:" 7853 " for tgt %d lun %d event %d", 7854 mrevt->tgt, mrevt->lun, mrevt->event)); 7855 } else { 7856 con_log(CL_ANN1, (CE_NOTE, 7857 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:" 7858 " for tgt %d lun %d event %d", 7859 mrevt->tgt, mrevt->lun, mrevt->event)); 7860 } 7861 break; 7862 } 7863 kmem_free(mrevt, sizeof (struct mrsas_eventinfo)); 7864 ndi_devi_exit(instance->dip, circ1); 7865 } 7866 7867 7868 int 7869 mrsas_mode_sense_build(struct scsi_pkt *pkt) 7870 { 7871 union scsi_cdb *cdbp; 7872 uint16_t page_code; 7873 struct scsa_cmd *acmd; 7874 struct buf *bp; 7875 struct mode_header *modehdrp; 7876 7877 cdbp = (void *)pkt->pkt_cdbp; 7878 page_code = cdbp->cdb_un.sg.scsi[0]; 7879 acmd = PKT2CMD(pkt); 7880 bp = acmd->cmd_buf; 7881 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) { 7882 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command")); 7883 /* ADD pkt statistics as Command failed. */ 7884 return (NULL); 7885 } 7886 7887 bp_mapin(bp); 7888 bzero(bp->b_un.b_addr, bp->b_bcount); 7889 7890 switch (page_code) { 7891 case 0x3: { 7892 struct mode_format *page3p = NULL; 7893 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 7894 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 7895 7896 page3p = (void *)((caddr_t)modehdrp + 7897 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 7898 page3p->mode_page.code = 0x3; 7899 page3p->mode_page.length = 7900 (uchar_t)(sizeof (struct mode_format)); 7901 page3p->data_bytes_sect = 512; 7902 page3p->sect_track = 63; 7903 break; 7904 } 7905 case 0x4: { 7906 struct mode_geometry *page4p = NULL; 7907 modehdrp = (struct mode_header *)(bp->b_un.b_addr); 7908 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH; 7909 7910 page4p = (void *)((caddr_t)modehdrp + 7911 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH); 7912 page4p->mode_page.code = 0x4; 7913 page4p->mode_page.length = 7914 (uchar_t)(sizeof (struct mode_geometry)); 7915 page4p->heads = 255; 7916 page4p->rpm = 10000; 7917 break; 7918 } 7919 default: 7920 break; 7921 } 7922 return (NULL); 7923 } 7924