/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Racktop Systems, Inc.
 */

/*
 * This driver targets the LSI/Broadcom/AVAGO MegaRAID SAS controllers
 * of the 3rd generation, in particular the models Aero and Ventura.
 *
 * This file contains the interfaces to DDI.
 *
 * Driver attach:
 * --------------
 *
 * For each HBA, the driver will attach three instances. The first will be for
 * the controller, carrying out hardware and driver initialization, while the
 * remaining two are SCSA instances for the RAID (LD) and physical (PD) iports.
 *
 * Controller Initialization:
 * --------------------------
 *
 * The initialization of the controller hardware is split across multiple
 * functions which are called during lmrc_ctrl_attach():
 * 1. As soon as the device registers are mapped, lmrc_adapter_init() will
 *    be called. This will attempt to bring the firmware to a ready state,
 *    after which control registers are read to fetch basic hardware properties
 *    and calculate the sizes of various data structures used by the driver.
 * 2. After setting up interrupts and initializing mutexes, the expected number
 *    of MFI and MPT commands will be pre-allocated. Then, the I/O controller
 *    will be initialized by sending an IOC INIT command.
 * 3. At this point the driver is able to send commands to the controller and
 *    receive replies. This will first be used to retrieve controller firmware
 *    properties to finish driver setup based on the information received.
 * 4. As part of the remaining firmware configuration, we'll post a set of
 *    long-running commands to keep us informed about RAID map and PD map
 *    changes. These commands will complete asynchronously and will be
 *    rescheduled every time they have completed.
 *
 * While it's not really part of the controller initialization, it is
 * worthwhile to mention here that we send a CTRL SHUTDOWN command to the
 * controller during our quiesce(9E).
 *
 *
 * SCSA HBA Setup:
 * ---------------
 *
 * The driver is written to conform to SCSAv3.
 *
 * The driver will attach two iport(9) instances, one for physical devices that
 * are directly exposed by the HBA to the host, and another for logical devices.
 * The latter category not only includes RAID volumes but also physical disks
 * when the controller is in JBOD mode.
 *
 * The attach function for either iport will enumerate the physical and logical
 * devices, respectively, and populate a tgtmap(9). The driver itself maintains
 * target state in lmrc_tgt_t. It will attempt to get the SAS WWN of the target
 * and use it as a device address, falling back to the target ID as used by the
 * controller hardware.
 *
 * The array of target states is initialized once during controller attach. The
 * initial portion of each target state contains a back link to the controller
 * soft state and a mutex, neither of which need changing when a new target is
 * discovered or a target disappears. The array of target states is indexed by
 * the target ID as used by the controller hardware. Unused targets will have
 * their target ID set to LMRC_DEVHDL_INVALID.
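 *
 * As an illustrative sketch (not a function this file actually defines), a
 * lookup keyed by the hardware target ID could look like this:
 *
 *	static lmrc_tgt_t *
 *	lmrc_tgt_from_dev_id(lmrc_t *lmrc, uint16_t dev_id)
 *	{
 *		lmrc_tgt_t *tgt;
 *
 *		if (dev_id >= ARRAY_SIZE(lmrc->l_targets))
 *			return (NULL);
 *
 *		tgt = &lmrc->l_targets[dev_id];
 *		if (tgt->tgt_dev_id == LMRC_DEVHDL_INVALID)
 *			return (NULL);
 *
 *		return (tgt);
 *	}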
 *
 *
 * MPT I/O request sending and reply processing:
 * ---------------------------------------------
 *
 * The hardware expects to have access to two large areas of DMA memory that
 * the driver will use to send I/O requests and receive replies. The sizes of
 * these DMA buffers are based on the fixed size of I/O requests and the number
 * of such requests that the controller may accept, and on the size of the
 * replies, the queue depth supported by the hardware, and the number of
 * interrupt vectors available for this driver.
 *
 * Based on these numbers, the driver will pre-allocate enough MPT and MFI
 * commands to match the size of the I/O request buffer. In addition, each
 * MPT command will have a SGL chain frame and a sense buffer pre-allocated.
 * A set of functions is available to get an initialized command structure to
 * send a request, and to return it to the command list after use.
 *
 * Sending an MPT I/O request to the controller is done by filling out the I/O
 * frame with all the parameters needed for the request and creating a request
 * descriptor, filling in the SMID of the I/O frame used and the queue number
 * where the reply should be posted. The request descriptor is then written
 * into the appropriate device registers.
 *
 * On completion, an interrupt may or may not be posted, depending on the I/O
 * request flags and the overall system state, such as whether interrupts are
 * enabled at all. If an interrupt is received, any new replies posted into the
 * queue associated with the interrupt vector are processed and their
 * callbacks, if any, will be called. The hardware will be informed about the
 * last reply index processed by writing the appropriate register.
 *
 * Polled I/O is facilitated by repeatedly checking for the presence of a
 * reply, waiting a short time in between, up to a pre-defined timeout.
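 *
 * As a minimal sketch of that polling scheme (the helper name and the 100us
 * step here are illustrative, not the driver's actual code):
 *
 *	static int
 *	lmrc_poll_reply(lmrc_t *lmrc, lmrc_mpt_cmd_t *mpt, clock_t usec_left)
 *	{
 *		while (usec_left > 0) {
 *			if (lmrc_reply_posted(lmrc, mpt))
 *				return (DDI_SUCCESS);
 *			drv_usecwait(100);
 *			usec_left -= 100;
 *		}
 *
 *		return (DDI_FAILURE);
 *	}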
 *
 *
 * MFI (MegaRAID Firmware Interface) commands:
 * -------------------------------------------
 *
 * MFI commands are used internally by the driver or by user space via the
 * ioctl interface. Except for the initial IOC INIT command, all MFI commands
 * will be sent using MPT MFI passthru commands. Therefore, after the initial
 * IOC INIT command each MFI command always has an MPT command associated.
 *
 * MFI commands can be sent synchronously in "blocked" or "polled" mode, which
 * differ only in the way the driver waits for completion. When sending a
 * "blocked" command, the driver will set a callback and wait for the hardware
 * to return the command through the normal interrupt driven code path. In
 * "polled" mode, the command has a flag set to indicate to the hardware it
 * should not be posted to a reply queue, and the driver repeatedly checks its
 * status until it changes to indicate completion.
 *
 * MFI commands can also be sent asynchronously, in which case they are always
 * completed through the interrupt code path and have a callback. This is used
 * for RAID and PD map updates and Asynchronous Event Notifications (AENs). In
 * all these cases, the commands are usually sent to the hardware again after
 * having been completed, avoiding unnecessary reallocation.
 *
 * As asynchronous commands can still be outstanding during detach, they can
 * and will be aborted by sending an MFI ABORT command when the driver is
 * shutting down.
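 *
 * Sketched with illustrative helper names (the driver's real helpers and
 * status fields may be named differently), a "blocked" wait looks like:
 *
 *	mutex_enter(&mfi->mfi_lock);
 *	lmrc_issue_mfi(lmrc, mfi);
 *	while (!lmrc_mfi_is_complete(mfi))
 *		cv_wait(&mfi->mfi_cv, &mfi->mfi_lock);
 *	mutex_exit(&mfi->mfi_lock);
 *
 * while a "polled" wait simply spins on the command status:
 *
 *	lmrc_issue_mfi(lmrc, mfi);
 *	while (!lmrc_mfi_is_complete(mfi))
 *		drv_usecwait(100);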
 *
 * Asynchronous Event Notifications:
 * ---------------------------------
 *
 * The driver will always have one AEN request outstanding to receive events
 * from the controller. These events aren't very well documented, but it is
 * known that they include a "locale" describing to which aspect of the HBA
 * they apply, which is either the controller itself, physical devices, or
 * logical devices.
 *
 * Most events will be logged but otherwise ignored by the driver, but some
 * inform us about changes to the physical or logical drives connected to the
 * HBA, in which case we update the respective target map.
 *
 *
 * DMA considerations:
 * -------------------
 *
 * Most of the MPT structures can hold a 64bit physical address for DMA, but
 * some don't. Additionally, the hardware may indicate that it doesn't handle
 * 64bit DMA, even though the structures could hold an address this wide.
 *
 * Consequently, the driver keeps two sets of DMA attributes in its soft
 * state, one dedicated to 32bit DMA and another one for all other uses which
 * could potentially support 64bit DMA. The latter will be modified to fit
 * what the hardware actually supports.
 *
 *
 * Interrupt considerations:
 * -------------------------
 *
 * Unless we're in the unlikely situation that the hardware claims to not
 * actually support it, the driver will prefer to get MSI-X interrupts. If
 * that fails it'll make do with MSI interrupts, falling back to FIXED
 * interrupts if that fails as well.
 *
 * The number of queues supported is set to the minimum of what the hardware
 * claims to support and the number of interrupt vectors we can allocate. It
 * is expected that the hardware will support many more queues and interrupt
 * vectors than what the OS gives us by default.
 *
 *
 * Locking considerations:
 * -----------------------
 *
 * The driver uses several mutexes, rwlocks, and one semaphore to serialize
 * accesses to various parts of its internal state.
 *
 * The semaphore lmrc->l_ioctl_sema is used to limit the number of MFI
 * commands concurrently in use by user space. This semaphore needs to be
 * decremented by the ioctl code path before any other locks may be acquired.
 *
 * The PD and RAID maps are each protected by a rwlock, lmrc->l_pdmap_lock and
 * lmrc->l_raidmap_lock. Either map is write-locked only when we receive an
 * updated map from the firmware and copy it over our map, which happens only
 * in the context of the MFI command completion for the respective MAP GET
 * INFO with the respective MFI command mutex being held. Read-locking of
 * either map does not require any specific lock ordering.
 *
 * Each lmrc_tgt_t has its own rwlock, tgt->tgt_lock, which is write-locked
 * only during lmrc_tgt_clear(), lmrc_tgt_init(), and lmrc_raid_get_wwn(), all
 * of which run to update our internal target state as the hardware notifies
 * us about a target change. No other locks are held during target state
 * changes. During lmrc_tran_start() and lmrc_task_mgmt(), all other required
 * command and map locks are acquired and released as necessary with the
 * addressed target being read-locked, preventing target state updates while
 * I/O is being done.
 *
 * Each MPT and MFI command has an associated mutex and condition variable
 * used for synchronization. In general the mutex should be held while the
 * command is set up until it has been sent to the hardware. The interrupt
 * handler acquires the mutex of each completed command before signalling
 * completion. In case of command abortion, the mutex of a command to be
 * aborted is held to block completion until the ABORT or TASK MGMT command is
 * sent to the hardware to avoid races.
 *
 * Additionally, each lmrc_tgt_t has an active command list to keep track of
 * all MPT I/O commands sent to a target, protected by a mutex. When iterating
 * the active command list of a target, the mutex protecting this list must be
 * held, while the command mutexes are entered and exited. When adding a
 * command to an active command list, the mutex protecting the list is
 * acquired while the command mutex is held. Care must be taken to avoid a
 * deadlock against the iterating functions when removing a command from an
 * active command list: The command mutex must not be held when the mutex
 * protecting the list is entered. Using the functions for active command list
 * management ensures lock ordering.
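 *
 * A sketch of that removal rule (this is what the active command list
 * management functions mentioned above are there to ensure):
 *
 *	mutex_exit(&mpt->mpt_lock);
 *	mutex_enter(&tgt->tgt_mpt_active_lock);
 *	list_remove(&tgt->tgt_mpt_active, mpt);
 *	mutex_exit(&tgt->tgt_mpt_active_lock);
 *
 * Entering tgt_mpt_active_lock while still holding mpt_lock could deadlock
 * against an iterator that holds the list lock and is about to enter the
 * command mutex.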
 */

#include <sys/class.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/dditypes.h>
#include <sys/modctl.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/scsi/scsi.h>

#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

#include "lmrc.h"
#include "lmrc_reg.h"
#include "lmrc_ioctl.h"
#include "lmrc_phys.h"

#define	INST2LSIRDCTL(x)	((x) << INST_MINOR_SHIFT)

void *lmrc_state;

/*
 * Since the max sgl length can vary, we create a per-instance copy of
 * lmrc_dma_attr and fill in .dma_attr_sgllen with the correct value
 * during attach.
 */
static const ddi_dma_attr_t lmrc_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x00000000,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFF,
	.dma_attr_count_max = 0xFFFFFFFF,
	.dma_attr_align = 8,
	.dma_attr_burstsizes = 0x7,
	.dma_attr_minxfer = 1,
	.dma_attr_maxxfer = 0xFFFFFFFF,
	.dma_attr_seg = 0xFFFFFFFF,
	.dma_attr_sgllen = 0,
	.dma_attr_granular = 512,
	.dma_attr_flags = 0,
};

static struct ddi_device_acc_attr lmrc_acc_attr = {
	.devacc_attr_version = DDI_DEVICE_ATTR_V1,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder = DDI_STRICTORDER_ACC,
	.devacc_attr_access = DDI_DEFAULT_ACC,
};

static int lmrc_attach(dev_info_t *, ddi_attach_cmd_t);
static int lmrc_detach(dev_info_t *, ddi_detach_cmd_t);
static int lmrc_ctrl_attach(dev_info_t *);
static int lmrc_ctrl_detach(dev_info_t *);
static int lmrc_cleanup(lmrc_t *, boolean_t);
static lmrc_adapter_class_t lmrc_get_class(lmrc_t *);
static int lmrc_regs_init(lmrc_t *);
static uint_t lmrc_isr(caddr_t, caddr_t);
static int lmrc_add_intrs(lmrc_t *, int);
static int lmrc_intr_init(lmrc_t *);
static void lmrc_intr_fini(lmrc_t *);
static int lmrc_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
static void lmrc_fm_init(lmrc_t *);
static void lmrc_fm_fini(lmrc_t *);
static int lmrc_alloc_mpt_cmds(lmrc_t *, const size_t);
static void lmrc_free_mpt_cmds(lmrc_t *, const size_t);
static int lmrc_alloc_mfi_cmds(lmrc_t *, const size_t);
static void lmrc_free_mfi_cmds(lmrc_t *, const size_t);

static int
lmrc_ctrl_attach(dev_info_t *dip)
{
	char name[64]; /* large enough for the taskq name */
	lmrc_t *lmrc;
	uint32_t instance;
	int ret;
	int i;

	instance = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(lmrc_state, instance) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "could not allocate soft state");
		return (DDI_FAILURE);
	}

	lmrc = ddi_get_soft_state(lmrc_state, instance);
	lmrc->l_dip = dip;

	lmrc->l_ctrl_info = kmem_zalloc(sizeof (lmrc_ctrl_info_t), KM_SLEEP);
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_BASIC);

	lmrc->l_class = lmrc_get_class(lmrc);

	if (lmrc->l_class == LMRC_ACLASS_OTHER) {
		dev_err(dip, CE_WARN, "unknown controller class");
		goto fail;
	}

	lmrc->l_acc_attr = lmrc_acc_attr;
	lmrc->l_dma_attr = lmrc_dma_attr;
	lmrc->l_dma_attr_32 = lmrc_dma_attr;

	lmrc_fm_init(lmrc);
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_FM);

	if (lmrc_regs_init(lmrc) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_REGS);

	if (lmrc_adapter_init(lmrc) != DDI_SUCCESS)
		goto fail;

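	/*
	 * The dedicated 32bit DMA attribute set always gets a 32bit address
	 * ceiling; the general attribute set is only restricted below if the
	 * hardware can't do 64bit DMA.
	 */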
	lmrc->l_dma_attr_32.dma_attr_addr_hi = 0xFFFFFFFF;

	/* Restrict all DMA to the lower 32bit address space if necessary. */
	if (!lmrc->l_64bit_dma_support)
		lmrc->l_dma_attr.dma_attr_addr_hi = 0xFFFFFFFF;

	if (lmrc_intr_init(lmrc) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_INTR);

	mutex_init(&lmrc->l_mpt_cmd_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));
	list_create(&lmrc->l_mpt_cmd_list, sizeof (lmrc_mpt_cmd_t),
	    offsetof(lmrc_mpt_cmd_t, mpt_node));

	mutex_init(&lmrc->l_mfi_cmd_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));
	list_create(&lmrc->l_mfi_cmd_list, sizeof (lmrc_mfi_cmd_t),
	    offsetof(lmrc_mfi_cmd_t, mfi_node));

	mutex_init(&lmrc->l_reg_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));

	rw_init(&lmrc->l_raidmap_lock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));
	rw_init(&lmrc->l_pdmap_lock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));

	sema_init(&lmrc->l_ioctl_sema, LMRC_MAX_IOCTL_CMDS, NULL, SEMA_DRIVER,
	    NULL);

	mutex_init(&lmrc->l_thread_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(lmrc->l_intr_pri));
	cv_init(&lmrc->l_thread_cv, NULL, CV_DRIVER, NULL);

	for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++) {
		lmrc_tgt_t *tgt = &lmrc->l_targets[i];

		rw_init(&tgt->tgt_lock, NULL, RW_DRIVER,
		    DDI_INTR_PRI(lmrc->l_intr_pri));
		mutex_init(&tgt->tgt_mpt_active_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(lmrc->l_intr_pri));
		list_create(&tgt->tgt_mpt_active, sizeof (lmrc_mpt_cmd_t),
		    offsetof(lmrc_mpt_cmd_t, mpt_node));
		tgt->tgt_lmrc = lmrc;
		tgt->tgt_dev_id = LMRC_DEVHDL_INVALID;
	}

	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_SYNC);

	if (lmrc_alloc_mfi_cmds(lmrc, LMRC_MAX_MFI_CMDS) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_MFICMDS);

	if (lmrc_alloc_mpt_cmds(lmrc, lmrc->l_max_fw_cmds) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_MPTCMDS);

	lmrc->l_thread = thread_create(NULL, 0, lmrc_thread, lmrc, 0, &p0,
	    TS_RUN, minclsyspri);
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_THREAD);

	if (lmrc_ioc_init(lmrc) != DDI_SUCCESS)
		goto fail;

	lmrc_enable_intr(lmrc);

	if (lmrc_fw_init(lmrc) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_FW);

	if (lmrc_hba_attach(lmrc) != DDI_SUCCESS)
		goto fail;
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_HBA);

	(void) snprintf(lmrc->l_iocname, sizeof (lmrc->l_iocname),
	    "%d:lsirdctl", instance);
	if (ddi_create_minor_node(dip, lmrc->l_iocname, S_IFCHR,
	    INST2LSIRDCTL(instance), DDI_PSEUDO, 0) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to create ioctl node.");
		goto fail;
	}
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_NODE);

	(void) snprintf(name, sizeof (name), "%s%d_taskq",
	    ddi_driver_name(dip), ddi_get_instance(dip));

	lmrc->l_taskq = taskq_create(name, lmrc->l_max_reply_queues,
	    minclsyspri, 64, INT_MAX, TASKQ_PREPOPULATE);
	if (lmrc->l_taskq == NULL) {
		dev_err(dip, CE_WARN, "failed to create taskq.");
		goto fail;
	}
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_TASKQ);

	if (lmrc_start_aen(lmrc) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to initiate AEN.");
		goto fail;
	}
	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_AEN);

	ddi_report_dev(dip);

	if (lmrc_check_acc_handle(lmrc->l_reghandle) != DDI_SUCCESS) {
		lmrc_fm_ereport(lmrc, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(lmrc->l_dip, DDI_SERVICE_LOST);
	}

	return (DDI_SUCCESS);

fail:
	ret = lmrc_cleanup(lmrc, B_TRUE);
	VERIFY3U(ret, ==, DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
lmrc_ctrl_detach(dev_info_t *dip)
{
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(dip));
	VERIFY(lmrc != NULL);

	return (lmrc_cleanup(lmrc, B_FALSE));
}

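/*
 * Tear down the driver state in reverse order of lmrc_ctrl_attach(), guided
 * by the initlevel flags set as each setup stage completed. This is used
 * both for unwinding a failed attach (failed == B_TRUE) and for detach.
 */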
static int
lmrc_cleanup(lmrc_t *lmrc, boolean_t failed)
{
	int i, ret;

	if (lmrc->l_raid_dip != NULL || lmrc->l_phys_dip != NULL)
		return (DDI_FAILURE);

	/*
	 * Before doing anything else, abort any outstanding commands.
	 * The first commands are issued during FW initialization, so check
	 * that we're past this point.
	 */
	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_FW)) {
		ret = lmrc_abort_outstanding_mfi(lmrc, LMRC_MAX_MFI_CMDS);
		lmrc_disable_intr(lmrc);
		if (ret != DDI_SUCCESS)
			return (ret);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_AEN)) {
		/* The AEN command was aborted above already. */
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_AEN);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_TASKQ)) {
		taskq_destroy(lmrc->l_taskq);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_TASKQ);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_NODE)) {
		ddi_remove_minor_node(lmrc->l_dip, lmrc->l_iocname);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_NODE);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_HBA)) {
		(void) lmrc_hba_detach(lmrc);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_HBA);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_FW)) {
		lmrc_free_pdmap(lmrc);
		lmrc_free_raidmap(lmrc);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_FW);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_THREAD)) {
		mutex_enter(&lmrc->l_thread_lock);
		lmrc->l_thread_stop = B_TRUE;
		cv_signal(&lmrc->l_thread_cv);
		mutex_exit(&lmrc->l_thread_lock);
		thread_join(lmrc->l_thread->t_did);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_THREAD);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_MFICMDS)) {
		lmrc_free_mfi_cmds(lmrc, LMRC_MAX_MFI_CMDS);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_MFICMDS);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_MPTCMDS)) {
		lmrc_free_mpt_cmds(lmrc, lmrc->l_max_fw_cmds);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_MPTCMDS);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_SYNC)) {
		for (i = 0; i < ARRAY_SIZE(lmrc->l_targets); i++) {
			lmrc_tgt_t *tgt = &lmrc->l_targets[i];

			list_destroy(&tgt->tgt_mpt_active);
			mutex_destroy(&tgt->tgt_mpt_active_lock);
			rw_destroy(&tgt->tgt_lock);
		}

		mutex_destroy(&lmrc->l_thread_lock);
		cv_destroy(&lmrc->l_thread_cv);

		sema_destroy(&lmrc->l_ioctl_sema);

		mutex_destroy(&lmrc->l_mfi_cmd_lock);
		list_destroy(&lmrc->l_mfi_cmd_list);

		mutex_destroy(&lmrc->l_mpt_cmd_lock);
		list_destroy(&lmrc->l_mpt_cmd_list);

		rw_destroy(&lmrc->l_pdmap_lock);
		rw_destroy(&lmrc->l_raidmap_lock);
		mutex_destroy(&lmrc->l_reg_lock);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_SYNC);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_INTR)) {
		lmrc_intr_fini(lmrc);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_INTR);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_REGS)) {
		ddi_regs_map_free(&lmrc->l_reghandle);
		lmrc->l_regmap = NULL;
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_REGS);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_FM)) {
		lmrc_fm_fini(lmrc);
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_FM);
	}

	if (INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_BASIC)) {
		kmem_free(lmrc->l_ctrl_info, sizeof (lmrc_ctrl_info_t));
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_BASIC);
	}

	VERIFY0(lmrc->l_init_level);
	ddi_soft_state_free(lmrc_state, ddi_get_instance(lmrc->l_dip));

	return (DDI_SUCCESS);
}

static int
lmrc_regs_init(lmrc_t *lmrc)
{
	uint_t regno;
	off_t regsize;

	switch (lmrc->l_class) {
	case LMRC_ACLASS_VENTURA:
	case LMRC_ACLASS_AERO:
		regno = 1;
		break;
	default:
		regno = 2;
		break;
	}

	if (ddi_dev_regsize(lmrc->l_dip, regno, &regsize) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (regsize < LMRC_MFI_MIN_MEM) {
		dev_err(lmrc->l_dip, CE_WARN, "reg %d size (%ld) is too small",
		    regno, regsize);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(lmrc->l_dip, regno, &lmrc->l_regmap, 0, 0,
	    &lmrc->l_acc_attr, &lmrc->l_reghandle) != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "unable to map control registers");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

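/*
 * Interrupt handler. arg1 is the driver soft state and arg2 identifies the
 * reply queue whose vector fired; both were passed to ddi_intr_add_handler()
 * in lmrc_add_intrs() below.
 */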
static uint_t
lmrc_isr(caddr_t arg1, caddr_t arg2)
{
	lmrc_t *lmrc = (lmrc_t *)arg1;
	int queue = (int)(uintptr_t)arg2;
	uint_t ret = DDI_INTR_UNCLAIMED;

	if (lmrc->l_intr_type == DDI_INTR_TYPE_FIXED) {
		ret = lmrc_intr_ack(lmrc);
		if (ret != DDI_INTR_CLAIMED)
			return (ret);
	}

	ret = lmrc_process_replies(lmrc, queue);
	return (ret);
}

static int
lmrc_add_intrs(lmrc_t *lmrc, int intr_type)
{
	int navail, nintrs, count;
	int ret;
	int i;

	if (lmrc->l_intr_types == 0) {
		ret = ddi_intr_get_supported_types(lmrc->l_dip,
		    &lmrc->l_intr_types);
		if (ret != DDI_SUCCESS) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: ddi_intr_get_supported_types failed",
			    __func__);
			return (ret);
		}
	}

	if ((lmrc->l_intr_types & intr_type) == 0)
		return (DDI_FAILURE);

	/* Don't use MSI-X if the firmware doesn't support it. */
	if (intr_type == DDI_INTR_TYPE_MSIX && !lmrc->l_fw_msix_enabled)
		return (DDI_FAILURE);

	ret = ddi_intr_get_nintrs(lmrc->l_dip, intr_type, &nintrs);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!%s: ddi_intr_get_nintrs failed", __func__);
		return (ret);
	}

	ret = ddi_intr_get_navail(lmrc->l_dip, intr_type, &navail);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!%s: ddi_intr_get_navail failed", __func__);
		return (ret);
	}

	/*
	 * There's no point in having more interrupts than queues supported
	 * by the hardware.
	 */
	if (navail > lmrc->l_max_reply_queues)
		navail = lmrc->l_max_reply_queues;

	lmrc->l_intr_htable_size = navail * sizeof (ddi_intr_handle_t);
	lmrc->l_intr_htable = kmem_zalloc(lmrc->l_intr_htable_size, KM_SLEEP);

	ret = ddi_intr_alloc(lmrc->l_dip, lmrc->l_intr_htable, intr_type, 0,
	    navail, &count, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
		    __func__);
		goto fail;
	}

	if (count < navail) {
		dev_err(lmrc->l_dip, CE_CONT,
		    "?requested %d interrupts, received %d\n", navail, count);
	}

	lmrc->l_intr_count = count;

	ret = ddi_intr_get_pri(lmrc->l_intr_htable[0], &lmrc->l_intr_pri);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
		    __func__);
		goto fail;
	}

	if (lmrc->l_intr_pri >= ddi_intr_get_hilevel_pri()) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "high level interrupts not supported");
		goto fail;
	}

	for (i = 0; i < lmrc->l_intr_count; i++) {
		ret = ddi_intr_add_handler(lmrc->l_intr_htable[i], lmrc_isr,
		    (caddr_t)lmrc, (caddr_t)(uintptr_t)i);
		if (ret != DDI_SUCCESS) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: ddi_intr_add_handler failed", __func__);
			goto fail;
		}
	}

	ret = ddi_intr_get_cap(lmrc->l_intr_htable[0], &lmrc->l_intr_cap);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!%s: ddi_intr_get_cap failed", __func__);
		goto fail;
	}

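	/*
	 * If the interrupts can be enabled as a block, do so; otherwise each
	 * vector has to be enabled individually.
	 */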
	if ((lmrc->l_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
		ret = ddi_intr_block_enable(lmrc->l_intr_htable, count);
		if (ret != DDI_SUCCESS) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: ddi_intr_block_enable failed", __func__);
			goto fail;
		}
	} else {
		for (i = 0; i < lmrc->l_intr_count; i++) {
			ret = ddi_intr_enable(lmrc->l_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(lmrc->l_dip, CE_WARN,
				    "!%s: ddi_intr_enable failed", __func__);
				goto fail;
			}
		}
	}

	lmrc->l_intr_type = intr_type;
	return (DDI_SUCCESS);

fail:
	lmrc_intr_fini(lmrc);
	return (ret);
}

static int
lmrc_intr_init(lmrc_t *lmrc)
{
	int ret;

	lmrc_disable_intr(lmrc);

	if ((lmrc_add_intrs(lmrc, DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) &&
	    (lmrc_add_intrs(lmrc, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) &&
	    (lmrc_add_intrs(lmrc, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)) {
		dev_err(lmrc->l_dip, CE_WARN, "failed to set up interrupts");
		return (DDI_FAILURE);
	}

	dev_err(lmrc->l_dip, CE_NOTE, "!got %d %s interrupts",
	    lmrc->l_intr_count,
	    lmrc->l_intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X" :
	    lmrc->l_intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");

	/* Don't use more queues than we got interrupts for. */
	if (lmrc->l_max_reply_queues > lmrc->l_intr_count)
		lmrc->l_max_reply_queues = lmrc->l_intr_count;

	lmrc->l_last_reply_idx =
	    kmem_zalloc(sizeof (uint16_t) * lmrc->l_max_reply_queues,
	    KM_SLEEP);

	/*
	 * While here, allocate the reply descriptor DMA memory and the array
	 * keeping the last reply index for each queue. Each queue will have
	 * space for reply_q_depth MPI2 descriptors (reply_alloc_sz).
	 */
	ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr, &lmrc->l_reply_dma,
	    lmrc->l_reply_alloc_sz * lmrc->l_max_reply_queues, 16,
	    DDI_DMA_CONSISTENT);
	if (ret != DDI_SUCCESS) {
		lmrc_intr_fini(lmrc);
		return (ret);
	}
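
	/*
	 * Pre-set the reply descriptor area to all ones; a descriptor with
	 * all bits set is what the reply processing code treats as unused.
	 */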
	memset(lmrc->l_reply_dma.ld_buf, -1, lmrc->l_reply_dma.ld_len);

	return (DDI_SUCCESS);
}

static void
lmrc_intr_fini(lmrc_t *lmrc)
{
	uint_t i;

	if (lmrc->l_intr_htable == NULL || lmrc->l_intr_htable[0] == NULL)
		return;

	if ((lmrc->l_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
		(void) ddi_intr_block_disable(lmrc->l_intr_htable,
		    lmrc->l_intr_count);
	}

	for (i = 0; i < lmrc->l_intr_count; i++) {
		if (lmrc->l_intr_htable[i] == NULL)
			break;

		if ((lmrc->l_intr_cap & DDI_INTR_FLAG_BLOCK) == 0)
			(void) ddi_intr_disable(lmrc->l_intr_htable[i]);
		(void) ddi_intr_remove_handler(lmrc->l_intr_htable[i]);
		(void) ddi_intr_free(lmrc->l_intr_htable[i]);
	}

	kmem_free(lmrc->l_intr_htable, lmrc->l_intr_htable_size);
	lmrc->l_intr_htable = NULL;
	lmrc->l_intr_htable_size = 0;

	if (lmrc->l_last_reply_idx != NULL)
		kmem_free(lmrc->l_last_reply_idx,
		    sizeof (uint16_t) * lmrc->l_max_reply_queues);

	lmrc_dma_free(&lmrc->l_reply_dma);
}

static int
lmrc_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err_status,
    const void *arg)
{
	pci_ereport_post(dip, err_status, NULL);
	return (err_status->fme_status);
}

static void
lmrc_fm_init(lmrc_t *lmrc)
{
	ddi_iblock_cookie_t fm_ibc;

	lmrc->l_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
	    lmrc->l_dip, DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	if (lmrc->l_fm_capabilities == 0)
		return;

	lmrc->l_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	lmrc->l_dma_attr_32.dma_attr_flags = DDI_DMA_FLAGERR;
	lmrc->l_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;

	ddi_fm_init(lmrc->l_dip, &lmrc->l_fm_capabilities, &fm_ibc);

	if (DDI_FM_EREPORT_CAP(lmrc->l_fm_capabilities) ||
	    DDI_FM_ERRCB_CAP(lmrc->l_fm_capabilities)) {
		pci_ereport_setup(lmrc->l_dip);
	}

	if (DDI_FM_ERRCB_CAP(lmrc->l_fm_capabilities)) {
		ddi_fm_handler_register(lmrc->l_dip, lmrc_fm_error_cb, lmrc);
	}
}

static void
lmrc_fm_fini(lmrc_t *lmrc)
{
	if (lmrc->l_fm_capabilities == 0)
		return;

	if (DDI_FM_ERRCB_CAP(lmrc->l_fm_capabilities))
		ddi_fm_handler_unregister(lmrc->l_dip);

	if (DDI_FM_EREPORT_CAP(lmrc->l_fm_capabilities) ||
	    DDI_FM_ERRCB_CAP(lmrc->l_fm_capabilities)) {
		pci_ereport_teardown(lmrc->l_dip);
	}

	ddi_fm_fini(lmrc->l_dip);
}

void
lmrc_fm_ereport(lmrc_t *lmrc, const char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, sizeof (buf), "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(lmrc->l_fm_capabilities)) {
		ddi_fm_ereport_post(lmrc->l_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
	}
}

int
lmrc_check_acc_handle(ddi_acc_handle_t h)
{
	ddi_fm_error_t de;

	if (h == NULL)
		return (DDI_FAILURE);

	ddi_fm_acc_err_get(h, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

int
lmrc_check_dma_handle(ddi_dma_handle_t h)
{
	ddi_fm_error_t de;

	if (h == NULL)
		return (DDI_FAILURE);

	ddi_fm_dma_err_get(h, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

static int
lmrc_alloc_mpt_cmds(lmrc_t *lmrc, const size_t ncmd)
{
	lmrc_mpt_cmd_t **cmds;
	lmrc_mpt_cmd_t *cmd;
	uint32_t i;
	int ret;

	/*
	 * The hardware expects to find MPI I/O request frames in a big chunk
	 * of DMA memory, indexed by the MPT cmd SMID.
	 */
	ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr, &lmrc->l_ioreq_dma,
	    lmrc->l_io_frames_alloc_sz, 256, DDI_DMA_CONSISTENT);
	if (ret != DDI_SUCCESS)
		return (ret);

	cmds = kmem_zalloc(ncmd * sizeof (lmrc_mpt_cmd_t *), KM_SLEEP);
	for (i = 0; i < ncmd; i++) {
		cmd = kmem_zalloc(sizeof (lmrc_mpt_cmd_t), KM_SLEEP);

		/* XXX: allocate on demand in tran_start / build_sgl? */
		ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr,
		    &cmd->mpt_chain_dma, lmrc->l_max_chain_frame_sz, 4,
		    DDI_DMA_CONSISTENT);
		if (ret != DDI_SUCCESS)
			goto fail;

		cmd->mpt_chain = cmd->mpt_chain_dma.ld_buf;

		/*
		 * We request a few bytes more for sense so that we can fit
		 * our arq struct before the actual sense data. We must make
		 * sure to put sts_sensedata at a 64 byte aligned address.
		 */
		ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr_32,
		    &cmd->mpt_sense_dma, LMRC_SENSE_LEN + P2ROUNDUP(
		    offsetof(struct scsi_arq_status, sts_sensedata), 64), 64,
		    DDI_DMA_CONSISTENT);
		if (ret != DDI_SUCCESS)
			goto fail;

		/*
		 * Now that we have a sufficiently sized and 64 byte aligned
		 * DMA buffer for sense, calculate mpt_sense so that it points
		 * at a struct scsi_arq_status somewhere within the first 64
		 * bytes in the DMA buffer, making sure its sts_sensedata is
		 * aligned at 64 bytes as well.
		 */
		cmd->mpt_sense = cmd->mpt_sense_dma.ld_buf + 64 -
		    offsetof(struct scsi_arq_status, sts_sensedata);
		VERIFY(IS_P2ALIGNED(&(((struct scsi_arq_status *)
		    cmd->mpt_sense)->sts_sensedata), 64));
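
		/*
		 * Worked example: if offsetof(struct scsi_arq_status,
		 * sts_sensedata) were 8 (a purely illustrative value),
		 * mpt_sense would point at buf + 56, placing sts_sensedata
		 * exactly at buf + 64, the first 64 byte aligned address
		 * past the embedded arq status.
		 */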

		cmd->mpt_smid = i + 1;

		/*
		 * Calculate the address of this command's I/O frame within
		 * the DMA memory allocated earlier.
		 */
		cmd->mpt_io_frame =
		    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * cmd->mpt_smid +
		    lmrc->l_ioreq_dma.ld_buf;

		cmd->mpt_lmrc = lmrc;

		mutex_init(&cmd->mpt_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(lmrc->l_intr_pri));

		cmds[i] = cmd;
		list_insert_tail(&lmrc->l_mpt_cmd_list, cmd);
	}

	lmrc->l_mpt_cmds = cmds;
	return (DDI_SUCCESS);

fail:
	if (cmd->mpt_chain != NULL)
		lmrc_dma_free(&cmd->mpt_chain_dma);
	kmem_free(cmd, sizeof (lmrc_mpt_cmd_t));

	lmrc_free_mpt_cmds(lmrc, ncmd);

	return (ret);
}

static void
lmrc_free_mpt_cmds(lmrc_t *lmrc, const size_t ncmd)
{
	lmrc_mpt_cmd_t *cmd;
	size_t count = 0;

	for (cmd = list_remove_head(&lmrc->l_mpt_cmd_list);
	    cmd != NULL;
	    cmd = list_remove_head(&lmrc->l_mpt_cmd_list)) {
		lmrc_dma_free(&cmd->mpt_chain_dma);
		lmrc_dma_free(&cmd->mpt_sense_dma);
		mutex_destroy(&cmd->mpt_lock);
		kmem_free(cmd, sizeof (lmrc_mpt_cmd_t));
		count++;
	}
	VERIFY3U(count, ==, ncmd);
	VERIFY(list_is_empty(&lmrc->l_mpt_cmd_list));

	kmem_free(lmrc->l_mpt_cmds, ncmd * sizeof (lmrc_mpt_cmd_t *));

	lmrc_dma_free(&lmrc->l_ioreq_dma);
}

static int
lmrc_alloc_mfi_cmds(lmrc_t *lmrc, const size_t ncmd)
{
	int ret = DDI_SUCCESS;
	lmrc_mfi_cmd_t **cmds;
	lmrc_mfi_cmd_t *cmd;
	uint32_t i;

	cmds = kmem_zalloc(ncmd * sizeof (lmrc_mfi_cmd_t *), KM_SLEEP);
	for (i = 0; i < ncmd; i++) {
		cmd = kmem_zalloc(sizeof (lmrc_mfi_cmd_t), KM_SLEEP);
		ret = lmrc_dma_alloc(lmrc, lmrc->l_dma_attr,
		    &cmd->mfi_frame_dma, sizeof (lmrc_mfi_frame_t), 256,
		    DDI_DMA_CONSISTENT);
		if (ret != DDI_SUCCESS)
			goto fail;

		cmd->mfi_lmrc = lmrc;
		cmd->mfi_frame = cmd->mfi_frame_dma.ld_buf;
		cmd->mfi_idx = i;

		mutex_init(&cmd->mfi_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(lmrc->l_intr_pri));

		cmds[i] = cmd;
		list_insert_tail(&lmrc->l_mfi_cmd_list, cmd);
	}

	lmrc->l_mfi_cmds = cmds;
	return (DDI_SUCCESS);

fail:
	kmem_free(cmd, sizeof (lmrc_mfi_cmd_t));
	lmrc_free_mfi_cmds(lmrc, ncmd);

	return (ret);
}

static void
lmrc_free_mfi_cmds(lmrc_t *lmrc, const size_t ncmd)
{
	lmrc_mfi_cmd_t *cmd;
	size_t count = 0;

	for (cmd = list_remove_head(&lmrc->l_mfi_cmd_list);
	    cmd != NULL;
	    cmd = list_remove_head(&lmrc->l_mfi_cmd_list)) {
		ASSERT(lmrc->l_mfi_cmds[cmd->mfi_idx] == cmd);
		lmrc->l_mfi_cmds[cmd->mfi_idx] = NULL;
		lmrc_dma_free(&cmd->mfi_frame_dma);
		mutex_destroy(&cmd->mfi_lock);
		kmem_free(cmd, sizeof (lmrc_mfi_cmd_t));
		count++;
	}
	VERIFY3U(count, ==, ncmd);
	VERIFY(list_is_empty(&lmrc->l_mfi_cmd_list));

	kmem_free(lmrc->l_mfi_cmds, ncmd * sizeof (lmrc_mfi_cmd_t *));
}

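/*
 * To illustrate the SGE chaining below with concrete (but made-up) numbers:
 * with l_max_sge_in_main_msg == 8 and 20 cookies, the first pass fills all 8
 * main frame SGEs, then rolls back by one cookie so that SGE 7 can be turned
 * into a chain element pointing at the pre-allocated chain frame. The chain
 * frame then receives cookies 7 through 19, and the last of those is flagged
 * as end-of-list.
 */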
void
lmrc_dma_build_sgl(lmrc_t *lmrc, lmrc_mpt_cmd_t *mpt,
    const ddi_dma_cookie_t *cookie, uint_t ncookies)
{
	Mpi25SCSIIORequest_t *io_req = mpt->mpt_io_frame;
	Mpi25IeeeSgeChain64_t *sgl_ptr = &io_req->SGL.IeeeChain;
	uint_t nsge, max_sge;
	uint_t i;

	ASSERT(ncookies > 0);

	/* Start with the 8 SGEs in the I/O frame. */
	max_sge = lmrc->l_max_sge_in_main_msg;

	for (;;) {
		nsge = min(ncookies, max_sge);

		for (i = 0; i < nsge; i++, cookie++) {
			*(uint64_t *)&sgl_ptr[i].Address =
			    cookie->dmac_laddress;
			sgl_ptr[i].Length = cookie->dmac_size;
			sgl_ptr[i].Flags = 0;
		}

		ncookies -= nsge;

		if (ncookies == 0)
			break;

		/*
		 * There's more. Roll back to the last cookie processed,
		 * set up the SGE chain and repeat.
		 */
		cookie--;
		ncookies++;

		if ((io_req->IoFlags &
		    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) == 0)
			/* XXX: Why? And why only if not fast path? */
			io_req->ChainOffset = lmrc->l_chain_offset_io_request;
		else
			io_req->ChainOffset = 0;

		sgl_ptr[i - 1].Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
		sgl_ptr[i - 1].Length = sizeof (Mpi25SGEIOUnion_t) * ncookies;
		lmrc_dma_set_addr64(&mpt->mpt_chain_dma,
		    (uint64_t *)&sgl_ptr[i - 1].Address);
		sgl_ptr = mpt->mpt_chain;

		nsge = ncookies;
		max_sge = lmrc->l_max_sge_in_chain;

		VERIFY3U(nsge, <=, max_sge);
	}

	sgl_ptr[i - 1].Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;

	(void) ddi_dma_sync(mpt->mpt_chain_dma.ld_hdl, 0,
	    mpt->mpt_chain_dma.ld_len, DDI_DMA_SYNC_FORDEV);
}

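/*
 * Accessors for lmrc_dma_t buffers. All allocations made through
 * lmrc_dma_alloc() are bound as a single cookie (dma_attr_sgllen is forced
 * to 1 there), so ddi_dma_cookie_one() is safe to use here.
 */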
size_t
lmrc_dma_get_size(lmrc_dma_t *dmap)
{
	const ddi_dma_cookie_t *cookie = ddi_dma_cookie_one(dmap->ld_hdl);

	return (cookie->dmac_size);
}

void
lmrc_dma_set_addr64(lmrc_dma_t *dmap, uint64_t *addr)
{
	const ddi_dma_cookie_t *cookie = ddi_dma_cookie_one(dmap->ld_hdl);

	*addr = cookie->dmac_laddress;
}

void
lmrc_dma_set_addr32(lmrc_dma_t *dmap, uint32_t *addr)
{
	const ddi_dma_cookie_t *cookie = ddi_dma_cookie_one(dmap->ld_hdl);

	*addr = cookie->dmac_address;
}

int
lmrc_dma_alloc(lmrc_t *lmrc, ddi_dma_attr_t attr, lmrc_dma_t *dmap, size_t len,
    uint64_t align, uint_t flags)
{
	int ret;

	VERIFY3U(len, >, 0);
	VERIFY3U(align, >=, 1);

	bzero(dmap, sizeof (*dmap));

	attr.dma_attr_align = align;
	attr.dma_attr_sgllen = 1;
	attr.dma_attr_granular = 1;

	ret = ddi_dma_alloc_handle(lmrc->l_dip, &attr, DDI_DMA_SLEEP, NULL,
	    &dmap->ld_hdl);
	if (ret != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(lmrc->l_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (ret);
	}

	ret = ddi_dma_mem_alloc(dmap->ld_hdl, len, &lmrc->l_acc_attr,
	    flags, DDI_DMA_SLEEP, NULL, (caddr_t *)&dmap->ld_buf,
	    &dmap->ld_len, &dmap->ld_acc);
	if (ret != DDI_SUCCESS) {
		/*
		 * When DDI_DMA_SLEEP is specified, ddi_dma_mem_alloc() can
		 * only fail if the flags are conflicting, which indicates a
		 * driver bug and should cause a panic.
		 */
		dev_err(lmrc->l_dip, CE_PANIC,
		    "!failed to allocate DMA memory, check DMA flags (%x)",
		    flags);
		return (ret);
	}

	ret = ddi_dma_addr_bind_handle(dmap->ld_hdl, NULL, dmap->ld_buf,
	    dmap->ld_len, DDI_DMA_RDWR | flags, DDI_DMA_SLEEP, NULL, NULL,
	    NULL);
	if (ret != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dmap->ld_acc);
		ddi_dma_free_handle(&dmap->ld_hdl);
		return (ret);
	}

	bzero(dmap->ld_buf, dmap->ld_len);
	return (DDI_SUCCESS);
}

void
lmrc_dma_free(lmrc_dma_t *dmap)
{
	if (dmap->ld_hdl != NULL)
		(void) ddi_dma_unbind_handle(dmap->ld_hdl);
	if (dmap->ld_acc != NULL)
		ddi_dma_mem_free(&dmap->ld_acc);
	if (dmap->ld_hdl != NULL)
		ddi_dma_free_handle(&dmap->ld_hdl);
	bzero(dmap, sizeof (lmrc_dma_t));
}

static lmrc_adapter_class_t
lmrc_get_class(lmrc_t *lmrc)
{
	int device_id = ddi_prop_get_int(DDI_DEV_T_ANY, lmrc->l_dip,
	    DDI_PROP_DONTPASS, "device-id", 0);

	switch (device_id) {
	case LMRC_VENTURA:
	case LMRC_CRUSADER:
	case LMRC_HARPOON:
	case LMRC_TOMCAT:
	case LMRC_VENTURA_4PORT:
	case LMRC_CRUSADER_4PORT:
		return (LMRC_ACLASS_VENTURA);

	case LMRC_AERO_10E1:
	case LMRC_AERO_10E5:
		dev_err(lmrc->l_dip, CE_CONT,
		    "?Adapter is in configurable secure mode\n");
		/*FALLTHRU*/
	case LMRC_AERO_10E2:
	case LMRC_AERO_10E6:
		return (LMRC_ACLASS_AERO);

	case LMRC_AERO_10E0:
	case LMRC_AERO_10E3:
	case LMRC_AERO_10E4:
	case LMRC_AERO_10E7:
		dev_err(lmrc->l_dip, CE_CONT,
		    "?Adapter is in non-secure mode\n");
	}

	return (LMRC_ACLASS_OTHER);
}

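/*
 * The common attach entry point dispatches on the iport unit address: the
 * controller instance itself has none, while the two iport instances are
 * identified by LMRC_IPORT_RAID and LMRC_IPORT_PHYS, as described in the
 * theory statement above.
 */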
static int
lmrc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	const char *addr = scsi_hba_iport_unit_address(dip);

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	if (addr == NULL)
		return (lmrc_ctrl_attach(dip));

	if (strcmp(addr, LMRC_IPORT_RAID) == 0)
		return (lmrc_raid_attach(dip));

	if (strcmp(addr, LMRC_IPORT_PHYS) == 0)
		return (lmrc_phys_attach(dip));

	return (DDI_FAILURE);
}

static int
lmrc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	const char *addr = scsi_hba_iport_unit_address(dip);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (addr == NULL)
		return (lmrc_ctrl_detach(dip));

	if (strcmp(addr, LMRC_IPORT_RAID) == 0)
		return (lmrc_raid_detach(dip));

	if (strcmp(addr, LMRC_IPORT_PHYS) == 0)
		return (lmrc_phys_detach(dip));

	return (DDI_FAILURE);
}

static int
lmrc_quiesce(dev_info_t *dip)
{
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(dip));

	if (lmrc == NULL)
		return (DDI_SUCCESS);

	return (lmrc_ctrl_shutdown(lmrc));
}

static struct cb_ops lmrc_cb_ops = {
	.cb_rev = CB_REV,
	.cb_flag = D_NEW | D_MP,

	.cb_open = scsi_hba_open,
	.cb_close = scsi_hba_close,

	.cb_ioctl = lmrc_ioctl,

	.cb_strategy = nodev,
	.cb_print = nodev,
	.cb_dump = nodev,
	.cb_read = nodev,
	.cb_write = nodev,
	.cb_devmap = nodev,
	.cb_mmap = nodev,
	.cb_segmap = nodev,
	.cb_chpoll = nochpoll,
	.cb_prop_op = ddi_prop_op,
	.cb_str = NULL,
	.cb_aread = nodev,
	.cb_awrite = nodev,
};

static struct dev_ops lmrc_dev_ops = {
	.devo_rev = DEVO_REV,
	.devo_refcnt = 0,

	.devo_attach = lmrc_attach,
	.devo_detach = lmrc_detach,

	.devo_cb_ops = &lmrc_cb_ops,

	.devo_getinfo = ddi_no_info,
	.devo_identify = nulldev,
	.devo_probe = nulldev,
	.devo_reset = nodev,
	.devo_bus_ops = NULL,
	.devo_power = nodev,
	.devo_quiesce = lmrc_quiesce,
};

static struct modldrv lmrc_modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = "Broadcom MegaRAID 12G SAS RAID",
	.drv_dev_ops = &lmrc_dev_ops,
};

static struct modlinkage lmrc_modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &lmrc_modldrv, NULL },
};

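/*
 * As with any SCSA HBA driver, scsi_hba_init() must be called before the
 * module is installed and scsi_hba_fini() after it has been removed, with
 * the soft state handling wrapped around both.
 */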
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&lmrc_state, sizeof (lmrc_t), 1);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = scsi_hba_init(&lmrc_modlinkage);
	if (ret != 0) {
		ddi_soft_state_fini(&lmrc_state);
		return (ret);
	}

	ret = mod_install(&lmrc_modlinkage);
	if (ret != DDI_SUCCESS) {
		scsi_hba_fini(&lmrc_modlinkage);
		ddi_soft_state_fini(&lmrc_state);
		return (ret);
	}

	return (DDI_SUCCESS);
}

int
_fini(void)
{
	int ret;

	ret = mod_remove(&lmrc_modlinkage);
	if (ret == DDI_SUCCESS) {
		scsi_hba_fini(&lmrc_modlinkage);
		ddi_soft_state_fini(&lmrc_state);
	}

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&lmrc_modlinkage, modinfop));
}