1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <dev/mrsas/mrsas.h> 44 #include <dev/mrsas/mrsas_ioctl.h> 45 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 49 #include <sys/sysctl.h> 50 #include <sys/types.h> 51 #include <sys/kthread.h> 52 #include <sys/taskqueue.h> 53 #include <sys/smp.h> 54 55 56 /* 57 * Function prototypes 58 */ 59 static d_open_t mrsas_open; 60 static d_close_t mrsas_close; 61 static d_read_t mrsas_read; 62 static d_write_t mrsas_write; 63 static d_ioctl_t mrsas_ioctl; 64 static d_poll_t mrsas_poll; 65 66 static struct mrsas_mgmt_info mrsas_mgmt_info; 67 static struct mrsas_ident *mrsas_find_ident(device_t); 68 static int mrsas_setup_msix(struct mrsas_softc *sc); 69 static int mrsas_allocate_msix(struct mrsas_softc *sc); 70 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 71 static void mrsas_flush_cache(struct mrsas_softc *sc); 72 static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 73 static void mrsas_ocr_thread(void *arg); 74 static int mrsas_get_map_info(struct mrsas_softc *sc); 75 static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 76 static int mrsas_sync_map_info(struct mrsas_softc *sc); 77 static int mrsas_get_pd_list(struct mrsas_softc *sc); 78 static int mrsas_get_ld_list(struct mrsas_softc *sc); 79 static int mrsas_setup_irq(struct mrsas_softc *sc); 80 static int mrsas_alloc_mem(struct mrsas_softc *sc); 81 static int mrsas_init_fw(struct mrsas_softc *sc); 82 static int mrsas_setup_raidmap(struct mrsas_softc *sc); 83 static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 84 static int mrsas_clear_intr(struct mrsas_softc *sc); 85 static int mrsas_get_ctrl_info(struct mrsas_softc *sc); 86 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); 87 static int 88 
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 89 struct mrsas_mfi_cmd *cmd_to_abort); 90 static struct mrsas_softc * 91 mrsas_get_softc_instance(struct cdev *dev, 92 u_long cmd, caddr_t arg); 93 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 94 u_int8_t 95 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 96 struct mrsas_mfi_cmd *mfi_cmd); 97 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); 98 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 99 int mrsas_init_adapter(struct mrsas_softc *sc); 100 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 101 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 102 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 103 int mrsas_ioc_init(struct mrsas_softc *sc); 104 int mrsas_bus_scan(struct mrsas_softc *sc); 105 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 106 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 107 int mrsas_reset_ctrl(struct mrsas_softc *sc); 108 int mrsas_wait_for_outstanding(struct mrsas_softc *sc); 109 int 110 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 111 struct mrsas_mfi_cmd *cmd); 112 int 113 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 114 int size); 115 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 116 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 117 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 118 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 119 void mrsas_disable_intr(struct mrsas_softc *sc); 120 void mrsas_enable_intr(struct mrsas_softc *sc); 121 void mrsas_free_ioc_cmd(struct mrsas_softc *sc); 122 void mrsas_free_mem(struct mrsas_softc *sc); 123 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 124 void mrsas_isr(void *arg); 125 void mrsas_teardown_intr(struct mrsas_softc *sc); 126 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 
127 void mrsas_kill_hba(struct mrsas_softc *sc); 128 void mrsas_aen_handler(struct mrsas_softc *sc); 129 void 130 mrsas_write_reg(struct mrsas_softc *sc, int offset, 131 u_int32_t value); 132 void 133 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 134 u_int32_t req_desc_hi); 135 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 136 void 137 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 138 struct mrsas_mfi_cmd *cmd, u_int8_t status); 139 void 140 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, 141 u_int8_t extStatus); 142 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); 143 144 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd 145 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 146 147 extern int mrsas_cam_attach(struct mrsas_softc *sc); 148 extern void mrsas_cam_detach(struct mrsas_softc *sc); 149 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 150 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 151 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 152 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 153 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 154 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); 155 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 156 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 157 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 158 extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 159 extern void mrsas_xpt_release(struct mrsas_softc *sc); 160 extern MRSAS_REQUEST_DESCRIPTOR_UNION * 161 mrsas_get_request_desc(struct mrsas_softc *sc, 162 u_int16_t index); 163 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 164 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); 165 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc 
*sc); 166 167 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); 168 169 /* 170 * PCI device struct and table 171 * 172 */ 173 typedef struct mrsas_ident { 174 uint16_t vendor; 175 uint16_t device; 176 uint16_t subvendor; 177 uint16_t subdevice; 178 const char *desc; 179 } MRSAS_CTLR_ID; 180 181 MRSAS_CTLR_ID device_table[] = { 182 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, 183 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, 184 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, 185 {0, 0, 0, 0, NULL} 186 }; 187 188 /* 189 * Character device entry points 190 * 191 */ 192 static struct cdevsw mrsas_cdevsw = { 193 .d_version = D_VERSION, 194 .d_open = mrsas_open, 195 .d_close = mrsas_close, 196 .d_read = mrsas_read, 197 .d_write = mrsas_write, 198 .d_ioctl = mrsas_ioctl, 199 .d_poll = mrsas_poll, 200 .d_name = "mrsas", 201 }; 202 203 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); 204 205 /* 206 * In the cdevsw routines, we find our softc by using the si_drv1 member of 207 * struct cdev. We set this variable to point to our softc in our attach 208 * routine when we create the /dev entry. 
209 */ 210 int 211 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 212 { 213 struct mrsas_softc *sc; 214 215 sc = dev->si_drv1; 216 return (0); 217 } 218 219 int 220 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 221 { 222 struct mrsas_softc *sc; 223 224 sc = dev->si_drv1; 225 return (0); 226 } 227 228 int 229 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) 230 { 231 struct mrsas_softc *sc; 232 233 sc = dev->si_drv1; 234 return (0); 235 } 236 int 237 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) 238 { 239 struct mrsas_softc *sc; 240 241 sc = dev->si_drv1; 242 return (0); 243 } 244 245 /* 246 * Register Read/Write Functions 247 * 248 */ 249 void 250 mrsas_write_reg(struct mrsas_softc *sc, int offset, 251 u_int32_t value) 252 { 253 bus_space_tag_t bus_tag = sc->bus_tag; 254 bus_space_handle_t bus_handle = sc->bus_handle; 255 256 bus_space_write_4(bus_tag, bus_handle, offset, value); 257 } 258 259 u_int32_t 260 mrsas_read_reg(struct mrsas_softc *sc, int offset) 261 { 262 bus_space_tag_t bus_tag = sc->bus_tag; 263 bus_space_handle_t bus_handle = sc->bus_handle; 264 265 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); 266 } 267 268 269 /* 270 * Interrupt Disable/Enable/Clear Functions 271 * 272 */ 273 void 274 mrsas_disable_intr(struct mrsas_softc *sc) 275 { 276 u_int32_t mask = 0xFFFFFFFF; 277 u_int32_t status; 278 279 sc->mask_interrupts = 1; 280 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); 281 /* Dummy read to force pci flush */ 282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 283 } 284 285 void 286 mrsas_enable_intr(struct mrsas_softc *sc) 287 { 288 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; 289 u_int32_t status; 290 291 sc->mask_interrupts = 0; 292 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); 293 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 294 295 
mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); 296 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 297 } 298 299 static int 300 mrsas_clear_intr(struct mrsas_softc *sc) 301 { 302 u_int32_t status, fw_status, fw_state; 303 304 /* Read received interrupt */ 305 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 306 307 /* 308 * If FW state change interrupt is received, write to it again to 309 * clear 310 */ 311 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) { 312 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 313 outbound_scratch_pad)); 314 fw_state = fw_status & MFI_STATE_MASK; 315 if (fw_state == MFI_STATE_FAULT) { 316 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n"); 317 if (sc->ocr_thread_active) 318 wakeup(&sc->ocr_chan); 319 } 320 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status); 321 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 322 return (1); 323 } 324 /* Not our interrupt, so just return */ 325 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 326 return (0); 327 328 /* We got a reply interrupt */ 329 return (1); 330 } 331 332 /* 333 * PCI Support Functions 334 * 335 */ 336 static struct mrsas_ident * 337 mrsas_find_ident(device_t dev) 338 { 339 struct mrsas_ident *pci_device; 340 341 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { 342 if ((pci_device->vendor == pci_get_vendor(dev)) && 343 (pci_device->device == pci_get_device(dev)) && 344 ((pci_device->subvendor == pci_get_subvendor(dev)) || 345 (pci_device->subvendor == 0xffff)) && 346 ((pci_device->subdevice == pci_get_subdevice(dev)) || 347 (pci_device->subdevice == 0xffff))) 348 return (pci_device); 349 } 350 return (NULL); 351 } 352 353 static int 354 mrsas_probe(device_t dev) 355 { 356 static u_int8_t first_ctrl = 1; 357 struct mrsas_ident *id; 358 359 if ((id = mrsas_find_ident(dev)) != NULL) { 360 if (first_ctrl) { 361 printf("AVAGO 
MegaRAID SAS FreeBSD mrsas driver version: %s\n", 362 MRSAS_VERSION); 363 first_ctrl = 0; 364 } 365 device_set_desc(dev, id->desc); 366 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ 367 return (-30); 368 } 369 return (ENXIO); 370 } 371 372 /* 373 * mrsas_setup_sysctl: setup sysctl values for mrsas 374 * input: Adapter instance soft state 375 * 376 * Setup sysctl entries for mrsas driver. 377 */ 378 static void 379 mrsas_setup_sysctl(struct mrsas_softc *sc) 380 { 381 struct sysctl_ctx_list *sysctl_ctx = NULL; 382 struct sysctl_oid *sysctl_tree = NULL; 383 char tmpstr[80], tmpstr2[80]; 384 385 /* 386 * Setup the sysctl variable so the user can change the debug level 387 * on the fly. 388 */ 389 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", 390 device_get_unit(sc->mrsas_dev)); 391 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); 392 393 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); 394 if (sysctl_ctx != NULL) 395 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); 396 397 if (sysctl_tree == NULL) { 398 sysctl_ctx_init(&sc->sysctl_ctx); 399 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 400 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, 401 CTLFLAG_RD, 0, tmpstr); 402 if (sc->sysctl_tree == NULL) 403 return; 404 sysctl_ctx = &sc->sysctl_ctx; 405 sysctl_tree = sc->sysctl_tree; 406 } 407 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 408 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, 409 "Disable the use of OCR"); 410 411 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 412 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, 413 strlen(MRSAS_VERSION), "driver version"); 414 415 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 416 OID_AUTO, "reset_count", CTLFLAG_RD, 417 &sc->reset_count, 0, "number of ocr from start of the day"); 418 419 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 420 OID_AUTO, "fw_outstanding", CTLFLAG_RD, 421 
&sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); 422 423 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 424 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 425 &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); 426 427 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 428 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, 429 "Driver debug level"); 430 431 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 432 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 433 0, "Driver IO timeout value in mili-second."); 434 435 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 436 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, 437 &sc->mrsas_fw_fault_check_delay, 438 0, "FW fault check thread delay in seconds. <default is 1 sec>"); 439 440 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 441 OID_AUTO, "reset_in_progress", CTLFLAG_RD, 442 &sc->reset_in_progress, 0, "ocr in progress status"); 443 444 } 445 446 /* 447 * mrsas_get_tunables: get tunable parameters. 448 * input: Adapter instance soft state 449 * 450 * Get tunable parameters. This will help to debug driver at boot time. 451 */ 452 static void 453 mrsas_get_tunables(struct mrsas_softc *sc) 454 { 455 char tmpstr[80]; 456 457 /* XXX default to some debugging for now */ 458 sc->mrsas_debug = MRSAS_FAULT; 459 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; 460 sc->mrsas_fw_fault_check_delay = 1; 461 sc->reset_count = 0; 462 sc->reset_in_progress = 0; 463 464 /* 465 * Grab the global variables. 466 */ 467 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); 468 469 /* 470 * Grab the global variables. 
471 */ 472 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); 473 474 /* Grab the unit-instance variables */ 475 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", 476 device_get_unit(sc->mrsas_dev)); 477 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); 478 } 479 480 /* 481 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. 482 * Used to get sequence number at driver load time. 483 * input: Adapter soft state 484 * 485 * Allocates DMAable memory for the event log info internal command. 486 */ 487 int 488 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) 489 { 490 int el_info_size; 491 492 /* Allocate get event log info command */ 493 el_info_size = sizeof(struct mrsas_evt_log_info); 494 if (bus_dma_tag_create(sc->mrsas_parent_tag, 495 1, 0, 496 BUS_SPACE_MAXADDR_32BIT, 497 BUS_SPACE_MAXADDR, 498 NULL, NULL, 499 el_info_size, 500 1, 501 el_info_size, 502 BUS_DMA_ALLOCNOW, 503 NULL, NULL, 504 &sc->el_info_tag)) { 505 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); 506 return (ENOMEM); 507 } 508 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, 509 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { 510 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); 511 return (ENOMEM); 512 } 513 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, 514 sc->el_info_mem, el_info_size, mrsas_addr_cb, 515 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { 516 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); 517 return (ENOMEM); 518 } 519 memset(sc->el_info_mem, 0, el_info_size); 520 return (0); 521 } 522 523 /* 524 * mrsas_free_evt_info_cmd: Free memory for Event log info command 525 * input: Adapter soft state 526 * 527 * Deallocates memory for the event log info internal command. 
528 */ 529 void 530 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 531 { 532 if (sc->el_info_phys_addr) 533 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 534 if (sc->el_info_mem != NULL) 535 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 536 if (sc->el_info_tag != NULL) 537 bus_dma_tag_destroy(sc->el_info_tag); 538 } 539 540 /* 541 * mrsas_get_seq_num: Get latest event sequence number 542 * @sc: Adapter soft state 543 * @eli: Firmware event log sequence number information. 544 * 545 * Firmware maintains a log of all events in a non-volatile area. 546 * Driver get the sequence number using DCMD 547 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 548 */ 549 550 static int 551 mrsas_get_seq_num(struct mrsas_softc *sc, 552 struct mrsas_evt_log_info *eli) 553 { 554 struct mrsas_mfi_cmd *cmd; 555 struct mrsas_dcmd_frame *dcmd; 556 557 cmd = mrsas_get_mfi_cmd(sc); 558 559 if (!cmd) { 560 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 561 return -ENOMEM; 562 } 563 dcmd = &cmd->frame->dcmd; 564 565 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { 566 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); 567 mrsas_release_mfi_cmd(cmd); 568 return -ENOMEM; 569 } 570 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 571 572 dcmd->cmd = MFI_CMD_DCMD; 573 dcmd->cmd_status = 0x0; 574 dcmd->sge_count = 1; 575 dcmd->flags = MFI_FRAME_DIR_READ; 576 dcmd->timeout = 0; 577 dcmd->pad_0 = 0; 578 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); 579 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 580 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; 581 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); 582 583 mrsas_issue_blocked_cmd(sc, cmd); 584 585 /* 586 * Copy the data back into callers buffer 587 */ 588 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); 589 mrsas_free_evt_log_info_cmd(sc); 590 mrsas_release_mfi_cmd(cmd); 591 592 return 0; 593 } 594 595 596 /* 597 * mrsas_register_aen: 
 *	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale:	Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] holds the class/locale the pending AEN used. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	/* mbox.w[0] = first sequence to report; mbox.w[1] = class/locale. */
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * NOTE(review): if another AEN was registered while we aborted the
	 * old one above, back off and let that registration stand; the MFI
	 * command acquired here is returned to the pool.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen:	Subscribes to AEN during driver load time
 * @instance:		Adapter soft state
 * return:		0 on success, -1 if the sequence number could not be
 *			read, otherwise mrsas_register_aen()'s result.
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;


	/* Get the latest sequence number from FW */

	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);

}

/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 * return:		SUCCESS, or FAIL after tearing down any vectors that
 *			were set up before the failure.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource IDs are 1-based. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_allocate_msix:	Setup MSI-x vectors
 * @sc:			adapter soft state
 * return:		SUCCESS or FAIL.
 *
 * pci_alloc_msix() may grant fewer vectors than requested; the granted
 * count is written back into sc->msix_vectors.
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
		    " of vectors\n", sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_attach:	PCI entry point
 * input:	pointer to device struct
 * return:	0 on success, ENXIO on any failure (after unwinding whatever
 *		was set up, via the goto ladder at the bottom).
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	/* ioctl_lock is a spin mutex; the others are default sleep mutexes. */
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent
	 * IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	/* Create a /dev entry for this device. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(dev));
	/* Unit 0 also gets a compatibility alias for management tools. */
	if (device_get_unit(dev) == 0)
		make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register SCSI mid-layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Start the OCR (online controller reset) watchdog thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		printf("Error %d starting rescan thread\n", error);
		goto attach_fail_irq;
	}
	mrsas_setup_sysctl(sc);

	/* Initiate AEN (Asynchronous Event Notification) */

	if (mrsas_start_aen(sc)) {
		printf("Error: start aen failed\n");
		goto fail_start_aen;
	}
	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	return (0);

	/* Error unwind: each label releases what was set up before it. */
fail_start_aen:
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);
attach_fail:
	destroy_dev(sc->mrsas_cdev);
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

/*
 * mrsas_detach:	De-allocates and teardown resources
 * input:	pointer to device struct
 * return:	0 (always succeeds once the waits complete).
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Tell the OCR thread and in-flight paths that we are going away. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (1s polls) for any in-progress OCR to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for ocr to be finished\n", i);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Wait for the OCR thread itself to exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Quiesce the controller before tearing everything down. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

/*
 * mrsas_free_mem:	Frees allocated memory
 * input:	Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory.  For each of the two maps: unload the DMA
	 * map, free the DMA memory, destroy the tag, then free the
	 * driver-local copy.
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}

	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames (per-command DMA frames; the command structures
	 * themselves are freed further below).
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (children tags were destroyed above)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
if (sc->ctrl_info != NULL) 1182 free(sc->ctrl_info, M_MRSAS); 1183 } 1184 1185 /* 1186 * mrsas_teardown_intr: Teardown interrupt 1187 * input: Adapter instance soft state 1188 * 1189 * This function is called from mrsas_detach() to teardown and release bus 1190 * interrupt resourse. 1191 */ 1192 void 1193 mrsas_teardown_intr(struct mrsas_softc *sc) 1194 { 1195 int i; 1196 1197 if (!sc->msix_enable) { 1198 if (sc->intr_handle[0]) 1199 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); 1200 if (sc->mrsas_irq[0] != NULL) 1201 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1202 sc->irq_id[0], sc->mrsas_irq[0]); 1203 sc->intr_handle[0] = NULL; 1204 } else { 1205 for (i = 0; i < sc->msix_vectors; i++) { 1206 if (sc->intr_handle[i]) 1207 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], 1208 sc->intr_handle[i]); 1209 1210 if (sc->mrsas_irq[i] != NULL) 1211 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1212 sc->irq_id[i], sc->mrsas_irq[i]); 1213 1214 sc->intr_handle[i] = NULL; 1215 } 1216 pci_release_msi(sc->mrsas_dev); 1217 } 1218 1219 } 1220 1221 /* 1222 * mrsas_suspend: Suspend entry point 1223 * input: Device struct pointer 1224 * 1225 * This function is the entry point for system suspend from the OS. 1226 */ 1227 static int 1228 mrsas_suspend(device_t dev) 1229 { 1230 struct mrsas_softc *sc; 1231 1232 sc = device_get_softc(dev); 1233 return (0); 1234 } 1235 1236 /* 1237 * mrsas_resume: Resume entry point 1238 * input: Device struct pointer 1239 * 1240 * This function is the entry point for system resume from the OS. 1241 */ 1242 static int 1243 mrsas_resume(device_t dev) 1244 { 1245 struct mrsas_softc *sc; 1246 1247 sc = device_get_softc(dev); 1248 return (0); 1249 } 1250 1251 /** 1252 * mrsas_get_softc_instance: Find softc instance based on cmd type 1253 * 1254 * This function will return softc instance based on cmd type. 1255 * In some case, application fire ioctl on required management instance and 1256 * do not provide host_no. 
Use cdev->si_drv1 to get softc instance for those 1257 * case, else get the softc instance from host_no provided by application in 1258 * user data. 1259 */ 1260 1261 static struct mrsas_softc * 1262 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1263 { 1264 struct mrsas_softc *sc = NULL; 1265 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1266 1267 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1268 sc = dev->si_drv1; 1269 } else { 1270 /* 1271 * get the Host number & the softc from data sent by the 1272 * Application 1273 */ 1274 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1275 if ((user_ioc->host_no >= mrsas_mgmt_info.max_index) || (sc == NULL)) { 1276 if (sc == NULL) 1277 mrsas_dprint(sc, MRSAS_FAULT, 1278 "There is no Controller number %d .\n", user_ioc->host_no); 1279 else 1280 mrsas_dprint(sc, MRSAS_FAULT, 1281 "Invalid Controller number %d .\n", user_ioc->host_no); 1282 } 1283 } 1284 1285 return sc; 1286 } 1287 1288 /* 1289 * mrsas_ioctl: IOCtl commands entry point. 1290 * 1291 * This function is the entry point for IOCtls from the OS. It calls the 1292 * appropriate function for processing depending on the command received. 
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/*
	 * Fast path: if no online controller reset (OCR) is in progress,
	 * proceed directly; otherwise poll (1-second ticks) until it ends.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "OCR to be finished %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this adapter's PCI location to the application. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}

/*
 * mrsas_poll: poll entry point for mrsas driver fd
 *
 * This function is the entry point for poll from the OS. It waits for some AEN
 * events to be triggered from the controller and notifies back.
 */
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mrsas_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	/* Report readable immediately if an AEN has already fired. */
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mrsas_aen_triggered) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
	}
	/* Otherwise register this thread for selwakeup() on the next AEN. */
	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			mtx_lock(&sc->aen_lock);
			sc->mrsas_poll_waiting = 1;
			selrecord(td, &sc->mrsas_select);
			mtx_unlock(&sc->aen_lock);
		}
	}
	return revents;
}

/*
 * mrsas_setup_irq: Set up interrupt
 * input: Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags indicating
 * resource permitting contemporaneous sharing and for resource to activate
 * atomically.
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	/* Try MSI-X first; fall back to a shared legacy INTx vector. */
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");

	else {
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy"
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr: ISR entry point
 * input: argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	/* In legacy mode the interrupt may be shared: check it is ours. */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Locate this vector's ring within reply_desc_mem and resume at the
	 * index where the previous invocation stopped.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  An
	 * all-ones descriptor means "unused slot" — firmware has posted
	 * nothing further.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		/* SMID is 1-based; slot in mpt_cmd_list is SMID - 1. */
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Undo the load-balance accounting for R1 fast-path. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor (wrap to ring start at 0) */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map firmware command status to CAM status
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
1627 */ 1628 void 1629 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus) 1630 { 1631 struct mrsas_softc *sc = cmd->sc; 1632 u_int8_t *sense_data; 1633 1634 switch (status) { 1635 case MFI_STAT_OK: 1636 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP; 1637 break; 1638 case MFI_STAT_SCSI_IO_FAILED: 1639 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1640 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1641 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data; 1642 if (sense_data) { 1643 /* For now just copy 18 bytes back */ 1644 memcpy(sense_data, cmd->sense, 18); 1645 cmd->ccb_ptr->csio.sense_len = 18; 1646 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; 1647 } 1648 break; 1649 case MFI_STAT_LD_OFFLINE: 1650 case MFI_STAT_DEVICE_NOT_FOUND: 1651 if (cmd->ccb_ptr->ccb_h.target_lun) 1652 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; 1653 else 1654 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; 1655 break; 1656 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1657 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; 1658 break; 1659 default: 1660 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); 1661 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; 1662 cmd->ccb_ptr->csio.scsi_status = status; 1663 } 1664 return; 1665 } 1666 1667 /* 1668 * mrsas_alloc_mem: Allocate DMAable memory 1669 * input: Adapter instance soft state 1670 * 1671 * This function creates the parent DMA tag and allocates DMAable memory. DMA 1672 * tag describes constraints of DMA mapping. Memory allocated is mapped into 1673 * Kernel virtual address. Callback argument is physical memory address. 
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag; all the per-buffer tags below derive
	 * their constraints from it.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,		/* maxsize */
	    MRSAS_MAX_SGL,		/* nsegments */
	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply ring per MSI-X vector (at least one in legacy mode). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MAX_IO_SIZE,
	    MRSAS_MAX_SGL,
	    MRSAS_MAX_IO_SIZE,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument, machine dependent type
 * that describes DMA segments, number of segments, error code
 *
 * This function is for the driver to receive mapping information resultant of
 * the bus_dmamap_load(). The information is actually not being used, but the
 * address is saved anyway.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	/* Single-segment loads only: record the first segment's address. */
	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
1925 */ 1926 static int 1927 mrsas_setup_raidmap(struct mrsas_softc *sc) 1928 { 1929 int i; 1930 1931 for (i = 0; i < 2; i++) { 1932 sc->ld_drv_map[i] = 1933 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 1934 /* Do Error handling */ 1935 if (!sc->ld_drv_map[i]) { 1936 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 1937 1938 if (i == 1) 1939 free(sc->ld_drv_map[0], M_MRSAS); 1940 /* ABORT driver initialization */ 1941 goto ABORT; 1942 } 1943 } 1944 1945 for (int i = 0; i < 2; i++) { 1946 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1947 4, 0, 1948 BUS_SPACE_MAXADDR_32BIT, 1949 BUS_SPACE_MAXADDR, 1950 NULL, NULL, 1951 sc->max_map_sz, 1952 1, 1953 sc->max_map_sz, 1954 BUS_DMA_ALLOCNOW, 1955 NULL, NULL, 1956 &sc->raidmap_tag[i])) { 1957 device_printf(sc->mrsas_dev, 1958 "Cannot allocate raid map tag.\n"); 1959 return (ENOMEM); 1960 } 1961 if (bus_dmamem_alloc(sc->raidmap_tag[i], 1962 (void **)&sc->raidmap_mem[i], 1963 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 1964 device_printf(sc->mrsas_dev, 1965 "Cannot allocate raidmap memory.\n"); 1966 return (ENOMEM); 1967 } 1968 bzero(sc->raidmap_mem[i], sc->max_map_sz); 1969 1970 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 1971 sc->raidmap_mem[i], sc->max_map_sz, 1972 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 1973 BUS_DMA_NOWAIT)) { 1974 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 1975 return (ENOMEM); 1976 } 1977 if (!sc->raidmap_mem[i]) { 1978 device_printf(sc->mrsas_dev, 1979 "Cannot allocate memory for raid map.\n"); 1980 return (ENOMEM); 1981 } 1982 } 1983 1984 if (!mrsas_get_map_info(sc)) 1985 mrsas_sync_map_info(sc); 1986 1987 return (0); 1988 1989 ABORT: 1990 return (1); 1991 } 1992 1993 /* 1994 * mrsas_init_fw: Initialize Firmware 1995 * input: Adapter soft state 1996 * 1997 * Calls transition_to_ready() to make sure Firmware is in operational state and 1998 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. 
It
 * issues internal commands to get the controller info after the IOC_INIT
 * command response is received by Firmware. Note: code relating to
 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
 * is left here as placeholder.
 */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{

	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2;
	int msix_enable = 0;
	int fw_msix_count = 0;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	/* MSI-x index 0- reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/*
	 * Check if MSI-X is supported while in ready state
	 * (bit 26 of the outbound scratch pad register).
	 */
	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			/* Per-queue reply post host index registers. */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
		    "Online CPU %d Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return (1);
	}
	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED \n");

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
		return (1);
	}
	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	mrsas_get_pd_list(sc);

	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	mrsas_get_ld_list(sc);

	/*
	 * Compute the max allowed sectors per IO: The controller info has
	 * two limits on max sectors. Driver should use the minimum of these
	 * two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information to
	 * calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	/* Start from the SGE-derived limit, then clamp to the FW limit. */
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		/* Fast-path I/O only if the RAID map validates. */
		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}
	return (0);
}

/*
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller. The FW register is read to determined the number of
 * commands that is supported. All memory allocations for IO is based on
 * max_cmd. Appropriate calculations are performed in this function.
*/
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.
	 * reply_q_depth: (max_cmd + 1) rounded up to a multiple of 16,
	 * then doubled — FW requirement for the reply post queue.
	 */
	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	/* One extra frame beyond max_cmd, plus one leading frame */
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	/* 1 KB chain frame per command */
	sc->chain_frames_alloc_sz = 1024 * max_cmd;
	/* /16: SGEs are 16 bytes each in the MPI2 frame */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
	/* -2 reserves entries for the chain element itself */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply index slot per MSI-X vector (at least one) */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}

/*
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command: 1 KB MFI frame area followed by the
	 * MPI2 IOC_INIT request (see mrsas_ioc_init(), which places the
	 * request at offset 1024).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* FW needs a 32-bit-addressable buffer */
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,				/* single contiguous segment */
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
*/
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/* Unload/free/destroy in reverse order of allocation; each step is
	 * guarded so partial allocations can be torn down safely. */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 * Returns 0 on success, 1 on allocation failure, timeout or FW error.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/* MPI2 IOC_INIT request lives at offset 1024, after the MFI frame */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	/* Frame size is expressed in 4-byte words */
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* MFI wrapper frame at the start of ioc_init_mem carries the request */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF = "no response yet"; polled below until FW overwrites it */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW if the buffer exists */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}

/*
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
 * array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind: free the commands allocated so far */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve per-command slices out of the pre-allocated DMA regions.
	 * io_req frames start one frame in (frame 0 is reserved).
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = 1024 * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		/* index is the SMID: 1-based, 0 is reserved */
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout, 0);
		/* MRSAS_ULONG_MAX marks "not paired with an MFI command" */
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter softstate
 * request descriptor address low
 * request descriptor address high
 *
 * This functions fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/* pci_lock serializes the low/high register write pair so two
	 * descriptors cannot interleave */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_transition_to_ready: Move FW to Ready state input:
 * Adapter instance soft state
 *
 * During the initialization, FW passes can potentially be in any one of several
 * possible states. If the FW in operational, waiting-for-handshake states,
 * driver must take steps to bring it to ready state. Otherwise, it has to
 * wait for the ready state.
*/
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	/* Drive the FW state machine one transition per iteration until READY */
	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a faulted FW is expected; otherwise give up */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell ack bit (bit 0) to clear */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs.
		 * Progress is detected by comparing the full scratch-pad
		 * value (abs_state), not just the masked state bits.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd: Get a cmd from free command pool
 * input: Adapter soft state
 *
 * This function removes an MFI command from the command list.
 * Returns NULL when the pool is empty.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
 * input: Adapter Context.
 *
 * This function will check FW status register and flag do_timeout_reset flag.
* It will do OCR/Kill adapter if FW is in fault state or IO timed out has
 * trigger reset.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* ioctl_lock (spin) guards reset bookkeeping against
			 * concurrent ioctl paths */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM queues for the duration of the reset */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}

/*
 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
 * input: Adapter Context.
 *
 * This function will clear reply descriptor so that post OCR driver and FW will
 * lost old history.
 */
void
mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
	int i, count;
	pMpi2ReplyDescriptorsUnion_t reply_desc;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	/* 0xFFFFFFFF marks a descriptor slot as unused */
	reply_desc = sc->reply_desc_mem;
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
}

/*
 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
 * input: Adapter Context.
 *
 * This function will run from thread context so that it can sleep. 1. Do not
 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
 * to complete for 180 seconds. 3. If #2 does not find any outstanding
 * command Controller is in working state, so skip OCR. Otherwise, do
 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
 * OCR, Re-fire Management command and move Controller to Operation state.
 */
int
mrsas_reset_ctrl(struct mrsas_softc *sc)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	DELAY(1000 * 1000);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				mrsas_atomic_dec(&sc->fw_outstanding);
			}
		}

		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/*
			 * Write the 7-value magic key sequence to unlock
			 * the diagnostic register (MPI2 spec requirement).
			 */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			/* Wait (up to ~100 s) for FW to progress past FW_INIT */
			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			/* Re-fire management commands */
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* A pending map-info DCMD is simply
					 * released; a fresh sync will follow */
					if (mfi_cmd->frame->dcmd.opcode ==
					    MR_DCMD_LD_MAP_GET_INFO) {
						mrsas_release_mfi_cmd(mfi_cmd);
						mrsas_release_mpt_cmd(mpt_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n ",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* All commands drained on their own; no reset needed */
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}

/*
 * mrsas_kill_hba: Kill HBA when OCR is not supported
 * input: Adapter Context.
 *
 * This function will kill HBA when OCR is not supported.
*/
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	pause("mrsas_kill_hba", 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell FW to stop processing */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	mrsas_complete_outstanding_ioctls(sc);
}

/**
 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
 * input:				Controller softc
 *
 * Walks every MPT command; any that wraps a synchronous MFI (ioctl)
 * command is completed on all MSI-X indexes so blocked callers wake up.
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/* ABORT frames have no waiter to wake */
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.status);
			}
		}
	}
}

/*
 * mrsas_wait_for_outstanding: Wait for outstanding commands
 * input: Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding commands to be
 * completed.
*/
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* One iteration per second, up to MRSAS_RESET_WAIT_TIME seconds */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically log progress and drain completed replies */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
		}
		DELAY(1000 * 1000);
	}

	/* retval = 1 means caller must proceed with the adapter reset */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/*
 * mrsas_release_mfi_cmd: Return a cmd to free command pool
 * input: Command packet for return to free cmd pool
 *
 * This function returns the MFI command to the command list.
*/
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	/* Scrub per-use state before the command is recycled */
	cmd->ccb_ptr = NULL;
	cmd->cmd_id.frame_count = 0;
	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_controller_info: Returns FW's controller structure
 * input: Adapter soft state
 * Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure. This
 * information is mainly used to find out the maximum IO transfer per command
 * supported by the FW. Returns 0 on success, non-zero on failure.
 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA-able bounce buffer for the FW to write ctrl info into */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	/* 0xFF = "no response yet" sentinel, cleared by FW on completion */
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	/* On success copy the DMA buffer into the driver's cached copy */
	if (!mrsas_issue_polled(sc, cmd))
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
	else
		retcode = 1;

	mrsas_update_ext_vd_details(sc);

	mrsas_free_ctlr_info_cmd(sc);
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}

/*
 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
 * input:
 * sc - Controller's softc
 *
 * Derives VD/PD count limits and RAID map sizes from the (already
 * fetched) controller info, depending on extended-LD (256 VD) support.
 */
static void
mrsas_update_ext_vd_details(struct mrsas_softc *sc)
{
	sc->max256vdSupport =
	    sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
	/* Below is additional check to address future FW enhancement */
	if (sc->ctrl_info->max_lds > 64)
		sc->max256vdSupport = 1;

	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	if (sc->max256vdSupport) {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	} else {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	}

	/* -1: the map structs already contain one MR_LD_SPAN_MAP element */
	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) *
	    (sc->fw_supported_vd_count - 1));
	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) *
	    (sc->drv_supported_vd_count - 1));

	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);

	if (sc->max256vdSupport)
		sc->current_map_sz = sc->new_map_sz;
	else
		sc->current_map_sz = sc->old_map_sz;
}

/*
 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
 * input: Adapter soft state
 *
 * Allocates DMAable memory for the controller info internal command.
3106 */ 3107 int 3108 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 3109 { 3110 int ctlr_info_size; 3111 3112 /* Allocate get controller info command */ 3113 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 3114 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3115 1, 0, 3116 BUS_SPACE_MAXADDR_32BIT, 3117 BUS_SPACE_MAXADDR, 3118 NULL, NULL, 3119 ctlr_info_size, 3120 1, 3121 ctlr_info_size, 3122 BUS_DMA_ALLOCNOW, 3123 NULL, NULL, 3124 &sc->ctlr_info_tag)) { 3125 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 3126 return (ENOMEM); 3127 } 3128 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 3129 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 3130 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 3131 return (ENOMEM); 3132 } 3133 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 3134 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 3135 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 3136 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 3137 return (ENOMEM); 3138 } 3139 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 3140 return (0); 3141 } 3142 3143 /* 3144 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 3145 * input: Adapter soft state 3146 * 3147 * Deallocates memory of the get controller info cmd. 3148 */ 3149 void 3150 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) 3151 { 3152 if (sc->ctlr_info_phys_addr) 3153 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); 3154 if (sc->ctlr_info_mem != NULL) 3155 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); 3156 if (sc->ctlr_info_tag != NULL) 3157 bus_dma_tag_destroy(sc->ctlr_info_tag); 3158 } 3159 3160 /* 3161 * mrsas_issue_polled: Issues a polling command 3162 * inputs: Adapter soft state 3163 * Command packet to be issued 3164 * 3165 * This function is for posting of internal commands to Firmware. MFI requires 3166 * the cmd_status to be set to 0xFF before posting. 
The maximun wait time of 3167 * the poll response timer is 180 seconds. 3168 */ 3169 int 3170 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3171 { 3172 struct mrsas_header *frame_hdr = &cmd->frame->hdr; 3173 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3174 int i, retcode = 0; 3175 3176 frame_hdr->cmd_status = 0xFF; 3177 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3178 3179 /* Issue the frame using inbound queue port */ 3180 if (mrsas_issue_dcmd(sc, cmd)) { 3181 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3182 return (1); 3183 } 3184 /* 3185 * Poll response timer to wait for Firmware response. While this 3186 * timer with the DELAY call could block CPU, the time interval for 3187 * this is only 1 millisecond. 3188 */ 3189 if (frame_hdr->cmd_status == 0xFF) { 3190 for (i = 0; i < (max_wait * 1000); i++) { 3191 if (frame_hdr->cmd_status == 0xFF) 3192 DELAY(1000); 3193 else 3194 break; 3195 } 3196 } 3197 if (frame_hdr->cmd_status != 0) { 3198 if (frame_hdr->cmd_status == 0xFF) 3199 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait); 3200 else 3201 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status); 3202 retcode = 1; 3203 } 3204 return (retcode); 3205 } 3206 3207 /* 3208 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd 3209 * input: Adapter soft state mfi cmd pointer 3210 * 3211 * This function is called by mrsas_issued_blocked_cmd() and 3212 * mrsas_issued_polled(), to build the MPT command and then fire the command 3213 * to Firmware. 
3214 */ 3215 int 3216 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3217 { 3218 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3219 3220 req_desc = mrsas_build_mpt_cmd(sc, cmd); 3221 if (!req_desc) { 3222 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 3223 return (1); 3224 } 3225 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 3226 3227 return (0); 3228 } 3229 3230 /* 3231 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd 3232 * input: Adapter soft state mfi cmd to build 3233 * 3234 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru 3235 * command and prepares the MPT command to send to Firmware. 3236 */ 3237 MRSAS_REQUEST_DESCRIPTOR_UNION * 3238 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3239 { 3240 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3241 u_int16_t index; 3242 3243 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 3244 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 3245 return NULL; 3246 } 3247 index = cmd->cmd_id.context.smid; 3248 3249 req_desc = mrsas_get_request_desc(sc, index - 1); 3250 if (!req_desc) 3251 return NULL; 3252 3253 req_desc->addr.Words = 0; 3254 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3255 3256 req_desc->SCSIIO.SMID = index; 3257 3258 return (req_desc); 3259 } 3260 3261 /* 3262 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command 3263 * input: Adapter soft state mfi cmd pointer 3264 * 3265 * The MPT command and the io_request are setup as a passthru command. The SGE 3266 * chain address is set to frame_phys_addr of the MFI command. 
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* Returns 1 (failure) when no MPT command is available. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/*
	 * Invader/Fury controllers: clear the Flags of the last SGE in the
	 * main message so the FW does not treat it as a chain element.
	 */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Mark the request as an MFI passthru and point the chain at it. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA address of the MFI frame itself. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return (0);
}

/*
 * mrsas_issue_blocked_cmd:  Synchronous wrapper around regular FW cmds
 * input:                    Adapter soft state, command to be issued
 *
 * This function waits on an event for the command to be returned from the
 * ISR.  Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
 * Used for issuing internal and ioctl commands.
 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/*
	 * Initialize cmd_status: ECONNREFUSED is the "still pending"
	 * sentinel; the completion path (mrsas_wakeup) overwrites it.
	 */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the tsleep/wakeup channel is &sc->chan itself; this
	 * assignment of the local's address appears to be vestigial (it
	 * mirrors mrsas_wakeup) — confirm before removing.
	 */
	sc->chan = (void *)&cmd;

	/*
	 * Sleep in 1-second slices (tsleep timeout = hz) until the ISR
	 * completes the command or max_wait seconds have elapsed.
	 */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev,
			    "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return (retcode);
}

/*
 * mrsas_complete_mptmfi_passthru:  Completes a command
 * input:   @sc:      Adapter soft state
 *          @cmd:     Command to be completed
 *          @status:  cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt is
 * received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH — non-sync SCSI IO is handled like a DCMD. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/*
		 * Check for LD map update: mbox.b[1] == 1 marks the pended
		 * MR_DCMD_LD_MAP_GET_INFO issued by mrsas_sync_map_info().
		 */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* FW has no newer map — stop here. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Re-validate the new map and re-arm the sync DCMD. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/*
 * mrsas_wakeup:  Completes an internal command
 * input:         Adapter soft state
 *                Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a
 * wait timer is started.  This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF/ECONNREFUSED means "still pending"; normalize to success. */
	if (cmd->cmd_status == ECONNREFUSED)
		cmd->cmd_status = 0;

	/* Wake the sleeper in mrsas_issue_blocked_cmd() (channel &sc->chan). */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}

/*
 * mrsas_shutdown_ctlr:  Instructs FW to shutdown the controller
 * input:                Adapter soft state, Shutdown/Hibernate opcode
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * shutdown of the controller.
3474 */ 3475 static void 3476 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 3477 { 3478 struct mrsas_mfi_cmd *cmd; 3479 struct mrsas_dcmd_frame *dcmd; 3480 3481 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3482 return; 3483 3484 cmd = mrsas_get_mfi_cmd(sc); 3485 if (!cmd) { 3486 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 3487 return; 3488 } 3489 if (sc->aen_cmd) 3490 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 3491 3492 if (sc->map_update_cmd) 3493 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 3494 3495 dcmd = &cmd->frame->dcmd; 3496 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3497 3498 dcmd->cmd = MFI_CMD_DCMD; 3499 dcmd->cmd_status = 0x0; 3500 dcmd->sge_count = 0; 3501 dcmd->flags = MFI_FRAME_DIR_NONE; 3502 dcmd->timeout = 0; 3503 dcmd->pad_0 = 0; 3504 dcmd->data_xfer_len = 0; 3505 dcmd->opcode = opcode; 3506 3507 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 3508 3509 mrsas_issue_blocked_cmd(sc, cmd); 3510 mrsas_release_mfi_cmd(cmd); 3511 3512 return; 3513 } 3514 3515 /* 3516 * mrsas_flush_cache: Requests FW to flush all its caches input: 3517 * Adapter soft state 3518 * 3519 * This function is issues a DCMD internal command to Firmware to initiate 3520 * flushing of all caches. 
3521 */ 3522 static void 3523 mrsas_flush_cache(struct mrsas_softc *sc) 3524 { 3525 struct mrsas_mfi_cmd *cmd; 3526 struct mrsas_dcmd_frame *dcmd; 3527 3528 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3529 return; 3530 3531 cmd = mrsas_get_mfi_cmd(sc); 3532 if (!cmd) { 3533 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); 3534 return; 3535 } 3536 dcmd = &cmd->frame->dcmd; 3537 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3538 3539 dcmd->cmd = MFI_CMD_DCMD; 3540 dcmd->cmd_status = 0x0; 3541 dcmd->sge_count = 0; 3542 dcmd->flags = MFI_FRAME_DIR_NONE; 3543 dcmd->timeout = 0; 3544 dcmd->pad_0 = 0; 3545 dcmd->data_xfer_len = 0; 3546 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 3547 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 3548 3549 mrsas_issue_blocked_cmd(sc, cmd); 3550 mrsas_release_mfi_cmd(cmd); 3551 3552 return; 3553 } 3554 3555 /* 3556 * mrsas_get_map_info: Load and validate RAID map input: 3557 * Adapter instance soft state 3558 * 3559 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load 3560 * and validate RAID map. It returns 0 if successful, 1 other- wise. 3561 */ 3562 static int 3563 mrsas_get_map_info(struct mrsas_softc *sc) 3564 { 3565 uint8_t retcode = 0; 3566 3567 sc->fast_path_io = 0; 3568 if (!mrsas_get_ld_map_info(sc)) { 3569 retcode = MR_ValidateMapInfo(sc); 3570 if (retcode == 0) { 3571 sc->fast_path_io = 1; 3572 return 0; 3573 } 3574 } 3575 return 1; 3576 } 3577 3578 /* 3579 * mrsas_get_ld_map_info: Get FW's ld_map structure input: 3580 * Adapter instance soft state 3581 * 3582 * Issues an internal command (DCMD) to get the FW's controller PD list 3583 * structure. 
3584 */ 3585 static int 3586 mrsas_get_ld_map_info(struct mrsas_softc *sc) 3587 { 3588 int retcode = 0; 3589 struct mrsas_mfi_cmd *cmd; 3590 struct mrsas_dcmd_frame *dcmd; 3591 void *map; 3592 bus_addr_t map_phys_addr = 0; 3593 3594 cmd = mrsas_get_mfi_cmd(sc); 3595 if (!cmd) { 3596 device_printf(sc->mrsas_dev, 3597 "Cannot alloc for ld map info cmd.\n"); 3598 return 1; 3599 } 3600 dcmd = &cmd->frame->dcmd; 3601 3602 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 3603 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 3604 if (!map) { 3605 device_printf(sc->mrsas_dev, 3606 "Failed to alloc mem for ld map info.\n"); 3607 mrsas_release_mfi_cmd(cmd); 3608 return (ENOMEM); 3609 } 3610 memset(map, 0, sizeof(sc->max_map_sz)); 3611 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3612 3613 dcmd->cmd = MFI_CMD_DCMD; 3614 dcmd->cmd_status = 0xFF; 3615 dcmd->sge_count = 1; 3616 dcmd->flags = MFI_FRAME_DIR_READ; 3617 dcmd->timeout = 0; 3618 dcmd->pad_0 = 0; 3619 dcmd->data_xfer_len = sc->current_map_sz; 3620 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3621 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3622 dcmd->sgl.sge32[0].length = sc->current_map_sz; 3623 3624 if (!mrsas_issue_polled(sc, cmd)) 3625 retcode = 0; 3626 else { 3627 device_printf(sc->mrsas_dev, 3628 "Fail to send get LD map info cmd.\n"); 3629 retcode = 1; 3630 } 3631 mrsas_release_mfi_cmd(cmd); 3632 3633 return (retcode); 3634 } 3635 3636 /* 3637 * mrsas_sync_map_info: Get FW's ld_map structure input: 3638 * Adapter instance soft state 3639 * 3640 * Issues an internal command (DCMD) to get the FW's controller PD list 3641 * structure. 
3642 */ 3643 static int 3644 mrsas_sync_map_info(struct mrsas_softc *sc) 3645 { 3646 int retcode = 0, i; 3647 struct mrsas_mfi_cmd *cmd; 3648 struct mrsas_dcmd_frame *dcmd; 3649 uint32_t size_sync_info, num_lds; 3650 MR_LD_TARGET_SYNC *target_map = NULL; 3651 MR_DRV_RAID_MAP_ALL *map; 3652 MR_LD_RAID *raid; 3653 MR_LD_TARGET_SYNC *ld_sync; 3654 bus_addr_t map_phys_addr = 0; 3655 3656 cmd = mrsas_get_mfi_cmd(sc); 3657 if (!cmd) { 3658 device_printf(sc->mrsas_dev, 3659 "Cannot alloc for sync map info cmd\n"); 3660 return 1; 3661 } 3662 map = sc->ld_drv_map[sc->map_id & 1]; 3663 num_lds = map->raidMap.ldCount; 3664 3665 dcmd = &cmd->frame->dcmd; 3666 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 3667 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3668 3669 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; 3670 memset(target_map, 0, sc->max_map_sz); 3671 3672 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 3673 3674 ld_sync = (MR_LD_TARGET_SYNC *) target_map; 3675 3676 for (i = 0; i < num_lds; i++, ld_sync++) { 3677 raid = MR_LdRaidGet(i, map); 3678 ld_sync->targetId = MR_GetLDTgtId(i, map); 3679 ld_sync->seqNum = raid->seqNum; 3680 } 3681 3682 dcmd->cmd = MFI_CMD_DCMD; 3683 dcmd->cmd_status = 0xFF; 3684 dcmd->sge_count = 1; 3685 dcmd->flags = MFI_FRAME_DIR_WRITE; 3686 dcmd->timeout = 0; 3687 dcmd->pad_0 = 0; 3688 dcmd->data_xfer_len = sc->current_map_sz; 3689 dcmd->mbox.b[0] = num_lds; 3690 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 3691 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3692 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3693 dcmd->sgl.sge32[0].length = sc->current_map_sz; 3694 3695 sc->map_update_cmd = cmd; 3696 if (mrsas_issue_dcmd(sc, cmd)) { 3697 device_printf(sc->mrsas_dev, 3698 "Fail to send sync map info command.\n"); 3699 return (1); 3700 } 3701 return (retcode); 3702 } 3703 3704 /* 3705 * mrsas_get_pd_list: Returns FW's PD list structure input: 3706 * Adapter soft state 3707 * 3708 * Issues an internal command 
(DCMD) to get the FW's controller PD list 3709 * structure. This information is mainly used to find out about system 3710 * supported by Firmware. 3711 */ 3712 static int 3713 mrsas_get_pd_list(struct mrsas_softc *sc) 3714 { 3715 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 3716 struct mrsas_mfi_cmd *cmd; 3717 struct mrsas_dcmd_frame *dcmd; 3718 struct MR_PD_LIST *pd_list_mem; 3719 struct MR_PD_ADDRESS *pd_addr; 3720 bus_addr_t pd_list_phys_addr = 0; 3721 struct mrsas_tmp_dcmd *tcmd; 3722 3723 cmd = mrsas_get_mfi_cmd(sc); 3724 if (!cmd) { 3725 device_printf(sc->mrsas_dev, 3726 "Cannot alloc for get PD list cmd\n"); 3727 return 1; 3728 } 3729 dcmd = &cmd->frame->dcmd; 3730 3731 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3732 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3733 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 3734 device_printf(sc->mrsas_dev, 3735 "Cannot alloc dmamap for get PD list cmd\n"); 3736 mrsas_release_mfi_cmd(cmd); 3737 return (ENOMEM); 3738 } else { 3739 pd_list_mem = tcmd->tmp_dcmd_mem; 3740 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3741 } 3742 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3743 3744 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 3745 dcmd->mbox.b[1] = 0; 3746 dcmd->cmd = MFI_CMD_DCMD; 3747 dcmd->cmd_status = 0xFF; 3748 dcmd->sge_count = 1; 3749 dcmd->flags = MFI_FRAME_DIR_READ; 3750 dcmd->timeout = 0; 3751 dcmd->pad_0 = 0; 3752 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3753 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3754 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 3755 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3756 3757 if (!mrsas_issue_polled(sc, cmd)) 3758 retcode = 0; 3759 else 3760 retcode = 1; 3761 3762 /* Get the instance PD list */ 3763 pd_count = MRSAS_MAX_PD; 3764 pd_addr = pd_list_mem->addr; 3765 if (retcode == 0 && pd_list_mem->count < pd_count) { 3766 memset(sc->local_pd_list, 0, 3767 MRSAS_MAX_PD * 
sizeof(struct mrsas_pd_list)); 3768 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 3769 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 3770 sc->local_pd_list[pd_addr->deviceId].driveType = 3771 pd_addr->scsiDevType; 3772 sc->local_pd_list[pd_addr->deviceId].driveState = 3773 MR_PD_STATE_SYSTEM; 3774 pd_addr++; 3775 } 3776 } 3777 /* 3778 * Use mutext/spinlock if pd_list component size increase more than 3779 * 32 bit. 3780 */ 3781 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 3782 mrsas_free_tmp_dcmd(tcmd); 3783 mrsas_release_mfi_cmd(cmd); 3784 free(tcmd, M_MRSAS); 3785 return (retcode); 3786 } 3787 3788 /* 3789 * mrsas_get_ld_list: Returns FW's LD list structure input: 3790 * Adapter soft state 3791 * 3792 * Issues an internal command (DCMD) to get the FW's controller PD list 3793 * structure. This information is mainly used to find out about supported by 3794 * the FW. 3795 */ 3796 static int 3797 mrsas_get_ld_list(struct mrsas_softc *sc) 3798 { 3799 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 3800 struct mrsas_mfi_cmd *cmd; 3801 struct mrsas_dcmd_frame *dcmd; 3802 struct MR_LD_LIST *ld_list_mem; 3803 bus_addr_t ld_list_phys_addr = 0; 3804 struct mrsas_tmp_dcmd *tcmd; 3805 3806 cmd = mrsas_get_mfi_cmd(sc); 3807 if (!cmd) { 3808 device_printf(sc->mrsas_dev, 3809 "Cannot alloc for get LD list cmd\n"); 3810 return 1; 3811 } 3812 dcmd = &cmd->frame->dcmd; 3813 3814 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3815 ld_list_size = sizeof(struct MR_LD_LIST); 3816 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 3817 device_printf(sc->mrsas_dev, 3818 "Cannot alloc dmamap for get LD list cmd\n"); 3819 mrsas_release_mfi_cmd(cmd); 3820 return (ENOMEM); 3821 } else { 3822 ld_list_mem = tcmd->tmp_dcmd_mem; 3823 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3824 } 3825 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3826 3827 if (sc->max256vdSupport) 3828 dcmd->mbox.b[0] = 1; 3829 3830 
dcmd->cmd = MFI_CMD_DCMD; 3831 dcmd->cmd_status = 0xFF; 3832 dcmd->sge_count = 1; 3833 dcmd->flags = MFI_FRAME_DIR_READ; 3834 dcmd->timeout = 0; 3835 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3836 dcmd->opcode = MR_DCMD_LD_GET_LIST; 3837 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 3838 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3839 dcmd->pad_0 = 0; 3840 3841 if (!mrsas_issue_polled(sc, cmd)) 3842 retcode = 0; 3843 else 3844 retcode = 1; 3845 3846 #if VD_EXT_DEBUG 3847 printf("Number of LDs %d\n", ld_list_mem->ldCount); 3848 #endif 3849 3850 /* Get the instance LD list */ 3851 if ((retcode == 0) && 3852 (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) { 3853 sc->CurLdCount = ld_list_mem->ldCount; 3854 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 3855 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 3856 if (ld_list_mem->ldList[ld_index].state != 0) { 3857 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3858 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3859 } 3860 } 3861 } 3862 mrsas_free_tmp_dcmd(tcmd); 3863 mrsas_release_mfi_cmd(cmd); 3864 free(tcmd, M_MRSAS); 3865 return (retcode); 3866 } 3867 3868 /* 3869 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 3870 * Adapter soft state Temp command Size of alloction 3871 * 3872 * Allocates DMAable memory for a temporary internal command. The allocated 3873 * memory is initialized to all zeros upon successful loading of the dma 3874 * mapped memory. 
3875 */ 3876 int 3877 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 3878 struct mrsas_tmp_dcmd *tcmd, int size) 3879 { 3880 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3881 1, 0, 3882 BUS_SPACE_MAXADDR_32BIT, 3883 BUS_SPACE_MAXADDR, 3884 NULL, NULL, 3885 size, 3886 1, 3887 size, 3888 BUS_DMA_ALLOCNOW, 3889 NULL, NULL, 3890 &tcmd->tmp_dcmd_tag)) { 3891 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 3892 return (ENOMEM); 3893 } 3894 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 3895 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 3896 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 3897 return (ENOMEM); 3898 } 3899 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 3900 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 3901 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 3902 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 3903 return (ENOMEM); 3904 } 3905 memset(tcmd->tmp_dcmd_mem, 0, size); 3906 return (0); 3907 } 3908 3909 /* 3910 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 3911 * temporary dcmd pointer 3912 * 3913 * Deallocates memory of the temporary command for use in the construction of 3914 * the internal DCMD. 3915 */ 3916 void 3917 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 3918 { 3919 if (tmp->tmp_dcmd_phys_addr) 3920 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 3921 if (tmp->tmp_dcmd_mem != NULL) 3922 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 3923 if (tmp->tmp_dcmd_tag != NULL) 3924 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 3925 } 3926 3927 /* 3928 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 3929 * Adapter soft state Previously issued cmd to be aborted 3930 * 3931 * This function is used to abort previously issued commands, such as AEN and 3932 * RAID map sync map commands. The abort command is sent as a DCMD internal 3933 * command and subsequently the driver will wait for a return status. 
The 3934 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 3935 */ 3936 static int 3937 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 3938 struct mrsas_mfi_cmd *cmd_to_abort) 3939 { 3940 struct mrsas_mfi_cmd *cmd; 3941 struct mrsas_abort_frame *abort_fr; 3942 u_int8_t retcode = 0; 3943 unsigned long total_time = 0; 3944 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3945 3946 cmd = mrsas_get_mfi_cmd(sc); 3947 if (!cmd) { 3948 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 3949 return (1); 3950 } 3951 abort_fr = &cmd->frame->abort; 3952 3953 /* Prepare and issue the abort frame */ 3954 abort_fr->cmd = MFI_CMD_ABORT; 3955 abort_fr->cmd_status = 0xFF; 3956 abort_fr->flags = 0; 3957 abort_fr->abort_context = cmd_to_abort->index; 3958 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 3959 abort_fr->abort_mfi_phys_addr_hi = 0; 3960 3961 cmd->sync_cmd = 1; 3962 cmd->cmd_status = 0xFF; 3963 3964 if (mrsas_issue_dcmd(sc, cmd)) { 3965 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 3966 return (1); 3967 } 3968 /* Wait for this cmd to complete */ 3969 sc->chan = (void *)&cmd; 3970 while (1) { 3971 if (cmd->cmd_status == 0xFF) { 3972 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3973 } else 3974 break; 3975 total_time++; 3976 if (total_time >= max_wait) { 3977 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 3978 retcode = 1; 3979 break; 3980 } 3981 } 3982 3983 cmd->sync_cmd = 0; 3984 mrsas_release_mfi_cmd(cmd); 3985 return (retcode); 3986 } 3987 3988 /* 3989 * mrsas_complete_abort: Completes aborting a command input: 3990 * Adapter soft state Cmd that was issued to abort another cmd 3991 * 3992 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 3993 * change after sending the command. This function is called from 3994 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
3995 */ 3996 void 3997 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3998 { 3999 if (cmd->sync_cmd) { 4000 cmd->sync_cmd = 0; 4001 cmd->cmd_status = 0; 4002 sc->chan = (void *)&cmd; 4003 wakeup_one((void *)&sc->chan); 4004 } 4005 return; 4006 } 4007 4008 /* 4009 * mrsas_aen_handler: AEN processing callback function from thread context 4010 * input: Adapter soft state 4011 * 4012 * Asynchronous event handler 4013 */ 4014 void 4015 mrsas_aen_handler(struct mrsas_softc *sc) 4016 { 4017 union mrsas_evt_class_locale class_locale; 4018 int doscan = 0; 4019 u_int32_t seq_num; 4020 int error; 4021 4022 if (!sc) { 4023 device_printf(sc->mrsas_dev, "invalid instance!\n"); 4024 return; 4025 } 4026 if (sc->evt_detail_mem) { 4027 switch (sc->evt_detail_mem->code) { 4028 case MR_EVT_PD_INSERTED: 4029 mrsas_get_pd_list(sc); 4030 mrsas_bus_scan_sim(sc, sc->sim_1); 4031 doscan = 0; 4032 break; 4033 case MR_EVT_PD_REMOVED: 4034 mrsas_get_pd_list(sc); 4035 mrsas_bus_scan_sim(sc, sc->sim_1); 4036 doscan = 0; 4037 break; 4038 case MR_EVT_LD_OFFLINE: 4039 case MR_EVT_CFG_CLEARED: 4040 case MR_EVT_LD_DELETED: 4041 mrsas_bus_scan_sim(sc, sc->sim_0); 4042 doscan = 0; 4043 break; 4044 case MR_EVT_LD_CREATED: 4045 mrsas_get_ld_list(sc); 4046 mrsas_bus_scan_sim(sc, sc->sim_0); 4047 doscan = 0; 4048 break; 4049 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4050 case MR_EVT_FOREIGN_CFG_IMPORTED: 4051 case MR_EVT_LD_STATE_CHANGE: 4052 doscan = 1; 4053 break; 4054 default: 4055 doscan = 0; 4056 break; 4057 } 4058 } else { 4059 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4060 return; 4061 } 4062 if (doscan) { 4063 mrsas_get_pd_list(sc); 4064 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4065 mrsas_bus_scan_sim(sc, sc->sim_1); 4066 mrsas_get_ld_list(sc); 4067 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 4068 mrsas_bus_scan_sim(sc, sc->sim_0); 4069 } 4070 seq_num = sc->evt_detail_mem->seq_num + 1; 4071 4072 /* Register AEN with FW for latest sequence 
number plus 1 */ 4073 class_locale.members.reserved = 0; 4074 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4075 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4076 4077 if (sc->aen_cmd != NULL) 4078 return; 4079 4080 mtx_lock(&sc->aen_lock); 4081 error = mrsas_register_aen(sc, seq_num, 4082 class_locale.word); 4083 mtx_unlock(&sc->aen_lock); 4084 4085 if (error) 4086 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 4087 4088 } 4089 4090 4091 /* 4092 * mrsas_complete_aen: Completes AEN command 4093 * input: Adapter soft state 4094 * Cmd that was issued to abort another cmd 4095 * 4096 * This function will be called from ISR and will continue event processing from 4097 * thread context by enqueuing task in ev_tq (callback function 4098 * "mrsas_aen_handler"). 4099 */ 4100 void 4101 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4102 { 4103 /* 4104 * Don't signal app if it is just an aborted previously registered 4105 * aen 4106 */ 4107 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 4108 sc->mrsas_aen_triggered = 1; 4109 mtx_lock(&sc->aen_lock); 4110 if (sc->mrsas_poll_waiting) { 4111 sc->mrsas_poll_waiting = 0; 4112 selwakeup(&sc->mrsas_select); 4113 } 4114 mtx_unlock(&sc->aen_lock); 4115 } else 4116 cmd->abort_aen = 0; 4117 4118 sc->aen_cmd = NULL; 4119 mrsas_release_mfi_cmd(cmd); 4120 4121 if (!sc->remove_in_progress) 4122 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 4123 4124 return; 4125 } 4126 4127 static device_method_t mrsas_methods[] = { 4128 DEVMETHOD(device_probe, mrsas_probe), 4129 DEVMETHOD(device_attach, mrsas_attach), 4130 DEVMETHOD(device_detach, mrsas_detach), 4131 DEVMETHOD(device_suspend, mrsas_suspend), 4132 DEVMETHOD(device_resume, mrsas_resume), 4133 DEVMETHOD(bus_print_child, bus_generic_print_child), 4134 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 4135 {0, 0} 4136 }; 4137 4138 static driver_t mrsas_driver = { 4139 "mrsas", 4140 mrsas_methods, 4141 sizeof(struct 
mrsas_softc) 4142 }; 4143 4144 static devclass_t mrsas_devclass; 4145 4146 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 4147 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 4148