1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>


/*
 * Function prototypes
 */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_read_t mrsas_read;
static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;

/* Per-driver registry of controller instances, exported to management apps. */
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev,
    u_long cmd, caddr_t arg);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int	mrsas_init_adapter(struct mrsas_softc *sc);
int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int	mrsas_ioc_init(struct mrsas_softc *sc);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_reset_ctrl(struct mrsas_softc *sc);
int	mrsas_wait_for_outstanding(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_disable_intr(struct mrsas_softc *sc);
void	mrsas_enable_intr(struct mrsas_softc *sc);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_mem(struct mrsas_softc *sc);
void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void	mrsas_isr(void *arg);
void	mrsas_teardown_intr(struct mrsas_softc *sc);
void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void	mrsas_kill_hba(struct mrsas_softc *sc);
void	mrsas_aen_handler(struct mrsas_softc *sc);
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
    u_int8_t extStatus);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);

MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
        (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);

/* Implemented in the companion CAM/raid-map source files. */
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);

/* Root of the hw.mrsas sysctl tree; per-unit nodes hang off this. */
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;	/* 0xffff matches any subvendor */
	uint16_t subdevice;	/* 0xffff matches any subdevice */
	const char *desc;
}	MRSAS_CTLR_ID;

/* Supported controllers; the all-zero entry terminates the table. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member of
 * struct cdev. We set this variable to point to our softc in our attach
 * routine when we create the /dev entry.
 */
/*
 * mrsas_open: Character device open entry point.
 * No per-open state is kept; the softc lookup is all that happens here.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * mrsas_close: Character device close entry point; nothing to tear down.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * mrsas_read: Stub read entry point; all data exchange goes through ioctl.
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}
/*
 * mrsas_write: Stub write entry point; all data exchange goes through ioctl.
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * Register Read/Write Functions
 *
 */
/* Write a 32-bit value to a controller register at the given BAR offset. */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

/* Read a 32-bit controller register at the given BAR offset. */
u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}


/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
/*
 * mrsas_disable_intr: Mask all controller interrupts and note that fact
 * in the softc so the ISR can ignore spurious events.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * mrsas_enable_intr: Clear any latched interrupt status, then unmask the
 * fusion interrupt.  Each write is followed by a read-back to flush the
 * posted write over PCI before continuing.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * mrsas_clear_intr: Examine the outbound interrupt status register and
 * acknowledge FW-state-change interrupts.  Returns 1 if the interrupt was
 * ours (state change or reply), 0 if it belongs to another device.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			/* Kick the OCR thread so it can start recovery. */
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		/* Read-back flushes the acknowledge write. */
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
/*
 * mrsas_find_ident: Match a PCI device against device_table.
 * Returns the matching entry or NULL; 0xffff sub IDs act as wildcards.
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

/*
 * mrsas_probe: PCI probe entry point.  Prints the driver banner once
 * (for the first controller only) and claims supported devices.
 */
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* No newbus-provided tree: create our own under hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}

/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:				Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables; these override the globals. */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
 * Used to get sequence number at driver load time.
 * input:		Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 * NOTE(review): on a partial failure the tag/memory allocated so far is not
 * released here; callers appear to rely on mrsas_free_evt_log_info_cmd() —
 * confirm every caller cleans up on error.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,
	    1,
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_info_cmd:	Free memory for Event log info command
 * input:					Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
528 */ 529 void 530 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 531 { 532 if (sc->el_info_phys_addr) 533 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 534 if (sc->el_info_mem != NULL) 535 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 536 if (sc->el_info_tag != NULL) 537 bus_dma_tag_destroy(sc->el_info_tag); 538 } 539 540 /* 541 * mrsas_get_seq_num: Get latest event sequence number 542 * @sc: Adapter soft state 543 * @eli: Firmware event log sequence number information. 544 * 545 * Firmware maintains a log of all events in a non-volatile area. 546 * Driver get the sequence number using DCMD 547 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 548 */ 549 550 static int 551 mrsas_get_seq_num(struct mrsas_softc *sc, 552 struct mrsas_evt_log_info *eli) 553 { 554 struct mrsas_mfi_cmd *cmd; 555 struct mrsas_dcmd_frame *dcmd; 556 557 cmd = mrsas_get_mfi_cmd(sc); 558 559 if (!cmd) { 560 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 561 return -ENOMEM; 562 } 563 dcmd = &cmd->frame->dcmd; 564 565 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { 566 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); 567 mrsas_release_mfi_cmd(cmd); 568 return -ENOMEM; 569 } 570 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 571 572 dcmd->cmd = MFI_CMD_DCMD; 573 dcmd->cmd_status = 0x0; 574 dcmd->sge_count = 1; 575 dcmd->flags = MFI_FRAME_DIR_READ; 576 dcmd->timeout = 0; 577 dcmd->pad_0 = 0; 578 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); 579 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 580 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; 581 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); 582 583 mrsas_issue_blocked_cmd(sc, cmd); 584 585 /* 586 * Copy the data back into callers buffer 587 */ 588 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); 589 mrsas_free_evt_log_info_cmd(sc); 590 mrsas_release_mfi_cmd(cmd); 591 592 return 0; 593 } 594 595 596 /* 597 * mrsas_register_aen: 
Register for asynchronous event notification 598 * @sc: Adapter soft state 599 * @seq_num: Starting sequence number 600 * @class_locale: Class of the event 601 * 602 * This function subscribes for events beyond the @seq_num 603 * and type @class_locale. 604 * 605 */ 606 static int 607 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, 608 u_int32_t class_locale_word) 609 { 610 int ret_val; 611 struct mrsas_mfi_cmd *cmd; 612 struct mrsas_dcmd_frame *dcmd; 613 union mrsas_evt_class_locale curr_aen; 614 union mrsas_evt_class_locale prev_aen; 615 616 /* 617 * If there an AEN pending already (aen_cmd), check if the 618 * class_locale of that pending AEN is inclusive of the new AEN 619 * request we currently have. If it is, then we don't have to do 620 * anything. In other words, whichever events the current AEN request 621 * is subscribing to, have already been subscribed to. If the old_cmd 622 * is _not_ inclusive, then we have to abort that command, form a 623 * class_locale that is superset of both old and current and re-issue 624 * to the FW 625 */ 626 627 curr_aen.word = class_locale_word; 628 629 if (sc->aen_cmd) { 630 631 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; 632 633 /* 634 * A class whose enum value is smaller is inclusive of all 635 * higher values. If a PROGRESS (= -1) was previously 636 * registered, then a new registration requests for higher 637 * classes need not be sent to FW. They are automatically 638 * included. Locale numbers don't have such hierarchy. They 639 * are bitmap values 640 */ 641 if ((prev_aen.members.class <= curr_aen.members.class) && 642 !((prev_aen.members.locale & curr_aen.members.locale) ^ 643 curr_aen.members.locale)) { 644 /* 645 * Previously issued event registration includes 646 * current request. Nothing to do. 
647 */ 648 return 0; 649 } else { 650 curr_aen.members.locale |= prev_aen.members.locale; 651 652 if (prev_aen.members.class < curr_aen.members.class) 653 curr_aen.members.class = prev_aen.members.class; 654 655 sc->aen_cmd->abort_aen = 1; 656 ret_val = mrsas_issue_blocked_abort_cmd(sc, 657 sc->aen_cmd); 658 659 if (ret_val) { 660 printf("mrsas: Failed to abort " 661 "previous AEN command\n"); 662 return ret_val; 663 } 664 } 665 } 666 cmd = mrsas_get_mfi_cmd(sc); 667 668 if (!cmd) 669 return -ENOMEM; 670 671 dcmd = &cmd->frame->dcmd; 672 673 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); 674 675 /* 676 * Prepare DCMD for aen registration 677 */ 678 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 679 680 dcmd->cmd = MFI_CMD_DCMD; 681 dcmd->cmd_status = 0x0; 682 dcmd->sge_count = 1; 683 dcmd->flags = MFI_FRAME_DIR_READ; 684 dcmd->timeout = 0; 685 dcmd->pad_0 = 0; 686 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); 687 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 688 dcmd->mbox.w[0] = seq_num; 689 sc->last_seq_num = seq_num; 690 dcmd->mbox.w[1] = curr_aen.word; 691 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; 692 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); 693 694 if (sc->aen_cmd != NULL) { 695 mrsas_release_mfi_cmd(cmd); 696 return 0; 697 } 698 /* 699 * Store reference to the cmd used to register for AEN. 
When an 700 * application wants us to register for AEN, we have to abort this 701 * cmd and re-register with a new EVENT LOCALE supplied by that app 702 */ 703 sc->aen_cmd = cmd; 704 705 /* 706 * Issue the aen registration frame 707 */ 708 if (mrsas_issue_dcmd(sc, cmd)) { 709 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); 710 return (1); 711 } 712 return 0; 713 } 714 715 /* 716 * mrsas_start_aen: Subscribes to AEN during driver load time 717 * @instance: Adapter soft state 718 */ 719 static int 720 mrsas_start_aen(struct mrsas_softc *sc) 721 { 722 struct mrsas_evt_log_info eli; 723 union mrsas_evt_class_locale class_locale; 724 725 726 /* Get the latest sequence number from FW */ 727 728 memset(&eli, 0, sizeof(eli)); 729 730 if (mrsas_get_seq_num(sc, &eli)) 731 return -1; 732 733 /* Register AEN with FW for latest sequence number plus 1 */ 734 class_locale.members.reserved = 0; 735 class_locale.members.locale = MR_EVT_LOCALE_ALL; 736 class_locale.members.class = MR_EVT_CLASS_DEBUG; 737 738 return mrsas_register_aen(sc, eli.newest_seq_num + 1, 739 class_locale.word); 740 741 } 742 743 /* 744 * mrsas_setup_msix: Allocate MSI-x vectors 745 * @sc: adapter soft state 746 */ 747 static int 748 mrsas_setup_msix(struct mrsas_softc *sc) 749 { 750 int i; 751 752 for (i = 0; i < sc->msix_vectors; i++) { 753 sc->irq_context[i].sc = sc; 754 sc->irq_context[i].MSIxIndex = i; 755 sc->irq_id[i] = i + 1; 756 sc->mrsas_irq[i] = bus_alloc_resource_any 757 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] 758 ,RF_ACTIVE); 759 if (sc->mrsas_irq[i] == NULL) { 760 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); 761 goto irq_alloc_failed; 762 } 763 if (bus_setup_intr(sc->mrsas_dev, 764 sc->mrsas_irq[i], 765 INTR_MPSAFE | INTR_TYPE_CAM, 766 NULL, mrsas_isr, &sc->irq_context[i], 767 &sc->intr_handle[i])) { 768 device_printf(sc->mrsas_dev, 769 "Cannot set up MSI-x interrupt handler\n"); 770 goto irq_alloc_failed; 771 } 772 } 773 return SUCCESS; 774 775 irq_alloc_failed: 
776 mrsas_teardown_intr(sc); 777 return (FAIL); 778 } 779 780 /* 781 * mrsas_allocate_msix: Setup MSI-x vectors 782 * @sc: adapter soft state 783 */ 784 static int 785 mrsas_allocate_msix(struct mrsas_softc *sc) 786 { 787 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { 788 device_printf(sc->mrsas_dev, "Using MSI-X with %d number" 789 " of vectors\n", sc->msix_vectors); 790 } else { 791 device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); 792 goto irq_alloc_failed; 793 } 794 return SUCCESS; 795 796 irq_alloc_failed: 797 mrsas_teardown_intr(sc); 798 return (FAIL); 799 } 800 801 /* 802 * mrsas_attach: PCI entry point 803 * input: pointer to device struct 804 * 805 * Performs setup of PCI and registers, initializes mutexes and linked lists, 806 * registers interrupts and CAM, and initializes the adapter/controller to 807 * its proper state. 808 */ 809 static int 810 mrsas_attach(device_t dev) 811 { 812 struct mrsas_softc *sc = device_get_softc(dev); 813 uint32_t cmd, bar, error; 814 struct cdev *linux_dev; 815 816 /* Look up our softc and initialize its fields. */ 817 sc->mrsas_dev = dev; 818 sc->device_id = pci_get_device(dev); 819 820 mrsas_get_tunables(sc); 821 822 /* 823 * Set up PCI and registers 824 */ 825 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 826 if ((cmd & PCIM_CMD_PORTEN) == 0) { 827 return (ENXIO); 828 } 829 /* Force the busmaster enable bit on. 
*/ 830 cmd |= PCIM_CMD_BUSMASTEREN; 831 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 832 833 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); 834 835 sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */ 836 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 837 &(sc->reg_res_id), RF_ACTIVE)) 838 == NULL) { 839 device_printf(dev, "Cannot allocate PCI registers\n"); 840 goto attach_fail; 841 } 842 sc->bus_tag = rman_get_bustag(sc->reg_res); 843 sc->bus_handle = rman_get_bushandle(sc->reg_res); 844 845 /* Intialize mutexes */ 846 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); 847 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); 848 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); 849 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); 850 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); 851 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); 852 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); 853 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); 854 855 /* 856 * Intialize a counting Semaphore to take care no. of concurrent 857 * IOCTLs 858 */ 859 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION); 860 861 /* Intialize linked list */ 862 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); 863 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); 864 865 mrsas_atomic_set(&sc->fw_outstanding, 0); 866 867 sc->io_cmds_highwater = 0; 868 869 /* Create a /dev entry for this device. 
*/ 870 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT, 871 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", 872 device_get_unit(dev)); 873 if (device_get_unit(dev) == 0) 874 make_dev_alias_p(MAKEDEV_CHECKNAME, &linux_dev, sc->mrsas_cdev, 875 "megaraid_sas_ioctl_node"); 876 if (sc->mrsas_cdev) 877 sc->mrsas_cdev->si_drv1 = sc; 878 879 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 880 sc->UnevenSpanSupport = 0; 881 882 sc->msix_enable = 0; 883 884 /* Initialize Firmware */ 885 if (mrsas_init_fw(sc) != SUCCESS) { 886 goto attach_fail_fw; 887 } 888 /* Register SCSI mid-layer */ 889 if ((mrsas_cam_attach(sc) != SUCCESS)) { 890 goto attach_fail_cam; 891 } 892 /* Register IRQs */ 893 if (mrsas_setup_irq(sc) != SUCCESS) { 894 goto attach_fail_irq; 895 } 896 /* Enable Interrupts */ 897 mrsas_enable_intr(sc); 898 899 error = mrsas_kproc_create(mrsas_ocr_thread, sc, 900 &sc->ocr_thread, 0, 0, "mrsas_ocr%d", 901 device_get_unit(sc->mrsas_dev)); 902 if (error) { 903 printf("Error %d starting rescan thread\n", error); 904 goto attach_fail_irq; 905 } 906 mrsas_setup_sysctl(sc); 907 908 /* Initiate AEN (Asynchronous Event Notification) */ 909 910 if (mrsas_start_aen(sc)) { 911 printf("Error: start aen failed\n"); 912 goto fail_start_aen; 913 } 914 /* 915 * Add this controller to mrsas_mgmt_info structure so that it can be 916 * exported to management applications 917 */ 918 if (device_get_unit(dev) == 0) 919 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); 920 921 mrsas_mgmt_info.count++; 922 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; 923 mrsas_mgmt_info.max_index++; 924 925 return (0); 926 927 fail_start_aen: 928 attach_fail_irq: 929 mrsas_teardown_intr(sc); 930 attach_fail_cam: 931 mrsas_cam_detach(sc); 932 attach_fail_fw: 933 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ 934 if (sc->msix_enable == 1) 935 pci_release_msi(sc->mrsas_dev); 936 mrsas_free_mem(sc); 937 mtx_destroy(&sc->sim_lock); 938 
mtx_destroy(&sc->aen_lock); 939 mtx_destroy(&sc->pci_lock); 940 mtx_destroy(&sc->io_lock); 941 mtx_destroy(&sc->ioctl_lock); 942 mtx_destroy(&sc->mpt_cmd_pool_lock); 943 mtx_destroy(&sc->mfi_cmd_pool_lock); 944 mtx_destroy(&sc->raidmap_lock); 945 /* Destroy the counting semaphore created for Ioctl */ 946 sema_destroy(&sc->ioctl_count_sema); 947 attach_fail: 948 destroy_dev(sc->mrsas_cdev); 949 if (sc->reg_res) { 950 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, 951 sc->reg_res_id, sc->reg_res); 952 } 953 return (ENXIO); 954 } 955 956 /* 957 * mrsas_detach: De-allocates and teardown resources 958 * input: pointer to device struct 959 * 960 * This function is the entry point for device disconnect and detach. 961 * It performs memory de-allocations, shutdown of the controller and various 962 * teardown and destroy resource functions. 963 */ 964 static int 965 mrsas_detach(device_t dev) 966 { 967 struct mrsas_softc *sc; 968 int i = 0; 969 970 sc = device_get_softc(dev); 971 sc->remove_in_progress = 1; 972 973 /* Destroy the character device so no other IOCTL will be handled */ 974 destroy_dev(sc->mrsas_cdev); 975 976 /* 977 * Take the instance off the instance array. Note that we will not 978 * decrement the max_index. 
We let this array be sparse array 979 */ 980 for (i = 0; i < mrsas_mgmt_info.max_index; i++) { 981 if (mrsas_mgmt_info.sc_ptr[i] == sc) { 982 mrsas_mgmt_info.count--; 983 mrsas_mgmt_info.sc_ptr[i] = NULL; 984 break; 985 } 986 } 987 988 if (sc->ocr_thread_active) 989 wakeup(&sc->ocr_chan); 990 while (sc->reset_in_progress) { 991 i++; 992 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 993 mrsas_dprint(sc, MRSAS_INFO, 994 "[%2d]waiting for ocr to be finished\n", i); 995 } 996 pause("mr_shutdown", hz); 997 } 998 i = 0; 999 while (sc->ocr_thread_active) { 1000 i++; 1001 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1002 mrsas_dprint(sc, MRSAS_INFO, 1003 "[%2d]waiting for " 1004 "mrsas_ocr thread to quit ocr %d\n", i, 1005 sc->ocr_thread_active); 1006 } 1007 pause("mr_shutdown", hz); 1008 } 1009 mrsas_flush_cache(sc); 1010 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1011 mrsas_disable_intr(sc); 1012 mrsas_cam_detach(sc); 1013 mrsas_teardown_intr(sc); 1014 mrsas_free_mem(sc); 1015 mtx_destroy(&sc->sim_lock); 1016 mtx_destroy(&sc->aen_lock); 1017 mtx_destroy(&sc->pci_lock); 1018 mtx_destroy(&sc->io_lock); 1019 mtx_destroy(&sc->ioctl_lock); 1020 mtx_destroy(&sc->mpt_cmd_pool_lock); 1021 mtx_destroy(&sc->mfi_cmd_pool_lock); 1022 mtx_destroy(&sc->raidmap_lock); 1023 1024 /* Wait for all the semaphores to be released */ 1025 while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5)) 1026 pause("mr_shutdown", hz); 1027 1028 /* Destroy the counting semaphore created for Ioctl */ 1029 sema_destroy(&sc->ioctl_count_sema); 1030 1031 if (sc->reg_res) { 1032 bus_release_resource(sc->mrsas_dev, 1033 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); 1034 } 1035 if (sc->sysctl_tree != NULL) 1036 sysctl_ctx_free(&sc->sysctl_ctx); 1037 1038 return (0); 1039 } 1040 1041 /* 1042 * mrsas_free_mem: Frees allocated memory 1043 * input: Adapter instance soft state 1044 * 1045 * This function is called from mrsas_detach() to free previously allocated 1046 * memory. 
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory.  Each resource is released in unload ->
	 * free -> tag-destroy order, guarded so this is safe to call on a
	 * partially initialized softc (e.g. from an attach failure path).
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}

	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames (the DMA frames hanging off each MFI command;
	 * the command structures themselves are freed further below).
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (must be last: all other tags derive from it)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to teardown and release bus
 * interrupt resourse.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		/* Legacy INTx: a single handler/resource at index 0. */
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		/* MSI-X: tear down one handler/resource per vector. */
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}

}

/*
 * mrsas_suspend: Suspend entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 * NOTE(review): currently a no-op stub; no controller state is saved.
 */
static int
mrsas_suspend(device_t dev)
{
	struct mrsas_softc *sc;

	sc = device_get_softc(dev);
	return (0);
}

/*
 * mrsas_resume: Resume entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 * NOTE(review): currently a no-op stub; no controller re-init is done.
 */
static int
mrsas_resume(device_t dev)
{
	struct mrsas_softc *sc;

	sc = device_get_softc(dev);
	return (0);
}

/**
 * mrsas_get_softc_instance: Find softc instance based on cmd type
 *
 * This function will return softc instance based on cmd type.
 * In some case, application fire ioctl on required management instance and
 * do not provide host_no.
Use cdev->si_drv1 to get softc instance for those 1259 * case, else get the softc instance from host_no provided by application in 1260 * user data. 1261 */ 1262 1263 static struct mrsas_softc * 1264 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1265 { 1266 struct mrsas_softc *sc = NULL; 1267 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1268 1269 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1270 sc = dev->si_drv1; 1271 } else { 1272 /* 1273 * get the Host number & the softc from data sent by the 1274 * Application 1275 */ 1276 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1277 if (sc == NULL) 1278 printf("There is no Controller number %d\n", 1279 user_ioc->host_no); 1280 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1281 mrsas_dprint(sc, MRSAS_FAULT, 1282 "Invalid Controller number %d\n", user_ioc->host_no); 1283 } 1284 1285 return sc; 1286 } 1287 1288 /* 1289 * mrsas_ioctl: IOCtl commands entry point. 1290 * 1291 * This function is the entry point for IOCtls from the OS. It calls the 1292 * appropriate function for processing depending on the command received. 
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the controller instance (by cdev or by user host_no). */
	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse new ioctls once detach/shutdown has begun. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/*
	 * reset_in_progress is sampled under the ioctl spin lock; if no
	 * OCR is running we proceed immediately, otherwise poll once a
	 * second until the reset completes.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "OCR to be finished %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report the adapter's PCI location back to the caller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}

/*
 * mrsas_poll: poll entry point for mrsas driver fd
 *
 * This function is the entry point for poll from the OS. It waits for some AEN
 * events to be triggered from the controller and notifies back.
 */
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mrsas_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	/* Readable only once an AEN has fired. */
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mrsas_aen_triggered) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
	}
	/* Nothing pending: register for wakeup under the AEN lock. */
	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			mtx_lock(&sc->aen_lock);
			sc->mrsas_poll_waiting = 1;
			selrecord(td, &sc->mrsas_select);
			mtx_unlock(&sc->aen_lock);
		}
	}
	return revents;
}

/*
 * mrsas_setup_irq: Set up interrupt
 * input: Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags indicating
 * resource permitting contemporaneous sharing and for resource to activate
 * atomically.
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");

	else {
		/* MSI-X unavailable or failed: fall back to shared INTx. */
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			/* NOTE(review): "legcay" typo in the message below;
			 * string kept byte-identical on purpose. */
			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy"
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr: ISR entry point
 * input: argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	/*
	 * For legacy (non-MSI-X) interrupts the line may be shared, so
	 * first check/clear our status bit; bail if it wasn't ours.
	 */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Position at this vector's ring, resuming at the index where the
	 * previous invocation stopped.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  A slot
	 * of all-ones means the firmware has not posted a reply there.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMID is 1-based; slot 0 of mpt_cmd_list is SMID 1. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Undo the load-balance accounting taken at submit. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		/* Advance the consumer index, wrapping at queue depth. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map FW command status to a CAM CCB status.
 * input: MPT command, FW status and extended status
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		/*
		 * NOTE(review): sense_data is the address of an embedded
		 * member and can never be NULL, so this check is always
		 * true; kept as-is to preserve behavior exactly.
		 */
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/* Non-zero LUNs don't exist on these targets. */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}

/*
 * mrsas_alloc_mem: Allocate DMAable memory
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. DMA
 * tag describes constraints of DMA mapping. Memory allocated is mapped into
 * Kernel virtual address. Callback argument is physical memory address.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag.  All other tags below are derived from
	 * it.  On any failure we return ENOMEM and rely on the caller to
	 * invoke mrsas_free_mem(), which tolerates partial allocation.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,		/* maxsize */
	    MRSAS_MAX_SGL,		/* nsegments */
	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer (kept below 4GB for FW access).
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned per MPI2 spec).
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply ring per MSI-X vector (at least one). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Maps are created per-command later;
	 * busdma serializes on io_lock via busdma_lock_mutex.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MAX_IO_SIZE,
	    MRSAS_MAX_SGL,
	    MRSAS_MAX_IO_SIZE,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument, machine dependent type
 * that describes DMA segments, number of segments, error code
 *
 * This function is for the driver to receive mapping information resultant of
 * the bus_dmamap_load(). The information is actually not being used, but the
 * address is saved anyway.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	/* Single-segment loads only: record the first segment's address. */
	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
1925 */ 1926 static int 1927 mrsas_setup_raidmap(struct mrsas_softc *sc) 1928 { 1929 int i; 1930 1931 for (i = 0; i < 2; i++) { 1932 sc->ld_drv_map[i] = 1933 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 1934 /* Do Error handling */ 1935 if (!sc->ld_drv_map[i]) { 1936 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 1937 1938 if (i == 1) 1939 free(sc->ld_drv_map[0], M_MRSAS); 1940 /* ABORT driver initialization */ 1941 goto ABORT; 1942 } 1943 } 1944 1945 for (int i = 0; i < 2; i++) { 1946 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1947 4, 0, 1948 BUS_SPACE_MAXADDR_32BIT, 1949 BUS_SPACE_MAXADDR, 1950 NULL, NULL, 1951 sc->max_map_sz, 1952 1, 1953 sc->max_map_sz, 1954 BUS_DMA_ALLOCNOW, 1955 NULL, NULL, 1956 &sc->raidmap_tag[i])) { 1957 device_printf(sc->mrsas_dev, 1958 "Cannot allocate raid map tag.\n"); 1959 return (ENOMEM); 1960 } 1961 if (bus_dmamem_alloc(sc->raidmap_tag[i], 1962 (void **)&sc->raidmap_mem[i], 1963 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 1964 device_printf(sc->mrsas_dev, 1965 "Cannot allocate raidmap memory.\n"); 1966 return (ENOMEM); 1967 } 1968 bzero(sc->raidmap_mem[i], sc->max_map_sz); 1969 1970 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 1971 sc->raidmap_mem[i], sc->max_map_sz, 1972 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 1973 BUS_DMA_NOWAIT)) { 1974 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 1975 return (ENOMEM); 1976 } 1977 if (!sc->raidmap_mem[i]) { 1978 device_printf(sc->mrsas_dev, 1979 "Cannot allocate memory for raid map.\n"); 1980 return (ENOMEM); 1981 } 1982 } 1983 1984 if (!mrsas_get_map_info(sc)) 1985 mrsas_sync_map_info(sc); 1986 1987 return (0); 1988 1989 ABORT: 1990 return (1); 1991 } 1992 1993 /* 1994 * mrsas_init_fw: Initialize Firmware 1995 * input: Adapter soft state 1996 * 1997 * Calls transition_to_ready() to make sure Firmware is in operational state and 1998 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. 
It 1999 * issues internal commands to get the controller info after the IOC_INIT 2000 * command response is received by Firmware. Note: code relating to 2001 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2002 * is left here as placeholder. 2003 */ 2004 static int 2005 mrsas_init_fw(struct mrsas_softc *sc) 2006 { 2007 2008 int ret, loop, ocr = 0; 2009 u_int32_t max_sectors_1; 2010 u_int32_t max_sectors_2; 2011 u_int32_t tmp_sectors; 2012 u_int32_t scratch_pad_2; 2013 int msix_enable = 0; 2014 int fw_msix_count = 0; 2015 2016 /* Make sure Firmware is ready */ 2017 ret = mrsas_transition_to_ready(sc, ocr); 2018 if (ret != SUCCESS) { 2019 return (ret); 2020 } 2021 /* MSI-x index 0- reply post host index register */ 2022 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2023 /* Check if MSI-X is supported while in ready state */ 2024 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2025 2026 if (msix_enable) { 2027 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2028 outbound_scratch_pad_2)); 2029 2030 /* Check max MSI-X vectors */ 2031 if (sc->device_id == MRSAS_TBOLT) { 2032 sc->msix_vectors = (scratch_pad_2 2033 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2034 fw_msix_count = sc->msix_vectors; 2035 } else { 2036 /* Invader/Fury supports 96 MSI-X vectors */ 2037 sc->msix_vectors = ((scratch_pad_2 2038 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2039 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2040 fw_msix_count = sc->msix_vectors; 2041 2042 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2043 loop++) { 2044 sc->msix_reg_offset[loop] = 2045 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2046 (loop * 0x10); 2047 } 2048 } 2049 2050 /* Don't bother allocating more MSI-X vectors than cpus */ 2051 sc->msix_vectors = min(sc->msix_vectors, 2052 mp_ncpus); 2053 2054 /* Allocate MSI-x vectors */ 2055 if (mrsas_allocate_msix(sc) == SUCCESS) 2056 sc->msix_enable = 1; 2057 else 2058 sc->msix_enable = 0; 
2059 2060 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2061 "Online CPU %d Current MSIX <%d>\n", 2062 fw_msix_count, mp_ncpus, sc->msix_vectors); 2063 } 2064 if (mrsas_init_adapter(sc) != SUCCESS) { 2065 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2066 return (1); 2067 } 2068 /* Allocate internal commands for pass-thru */ 2069 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2070 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2071 return (1); 2072 } 2073 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2074 if (!sc->ctrl_info) { 2075 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2076 return (1); 2077 } 2078 /* 2079 * Get the controller info from FW, so that the MAX VD support 2080 * availability can be decided. 2081 */ 2082 if (mrsas_get_ctrl_info(sc)) { 2083 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2084 return (1); 2085 } 2086 sc->secure_jbod_support = 2087 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2088 2089 if (sc->secure_jbod_support) 2090 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2091 2092 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2093 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n"); 2094 return (1); 2095 } 2096 /* For pass-thru, get PD/LD list and controller info */ 2097 memset(sc->pd_list, 0, 2098 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2099 mrsas_get_pd_list(sc); 2100 2101 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2102 mrsas_get_ld_list(sc); 2103 2104 /* 2105 * Compute the max allowed sectors per IO: The controller info has 2106 * two limits on max sectors. Driver should use the minimum of these 2107 * two. 2108 * 2109 * 1 << stripe_sz_ops.min = max sectors per strip 2110 * 2111 * Note that older firmwares ( < FW ver 30) didn't report information to 2112 * calculate max_sectors_1. So the number ended up as zero always. 
2113 */ 2114 tmp_sectors = 0; 2115 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2116 sc->ctrl_info->max_strips_per_io; 2117 max_sectors_2 = sc->ctrl_info->max_request_size; 2118 tmp_sectors = min(max_sectors_1, max_sectors_2); 2119 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2120 2121 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2122 sc->max_sectors_per_req = tmp_sectors; 2123 2124 sc->disableOnlineCtrlReset = 2125 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2126 sc->UnevenSpanSupport = 2127 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2128 if (sc->UnevenSpanSupport) { 2129 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2130 sc->UnevenSpanSupport); 2131 2132 if (MR_ValidateMapInfo(sc)) 2133 sc->fast_path_io = 1; 2134 else 2135 sc->fast_path_io = 0; 2136 } 2137 return (0); 2138 } 2139 2140 /* 2141 * mrsas_init_adapter: Initializes the adapter/controller 2142 * input: Adapter soft state 2143 * 2144 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2145 * ROC/controller. The FW register is read to determined the number of 2146 * commands that is supported. All memory allocations for IO is based on 2147 * max_cmd. Appropriate calculations are performed in this function. 
 */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.  The reply queue
	 * depth is (max_cmd + 1) rounded up to a multiple of 16, doubled.
	 */
	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	sc->chain_frames_alloc_sz = 1024 * max_cmd;
	/* SGEs that fit in the main frame after the fixed request header */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply-queue index per MSI-x vector (at least one) */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}

/*
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates DMA-able memory (tag, buffer, and mapping) for the IOC Init
 * cmd to FW to initialize the ROC/controller.  On any failure the caller
 * is expected to invoke mrsas_free_ioc_cmd() to release partial state.
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command: 1024 bytes for the MFI init frame
	 * followed by the MPI2 IOC INIT request.
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/* Unload/free/destroy in the reverse order of allocation */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 * Builds an MPI2 IOC INIT request at offset 1024 of the DMA buffer,
 * wraps it in an MFI init frame at offset 0, fires it, and polls
 * cmd_status for up to MRSAS_IOC_INIT_WAIT_TIME seconds.
 * Returns 0 on success, 1 on allocation failure/timeout/FW error.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/* MPI2 IOC INIT request lives 1024 bytes past the MFI frame */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* MFI init frame wrapping the MPI2 request above */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* 0xFF = still pending; FW overwrites */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Publish driver version string to FW, if the buffer exists */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}

/*
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
 * array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind the partially-filled array on failure */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/* Frame 0 is skipped; per-command frames start at index 1 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = 1024 * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;	/* SMID range is 1..max_fw_cmds */
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0,
		    &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/*
	 * Initialize reply descriptor array to 0xFFFFFFFF.  Note the
	 * region holds reply_q_depth entries per MSI-x vector.
	 */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter softstate
 * 	request descriptor address low
 * 	request descriptor address high
 *
 * This functions fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 * The pci_lock keeps the low/high register pair write atomic.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_transition_to_ready: Move FW to Ready state input:
 * Adapter instance soft state
 *
 * During the initialization, FW passes can potentially be in any one of several
 * possible states. If the FW in operational, waiting-for-handshake states,
 * driver must take steps to bring it to ready state. Otherwise, it has to
 * wait for the ready state.
 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* FAULT is only recoverable during an OCR (ocr != 0) */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll doorbell bit 0 until the reset is acknowledged */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs;
		 * poll the full scratch-pad value every millisecond.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd: Get a cmd from free command pool
 * input: Adapter soft state
 *
 * This function removes an MFI command from the command list, under
 * mfi_cmd_pool_lock.  Returns NULL when the pool is empty.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
 * input: Adapter Context.
 *
 * This function will check FW status register and flag do_timeout_reset flag.
2610 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has 2611 * trigger reset. 2612 */ 2613 static void 2614 mrsas_ocr_thread(void *arg) 2615 { 2616 struct mrsas_softc *sc; 2617 u_int32_t fw_status, fw_state; 2618 2619 sc = (struct mrsas_softc *)arg; 2620 2621 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); 2622 2623 sc->ocr_thread_active = 1; 2624 mtx_lock(&sc->sim_lock); 2625 for (;;) { 2626 /* Sleep for 1 second and check the queue status */ 2627 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 2628 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); 2629 if (sc->remove_in_progress) { 2630 mrsas_dprint(sc, MRSAS_OCR, 2631 "Exit due to shutdown from %s\n", __func__); 2632 break; 2633 } 2634 fw_status = mrsas_read_reg(sc, 2635 offsetof(mrsas_reg_set, outbound_scratch_pad)); 2636 fw_state = fw_status & MFI_STATE_MASK; 2637 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) { 2638 device_printf(sc->mrsas_dev, "OCR started due to %s!\n", 2639 sc->do_timedout_reset ? "IO Timeout" : 2640 "FW fault detected"); 2641 mtx_lock_spin(&sc->ioctl_lock); 2642 sc->reset_in_progress = 1; 2643 sc->reset_count++; 2644 mtx_unlock_spin(&sc->ioctl_lock); 2645 mrsas_xpt_freeze(sc); 2646 mrsas_reset_ctrl(sc); 2647 mrsas_xpt_release(sc); 2648 sc->reset_in_progress = 0; 2649 sc->do_timedout_reset = 0; 2650 } 2651 } 2652 mtx_unlock(&sc->sim_lock); 2653 sc->ocr_thread_active = 0; 2654 mrsas_kproc_exit(0); 2655 } 2656 2657 /* 2658 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. 2659 * input: Adapter Context. 2660 * 2661 * This function will clear reply descriptor so that post OCR driver and FW will 2662 * lost old history. 2663 */ 2664 void 2665 mrsas_reset_reply_desc(struct mrsas_softc *sc) 2666 { 2667 int i, count; 2668 pMpi2ReplyDescriptorsUnion_t reply_desc; 2669 2670 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 2671 for (i = 0; i < count; i++) 2672 sc->last_reply_idx[i] = 0; 2673 2674 reply_desc = sc->reply_desc_mem; 2675 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 2676 reply_desc->Words = MRSAS_ULONG_MAX; 2677 } 2678 } 2679 2680 /* 2681 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 2682 * input: Adapter Context. 2683 * 2684 * This function will run from thread context so that it can sleep. 1. Do not 2685 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 2686 * to complete for 180 seconds. 3. If #2 does not find any outstanding 2687 * command Controller is in working state, so skip OCR. Otherwise, do 2688 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the 2689 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post 2690 * OCR, Re-fire Managment command and move Controller to Operation state. 2691 */ 2692 int 2693 mrsas_reset_ctrl(struct mrsas_softc *sc) 2694 { 2695 int retval = SUCCESS, i, j, retry = 0; 2696 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 2697 union ccb *ccb; 2698 struct mrsas_mfi_cmd *mfi_cmd; 2699 struct mrsas_mpt_cmd *mpt_cmd; 2700 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2701 2702 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 2703 device_printf(sc->mrsas_dev, 2704 "mrsas: Hardware critical error, returning FAIL.\n"); 2705 return FAIL; 2706 } 2707 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2708 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 2709 mrsas_disable_intr(sc); 2710 DELAY(1000 * 1000); 2711 2712 /* First try waiting for commands to complete */ 2713 if (mrsas_wait_for_outstanding(sc)) { 2714 mrsas_dprint(sc, MRSAS_OCR, 2715 "resetting adapter from %s.\n", 2716 __func__); 2717 /* Now return commands back to the CAM layer */ 2718 mtx_unlock(&sc->sim_lock); 2719 for (i = 0; i < sc->max_fw_cmds; i++) { 2720 mpt_cmd = sc->mpt_cmd_list[i]; 2721 if (mpt_cmd->ccb_ptr) { 2722 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 2723 
ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 2724 mrsas_cmd_done(sc, mpt_cmd); 2725 mrsas_atomic_dec(&sc->fw_outstanding); 2726 } 2727 } 2728 mtx_lock(&sc->sim_lock); 2729 2730 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2731 outbound_scratch_pad)); 2732 abs_state = status_reg & MFI_STATE_MASK; 2733 reset_adapter = status_reg & MFI_RESET_ADAPTER; 2734 if (sc->disableOnlineCtrlReset || 2735 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 2736 /* Reset not supported, kill adapter */ 2737 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); 2738 mrsas_kill_hba(sc); 2739 retval = FAIL; 2740 goto out; 2741 } 2742 /* Now try to reset the chip */ 2743 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 2744 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2745 MPI2_WRSEQ_FLUSH_KEY_VALUE); 2746 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2747 MPI2_WRSEQ_1ST_KEY_VALUE); 2748 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2749 MPI2_WRSEQ_2ND_KEY_VALUE); 2750 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2751 MPI2_WRSEQ_3RD_KEY_VALUE); 2752 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2753 MPI2_WRSEQ_4TH_KEY_VALUE); 2754 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2755 MPI2_WRSEQ_5TH_KEY_VALUE); 2756 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 2757 MPI2_WRSEQ_6TH_KEY_VALUE); 2758 2759 /* Check that the diag write enable (DRWE) bit is on */ 2760 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2761 fusion_host_diag)); 2762 retry = 0; 2763 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2764 DELAY(100 * 1000); 2765 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2766 fusion_host_diag)); 2767 if (retry++ == 100) { 2768 mrsas_dprint(sc, MRSAS_OCR, 2769 "Host diag unlock failed!\n"); 2770 break; 2771 } 2772 } 2773 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 2774 continue; 2775 2776 /* Send chip reset command */ 
2777 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), 2778 host_diag | HOST_DIAG_RESET_ADAPTER); 2779 DELAY(3000 * 1000); 2780 2781 /* Make sure reset adapter bit is cleared */ 2782 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2783 fusion_host_diag)); 2784 retry = 0; 2785 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 2786 DELAY(100 * 1000); 2787 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2788 fusion_host_diag)); 2789 if (retry++ == 1000) { 2790 mrsas_dprint(sc, MRSAS_OCR, 2791 "Diag reset adapter never cleared!\n"); 2792 break; 2793 } 2794 } 2795 if (host_diag & HOST_DIAG_RESET_ADAPTER) 2796 continue; 2797 2798 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2799 outbound_scratch_pad)) & MFI_STATE_MASK; 2800 retry = 0; 2801 2802 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 2803 DELAY(100 * 1000); 2804 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2805 outbound_scratch_pad)) & MFI_STATE_MASK; 2806 } 2807 if (abs_state <= MFI_STATE_FW_INIT) { 2808 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," 2809 " state = 0x%x\n", abs_state); 2810 continue; 2811 } 2812 /* Wait for FW to become ready */ 2813 if (mrsas_transition_to_ready(sc, 1)) { 2814 mrsas_dprint(sc, MRSAS_OCR, 2815 "mrsas: Failed to transition controller to ready.\n"); 2816 continue; 2817 } 2818 mrsas_reset_reply_desc(sc); 2819 if (mrsas_ioc_init(sc)) { 2820 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); 2821 continue; 2822 } 2823 /* Re-fire management commands */ 2824 for (j = 0; j < sc->max_fw_cmds; j++) { 2825 mpt_cmd = sc->mpt_cmd_list[j]; 2826 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 2827 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; 2828 if (mfi_cmd->frame->dcmd.opcode == 2829 MR_DCMD_LD_MAP_GET_INFO) { 2830 mrsas_release_mfi_cmd(mfi_cmd); 2831 mrsas_release_mpt_cmd(mpt_cmd); 2832 } else { 2833 req_desc = mrsas_get_request_desc(sc, 2834 mfi_cmd->cmd_id.context.smid - 1); 2835 
mrsas_dprint(sc, MRSAS_OCR, 2836 "Re-fire command DCMD opcode 0x%x index %d\n ", 2837 mfi_cmd->frame->dcmd.opcode, j); 2838 if (!req_desc) 2839 device_printf(sc->mrsas_dev, 2840 "Cannot build MPT cmd.\n"); 2841 else 2842 mrsas_fire_cmd(sc, req_desc->addr.u.low, 2843 req_desc->addr.u.high); 2844 } 2845 } 2846 } 2847 2848 /* Reset load balance info */ 2849 memset(sc->load_balance_info, 0, 2850 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); 2851 2852 if (mrsas_get_ctrl_info(sc)) { 2853 mrsas_kill_hba(sc); 2854 retval = FAIL; 2855 goto out; 2856 } 2857 if (!mrsas_get_map_info(sc)) 2858 mrsas_sync_map_info(sc); 2859 2860 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2861 mrsas_enable_intr(sc); 2862 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 2863 2864 /* Adapter reset completed successfully */ 2865 device_printf(sc->mrsas_dev, "Reset successful\n"); 2866 retval = SUCCESS; 2867 goto out; 2868 } 2869 /* Reset failed, kill the adapter */ 2870 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); 2871 mrsas_kill_hba(sc); 2872 retval = FAIL; 2873 } else { 2874 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2875 mrsas_enable_intr(sc); 2876 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 2877 } 2878 out: 2879 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 2880 mrsas_dprint(sc, MRSAS_OCR, 2881 "Reset Exit with %d.\n", retval); 2882 return retval; 2883 } 2884 2885 /* 2886 * mrsas_kill_hba: Kill HBA when OCR is not supported 2887 * input: Adapter Context. 2888 * 2889 * This function will kill HBA when OCR is not supported. 
 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	pause("mrsas_kill_hba", 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell FW to stop; the adapter is unusable from here on */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	mrsas_complete_outstanding_ioctls(sc);
}

/**
 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
 * input:			Controller softc
 *
 * Walks every MPT command; any that wraps a synchronous MFI command
 * (sync_cmd_idx set, not an ABORT) is completed on all reply queues
 * so blocked ioctl callers can make progress.
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.status);
			}
		}
	}
}

/*
 * mrsas_wait_for_outstanding: Wait for outstanding commands
 * input: Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding commands to be
 * completed.
2939 */ 2940 int 2941 mrsas_wait_for_outstanding(struct mrsas_softc *sc) 2942 { 2943 int i, outstanding, retval = 0; 2944 u_int32_t fw_state, count, MSIxIndex; 2945 2946 2947 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { 2948 if (sc->remove_in_progress) { 2949 mrsas_dprint(sc, MRSAS_OCR, 2950 "Driver remove or shutdown called.\n"); 2951 retval = 1; 2952 goto out; 2953 } 2954 /* Check if firmware is in fault state */ 2955 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2956 outbound_scratch_pad)) & MFI_STATE_MASK; 2957 if (fw_state == MFI_STATE_FAULT) { 2958 mrsas_dprint(sc, MRSAS_OCR, 2959 "Found FW in FAULT state, will reset adapter.\n"); 2960 retval = 1; 2961 goto out; 2962 } 2963 outstanding = mrsas_atomic_read(&sc->fw_outstanding); 2964 if (!outstanding) 2965 goto out; 2966 2967 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 2968 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " 2969 "commands to complete\n", i, outstanding); 2970 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2971 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 2972 mrsas_complete_cmd(sc, MSIxIndex); 2973 } 2974 DELAY(1000 * 1000); 2975 } 2976 2977 if (mrsas_atomic_read(&sc->fw_outstanding)) { 2978 mrsas_dprint(sc, MRSAS_OCR, 2979 " pending commands remain after waiting," 2980 " will reset adapter.\n"); 2981 retval = 1; 2982 } 2983 out: 2984 return retval; 2985 } 2986 2987 /* 2988 * mrsas_release_mfi_cmd: Return a cmd to free command pool 2989 * input: Command packet for return to free cmd pool 2990 * 2991 * This function returns the MFI command to the command list. 
2992 */ 2993 void 2994 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd) 2995 { 2996 struct mrsas_softc *sc = cmd->sc; 2997 2998 mtx_lock(&sc->mfi_cmd_pool_lock); 2999 cmd->ccb_ptr = NULL; 3000 cmd->cmd_id.frame_count = 0; 3001 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next); 3002 mtx_unlock(&sc->mfi_cmd_pool_lock); 3003 3004 return; 3005 } 3006 3007 /* 3008 * mrsas_get_controller_info: Returns FW's controller structure 3009 * input: Adapter soft state 3010 * Controller information structure 3011 * 3012 * Issues an internal command (DCMD) to get the FW's controller structure. This 3013 * information is mainly used to find out the maximum IO transfer per command 3014 * supported by the FW. 3015 */ 3016 static int 3017 mrsas_get_ctrl_info(struct mrsas_softc *sc) 3018 { 3019 int retcode = 0; 3020 struct mrsas_mfi_cmd *cmd; 3021 struct mrsas_dcmd_frame *dcmd; 3022 3023 cmd = mrsas_get_mfi_cmd(sc); 3024 3025 if (!cmd) { 3026 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 3027 return -ENOMEM; 3028 } 3029 dcmd = &cmd->frame->dcmd; 3030 3031 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 3032 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 3033 mrsas_release_mfi_cmd(cmd); 3034 return -ENOMEM; 3035 } 3036 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3037 3038 dcmd->cmd = MFI_CMD_DCMD; 3039 dcmd->cmd_status = 0xFF; 3040 dcmd->sge_count = 1; 3041 dcmd->flags = MFI_FRAME_DIR_READ; 3042 dcmd->timeout = 0; 3043 dcmd->pad_0 = 0; 3044 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); 3045 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 3046 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; 3047 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); 3048 3049 if (!mrsas_issue_polled(sc, cmd)) 3050 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 3051 else 3052 retcode = 1; 3053 3054 mrsas_update_ext_vd_details(sc); 3055 3056 mrsas_free_ctlr_info_cmd(sc); 3057 mrsas_release_mfi_cmd(cmd); 3058 return (retcode); 
3059 } 3060 3061 /* 3062 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD 3063 * input: 3064 * sc - Controller's softc 3065 */ 3066 static void 3067 mrsas_update_ext_vd_details(struct mrsas_softc *sc) 3068 { 3069 sc->max256vdSupport = 3070 sc->ctrl_info->adapterOperations3.supportMaxExtLDs; 3071 /* Below is additional check to address future FW enhancement */ 3072 if (sc->ctrl_info->max_lds > 64) 3073 sc->max256vdSupport = 1; 3074 3075 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS 3076 * MRSAS_MAX_DEV_PER_CHANNEL; 3077 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS 3078 * MRSAS_MAX_DEV_PER_CHANNEL; 3079 if (sc->max256vdSupport) { 3080 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 3081 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3082 } else { 3083 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 3084 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3085 } 3086 3087 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + 3088 (sizeof(MR_LD_SPAN_MAP) * 3089 (sc->fw_supported_vd_count - 1)); 3090 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); 3091 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) + 3092 (sizeof(MR_LD_SPAN_MAP) * 3093 (sc->drv_supported_vd_count - 1)); 3094 3095 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); 3096 3097 if (sc->max256vdSupport) 3098 sc->current_map_sz = sc->new_map_sz; 3099 else 3100 sc->current_map_sz = sc->old_map_sz; 3101 } 3102 3103 /* 3104 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 3105 * input: Adapter soft state 3106 * 3107 * Allocates DMAable memory for the controller info internal command. 
3108 */ 3109 int 3110 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 3111 { 3112 int ctlr_info_size; 3113 3114 /* Allocate get controller info command */ 3115 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 3116 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3117 1, 0, 3118 BUS_SPACE_MAXADDR_32BIT, 3119 BUS_SPACE_MAXADDR, 3120 NULL, NULL, 3121 ctlr_info_size, 3122 1, 3123 ctlr_info_size, 3124 BUS_DMA_ALLOCNOW, 3125 NULL, NULL, 3126 &sc->ctlr_info_tag)) { 3127 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 3128 return (ENOMEM); 3129 } 3130 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 3131 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 3132 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 3133 return (ENOMEM); 3134 } 3135 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 3136 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 3137 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 3138 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 3139 return (ENOMEM); 3140 } 3141 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 3142 return (0); 3143 } 3144 3145 /* 3146 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 3147 * input: Adapter soft state 3148 * 3149 * Deallocates memory of the get controller info cmd. 3150 */ 3151 void 3152 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) 3153 { 3154 if (sc->ctlr_info_phys_addr) 3155 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); 3156 if (sc->ctlr_info_mem != NULL) 3157 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); 3158 if (sc->ctlr_info_tag != NULL) 3159 bus_dma_tag_destroy(sc->ctlr_info_tag); 3160 } 3161 3162 /* 3163 * mrsas_issue_polled: Issues a polling command 3164 * inputs: Adapter soft state 3165 * Command packet to be issued 3166 * 3167 * This function is for posting of internal commands to Firmware. MFI requires 3168 * the cmd_status to be set to 0xFF before posting. 
The maximun wait time of 3169 * the poll response timer is 180 seconds. 3170 */ 3171 int 3172 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3173 { 3174 struct mrsas_header *frame_hdr = &cmd->frame->hdr; 3175 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3176 int i, retcode = 0; 3177 3178 frame_hdr->cmd_status = 0xFF; 3179 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3180 3181 /* Issue the frame using inbound queue port */ 3182 if (mrsas_issue_dcmd(sc, cmd)) { 3183 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3184 return (1); 3185 } 3186 /* 3187 * Poll response timer to wait for Firmware response. While this 3188 * timer with the DELAY call could block CPU, the time interval for 3189 * this is only 1 millisecond. 3190 */ 3191 if (frame_hdr->cmd_status == 0xFF) { 3192 for (i = 0; i < (max_wait * 1000); i++) { 3193 if (frame_hdr->cmd_status == 0xFF) 3194 DELAY(1000); 3195 else 3196 break; 3197 } 3198 } 3199 if (frame_hdr->cmd_status != 0) { 3200 if (frame_hdr->cmd_status == 0xFF) 3201 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait); 3202 else 3203 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status); 3204 retcode = 1; 3205 } 3206 return (retcode); 3207 } 3208 3209 /* 3210 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd 3211 * input: Adapter soft state mfi cmd pointer 3212 * 3213 * This function is called by mrsas_issued_blocked_cmd() and 3214 * mrsas_issued_polled(), to build the MPT command and then fire the command 3215 * to Firmware. 
3216 */ 3217 int 3218 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3219 { 3220 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3221 3222 req_desc = mrsas_build_mpt_cmd(sc, cmd); 3223 if (!req_desc) { 3224 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 3225 return (1); 3226 } 3227 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 3228 3229 return (0); 3230 } 3231 3232 /* 3233 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd 3234 * input: Adapter soft state mfi cmd to build 3235 * 3236 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru 3237 * command and prepares the MPT command to send to Firmware. 3238 */ 3239 MRSAS_REQUEST_DESCRIPTOR_UNION * 3240 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3241 { 3242 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3243 u_int16_t index; 3244 3245 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 3246 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 3247 return NULL; 3248 } 3249 index = cmd->cmd_id.context.smid; 3250 3251 req_desc = mrsas_get_request_desc(sc, index - 1); 3252 if (!req_desc) 3253 return NULL; 3254 3255 req_desc->addr.Words = 0; 3256 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3257 3258 req_desc->SCSIIO.SMID = index; 3259 3260 return (req_desc); 3261 } 3262 3263 /* 3264 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command 3265 * input: Adapter soft state mfi cmd pointer 3266 * 3267 * The MPT command and the io_request are setup as a passthru command. The SGE 3268 * chain address is set to frame_phys_addr of the MFI command. 
3269 */ 3270 u_int8_t 3271 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) 3272 { 3273 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 3274 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; 3275 struct mrsas_mpt_cmd *mpt_cmd; 3276 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; 3277 3278 mpt_cmd = mrsas_get_mpt_cmd(sc); 3279 if (!mpt_cmd) 3280 return (1); 3281 3282 /* Save the smid. To be used for returning the cmd */ 3283 mfi_cmd->cmd_id.context.smid = mpt_cmd->index; 3284 3285 mpt_cmd->sync_cmd_idx = mfi_cmd->index; 3286 3287 /* 3288 * For cmds where the flag is set, store the flag and check on 3289 * completion. For cmds with this flag, don't call 3290 * mrsas_complete_cmd. 3291 */ 3292 3293 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 3294 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3295 3296 io_req = mpt_cmd->io_request; 3297 3298 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { 3299 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; 3300 3301 sgl_ptr_end += sc->max_sge_in_main_msg - 1; 3302 sgl_ptr_end->Flags = 0; 3303 } 3304 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; 3305 3306 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3307 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; 3308 io_req->ChainOffset = sc->chain_offset_mfi_pthru; 3309 3310 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; 3311 3312 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3313 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3314 3315 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME; 3316 3317 return (0); 3318 } 3319 3320 /* 3321 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds 3322 * input: Adapter soft state Command to be issued 3323 * 3324 * This function waits on an event for the command to be returned from the ISR. 3325 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. 
Used for issuing 3326 * internal and ioctl commands. 3327 */ 3328 int 3329 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3330 { 3331 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3332 unsigned long total_time = 0; 3333 int retcode = 0; 3334 3335 /* Initialize cmd_status */ 3336 cmd->cmd_status = ECONNREFUSED; 3337 3338 /* Build MPT-MFI command for issue to FW */ 3339 if (mrsas_issue_dcmd(sc, cmd)) { 3340 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3341 return (1); 3342 } 3343 sc->chan = (void *)&cmd; 3344 3345 while (1) { 3346 if (cmd->cmd_status == ECONNREFUSED) { 3347 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3348 } else 3349 break; 3350 total_time++; 3351 if (total_time >= max_wait) { 3352 device_printf(sc->mrsas_dev, 3353 "Internal command timed out after %d seconds.\n", max_wait); 3354 retcode = 1; 3355 break; 3356 } 3357 } 3358 return (retcode); 3359 } 3360 3361 /* 3362 * mrsas_complete_mptmfi_passthru: Completes a command 3363 * input: @sc: Adapter soft state 3364 * @cmd: Command to be completed 3365 * @status: cmd completion status 3366 * 3367 * This function is called from mrsas_complete_cmd() after an interrupt is 3368 * received from Firmware, and io_request->Function is 3369 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. 
3370 */ 3371 void 3372 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, 3373 u_int8_t status) 3374 { 3375 struct mrsas_header *hdr = &cmd->frame->hdr; 3376 u_int8_t cmd_status = cmd->frame->hdr.cmd_status; 3377 3378 /* Reset the retry counter for future re-tries */ 3379 cmd->retry_for_fw_reset = 0; 3380 3381 if (cmd->ccb_ptr) 3382 cmd->ccb_ptr = NULL; 3383 3384 switch (hdr->cmd) { 3385 case MFI_CMD_INVALID: 3386 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); 3387 break; 3388 case MFI_CMD_PD_SCSI_IO: 3389 case MFI_CMD_LD_SCSI_IO: 3390 /* 3391 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3392 * issued either through an IO path or an IOCTL path. If it 3393 * was via IOCTL, we will send it to internal completion. 3394 */ 3395 if (cmd->sync_cmd) { 3396 cmd->sync_cmd = 0; 3397 mrsas_wakeup(sc, cmd); 3398 break; 3399 } 3400 case MFI_CMD_SMP: 3401 case MFI_CMD_STP: 3402 case MFI_CMD_DCMD: 3403 /* Check for LD map update */ 3404 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && 3405 (cmd->frame->dcmd.mbox.b[1] == 1)) { 3406 sc->fast_path_io = 0; 3407 mtx_lock(&sc->raidmap_lock); 3408 if (cmd_status != 0) { 3409 if (cmd_status != MFI_STAT_NOT_FOUND) 3410 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); 3411 else { 3412 mrsas_release_mfi_cmd(cmd); 3413 mtx_unlock(&sc->raidmap_lock); 3414 break; 3415 } 3416 } else 3417 sc->map_id++; 3418 mrsas_release_mfi_cmd(cmd); 3419 if (MR_ValidateMapInfo(sc)) 3420 sc->fast_path_io = 0; 3421 else 3422 sc->fast_path_io = 1; 3423 mrsas_sync_map_info(sc); 3424 mtx_unlock(&sc->raidmap_lock); 3425 break; 3426 } 3427 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3428 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 3429 sc->mrsas_aen_triggered = 0; 3430 } 3431 /* See if got an event notification */ 3432 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) 3433 mrsas_complete_aen(sc, cmd); 3434 else 3435 mrsas_wakeup(sc, cmd); 
3436 break; 3437 case MFI_CMD_ABORT: 3438 /* Command issued to abort another cmd return */ 3439 mrsas_complete_abort(sc, cmd); 3440 break; 3441 default: 3442 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); 3443 break; 3444 } 3445 } 3446 3447 /* 3448 * mrsas_wakeup: Completes an internal command 3449 * input: Adapter soft state 3450 * Command to be completed 3451 * 3452 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait 3453 * timer is started. This function is called from 3454 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up 3455 * from the command wait. 3456 */ 3457 void 3458 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3459 { 3460 cmd->cmd_status = cmd->frame->io.cmd_status; 3461 3462 if (cmd->cmd_status == ECONNREFUSED) 3463 cmd->cmd_status = 0; 3464 3465 sc->chan = (void *)&cmd; 3466 wakeup_one((void *)&sc->chan); 3467 return; 3468 } 3469 3470 /* 3471 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: 3472 * Adapter soft state Shutdown/Hibernate 3473 * 3474 * This function issues a DCMD internal command to Firmware to initiate shutdown 3475 * of the controller. 
3476 */ 3477 static void 3478 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 3479 { 3480 struct mrsas_mfi_cmd *cmd; 3481 struct mrsas_dcmd_frame *dcmd; 3482 3483 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3484 return; 3485 3486 cmd = mrsas_get_mfi_cmd(sc); 3487 if (!cmd) { 3488 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 3489 return; 3490 } 3491 if (sc->aen_cmd) 3492 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 3493 3494 if (sc->map_update_cmd) 3495 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 3496 3497 dcmd = &cmd->frame->dcmd; 3498 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3499 3500 dcmd->cmd = MFI_CMD_DCMD; 3501 dcmd->cmd_status = 0x0; 3502 dcmd->sge_count = 0; 3503 dcmd->flags = MFI_FRAME_DIR_NONE; 3504 dcmd->timeout = 0; 3505 dcmd->pad_0 = 0; 3506 dcmd->data_xfer_len = 0; 3507 dcmd->opcode = opcode; 3508 3509 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 3510 3511 mrsas_issue_blocked_cmd(sc, cmd); 3512 mrsas_release_mfi_cmd(cmd); 3513 3514 return; 3515 } 3516 3517 /* 3518 * mrsas_flush_cache: Requests FW to flush all its caches input: 3519 * Adapter soft state 3520 * 3521 * This function is issues a DCMD internal command to Firmware to initiate 3522 * flushing of all caches. 
3523 */ 3524 static void 3525 mrsas_flush_cache(struct mrsas_softc *sc) 3526 { 3527 struct mrsas_mfi_cmd *cmd; 3528 struct mrsas_dcmd_frame *dcmd; 3529 3530 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3531 return; 3532 3533 cmd = mrsas_get_mfi_cmd(sc); 3534 if (!cmd) { 3535 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); 3536 return; 3537 } 3538 dcmd = &cmd->frame->dcmd; 3539 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3540 3541 dcmd->cmd = MFI_CMD_DCMD; 3542 dcmd->cmd_status = 0x0; 3543 dcmd->sge_count = 0; 3544 dcmd->flags = MFI_FRAME_DIR_NONE; 3545 dcmd->timeout = 0; 3546 dcmd->pad_0 = 0; 3547 dcmd->data_xfer_len = 0; 3548 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 3549 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 3550 3551 mrsas_issue_blocked_cmd(sc, cmd); 3552 mrsas_release_mfi_cmd(cmd); 3553 3554 return; 3555 } 3556 3557 /* 3558 * mrsas_get_map_info: Load and validate RAID map input: 3559 * Adapter instance soft state 3560 * 3561 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load 3562 * and validate RAID map. It returns 0 if successful, 1 other- wise. 3563 */ 3564 static int 3565 mrsas_get_map_info(struct mrsas_softc *sc) 3566 { 3567 uint8_t retcode = 0; 3568 3569 sc->fast_path_io = 0; 3570 if (!mrsas_get_ld_map_info(sc)) { 3571 retcode = MR_ValidateMapInfo(sc); 3572 if (retcode == 0) { 3573 sc->fast_path_io = 1; 3574 return 0; 3575 } 3576 } 3577 return 1; 3578 } 3579 3580 /* 3581 * mrsas_get_ld_map_info: Get FW's ld_map structure input: 3582 * Adapter instance soft state 3583 * 3584 * Issues an internal command (DCMD) to get the FW's controller PD list 3585 * structure. 
3586 */ 3587 static int 3588 mrsas_get_ld_map_info(struct mrsas_softc *sc) 3589 { 3590 int retcode = 0; 3591 struct mrsas_mfi_cmd *cmd; 3592 struct mrsas_dcmd_frame *dcmd; 3593 void *map; 3594 bus_addr_t map_phys_addr = 0; 3595 3596 cmd = mrsas_get_mfi_cmd(sc); 3597 if (!cmd) { 3598 device_printf(sc->mrsas_dev, 3599 "Cannot alloc for ld map info cmd.\n"); 3600 return 1; 3601 } 3602 dcmd = &cmd->frame->dcmd; 3603 3604 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 3605 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 3606 if (!map) { 3607 device_printf(sc->mrsas_dev, 3608 "Failed to alloc mem for ld map info.\n"); 3609 mrsas_release_mfi_cmd(cmd); 3610 return (ENOMEM); 3611 } 3612 memset(map, 0, sizeof(sc->max_map_sz)); 3613 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3614 3615 dcmd->cmd = MFI_CMD_DCMD; 3616 dcmd->cmd_status = 0xFF; 3617 dcmd->sge_count = 1; 3618 dcmd->flags = MFI_FRAME_DIR_READ; 3619 dcmd->timeout = 0; 3620 dcmd->pad_0 = 0; 3621 dcmd->data_xfer_len = sc->current_map_sz; 3622 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3623 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3624 dcmd->sgl.sge32[0].length = sc->current_map_sz; 3625 3626 if (!mrsas_issue_polled(sc, cmd)) 3627 retcode = 0; 3628 else { 3629 device_printf(sc->mrsas_dev, 3630 "Fail to send get LD map info cmd.\n"); 3631 retcode = 1; 3632 } 3633 mrsas_release_mfi_cmd(cmd); 3634 3635 return (retcode); 3636 } 3637 3638 /* 3639 * mrsas_sync_map_info: Get FW's ld_map structure input: 3640 * Adapter instance soft state 3641 * 3642 * Issues an internal command (DCMD) to get the FW's controller PD list 3643 * structure. 
3644 */ 3645 static int 3646 mrsas_sync_map_info(struct mrsas_softc *sc) 3647 { 3648 int retcode = 0, i; 3649 struct mrsas_mfi_cmd *cmd; 3650 struct mrsas_dcmd_frame *dcmd; 3651 uint32_t size_sync_info, num_lds; 3652 MR_LD_TARGET_SYNC *target_map = NULL; 3653 MR_DRV_RAID_MAP_ALL *map; 3654 MR_LD_RAID *raid; 3655 MR_LD_TARGET_SYNC *ld_sync; 3656 bus_addr_t map_phys_addr = 0; 3657 3658 cmd = mrsas_get_mfi_cmd(sc); 3659 if (!cmd) { 3660 device_printf(sc->mrsas_dev, 3661 "Cannot alloc for sync map info cmd\n"); 3662 return 1; 3663 } 3664 map = sc->ld_drv_map[sc->map_id & 1]; 3665 num_lds = map->raidMap.ldCount; 3666 3667 dcmd = &cmd->frame->dcmd; 3668 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 3669 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3670 3671 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; 3672 memset(target_map, 0, sc->max_map_sz); 3673 3674 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 3675 3676 ld_sync = (MR_LD_TARGET_SYNC *) target_map; 3677 3678 for (i = 0; i < num_lds; i++, ld_sync++) { 3679 raid = MR_LdRaidGet(i, map); 3680 ld_sync->targetId = MR_GetLDTgtId(i, map); 3681 ld_sync->seqNum = raid->seqNum; 3682 } 3683 3684 dcmd->cmd = MFI_CMD_DCMD; 3685 dcmd->cmd_status = 0xFF; 3686 dcmd->sge_count = 1; 3687 dcmd->flags = MFI_FRAME_DIR_WRITE; 3688 dcmd->timeout = 0; 3689 dcmd->pad_0 = 0; 3690 dcmd->data_xfer_len = sc->current_map_sz; 3691 dcmd->mbox.b[0] = num_lds; 3692 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 3693 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3694 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3695 dcmd->sgl.sge32[0].length = sc->current_map_sz; 3696 3697 sc->map_update_cmd = cmd; 3698 if (mrsas_issue_dcmd(sc, cmd)) { 3699 device_printf(sc->mrsas_dev, 3700 "Fail to send sync map info command.\n"); 3701 return (1); 3702 } 3703 return (retcode); 3704 } 3705 3706 /* 3707 * mrsas_get_pd_list: Returns FW's PD list structure input: 3708 * Adapter soft state 3709 * 3710 * Issues an internal command 
(DCMD) to get the FW's controller PD list 3711 * structure. This information is mainly used to find out about system 3712 * supported by Firmware. 3713 */ 3714 static int 3715 mrsas_get_pd_list(struct mrsas_softc *sc) 3716 { 3717 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 3718 struct mrsas_mfi_cmd *cmd; 3719 struct mrsas_dcmd_frame *dcmd; 3720 struct MR_PD_LIST *pd_list_mem; 3721 struct MR_PD_ADDRESS *pd_addr; 3722 bus_addr_t pd_list_phys_addr = 0; 3723 struct mrsas_tmp_dcmd *tcmd; 3724 3725 cmd = mrsas_get_mfi_cmd(sc); 3726 if (!cmd) { 3727 device_printf(sc->mrsas_dev, 3728 "Cannot alloc for get PD list cmd\n"); 3729 return 1; 3730 } 3731 dcmd = &cmd->frame->dcmd; 3732 3733 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3734 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3735 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 3736 device_printf(sc->mrsas_dev, 3737 "Cannot alloc dmamap for get PD list cmd\n"); 3738 mrsas_release_mfi_cmd(cmd); 3739 return (ENOMEM); 3740 } else { 3741 pd_list_mem = tcmd->tmp_dcmd_mem; 3742 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3743 } 3744 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3745 3746 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 3747 dcmd->mbox.b[1] = 0; 3748 dcmd->cmd = MFI_CMD_DCMD; 3749 dcmd->cmd_status = 0xFF; 3750 dcmd->sge_count = 1; 3751 dcmd->flags = MFI_FRAME_DIR_READ; 3752 dcmd->timeout = 0; 3753 dcmd->pad_0 = 0; 3754 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3755 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3756 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 3757 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3758 3759 if (!mrsas_issue_polled(sc, cmd)) 3760 retcode = 0; 3761 else 3762 retcode = 1; 3763 3764 /* Get the instance PD list */ 3765 pd_count = MRSAS_MAX_PD; 3766 pd_addr = pd_list_mem->addr; 3767 if (retcode == 0 && pd_list_mem->count < pd_count) { 3768 memset(sc->local_pd_list, 0, 3769 MRSAS_MAX_PD * 
sizeof(struct mrsas_pd_list)); 3770 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 3771 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 3772 sc->local_pd_list[pd_addr->deviceId].driveType = 3773 pd_addr->scsiDevType; 3774 sc->local_pd_list[pd_addr->deviceId].driveState = 3775 MR_PD_STATE_SYSTEM; 3776 pd_addr++; 3777 } 3778 } 3779 /* 3780 * Use mutext/spinlock if pd_list component size increase more than 3781 * 32 bit. 3782 */ 3783 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 3784 mrsas_free_tmp_dcmd(tcmd); 3785 mrsas_release_mfi_cmd(cmd); 3786 free(tcmd, M_MRSAS); 3787 return (retcode); 3788 } 3789 3790 /* 3791 * mrsas_get_ld_list: Returns FW's LD list structure input: 3792 * Adapter soft state 3793 * 3794 * Issues an internal command (DCMD) to get the FW's controller PD list 3795 * structure. This information is mainly used to find out about supported by 3796 * the FW. 3797 */ 3798 static int 3799 mrsas_get_ld_list(struct mrsas_softc *sc) 3800 { 3801 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 3802 struct mrsas_mfi_cmd *cmd; 3803 struct mrsas_dcmd_frame *dcmd; 3804 struct MR_LD_LIST *ld_list_mem; 3805 bus_addr_t ld_list_phys_addr = 0; 3806 struct mrsas_tmp_dcmd *tcmd; 3807 3808 cmd = mrsas_get_mfi_cmd(sc); 3809 if (!cmd) { 3810 device_printf(sc->mrsas_dev, 3811 "Cannot alloc for get LD list cmd\n"); 3812 return 1; 3813 } 3814 dcmd = &cmd->frame->dcmd; 3815 3816 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3817 ld_list_size = sizeof(struct MR_LD_LIST); 3818 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 3819 device_printf(sc->mrsas_dev, 3820 "Cannot alloc dmamap for get LD list cmd\n"); 3821 mrsas_release_mfi_cmd(cmd); 3822 return (ENOMEM); 3823 } else { 3824 ld_list_mem = tcmd->tmp_dcmd_mem; 3825 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3826 } 3827 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3828 3829 if (sc->max256vdSupport) 3830 dcmd->mbox.b[0] = 1; 3831 3832 
dcmd->cmd = MFI_CMD_DCMD; 3833 dcmd->cmd_status = 0xFF; 3834 dcmd->sge_count = 1; 3835 dcmd->flags = MFI_FRAME_DIR_READ; 3836 dcmd->timeout = 0; 3837 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3838 dcmd->opcode = MR_DCMD_LD_GET_LIST; 3839 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 3840 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3841 dcmd->pad_0 = 0; 3842 3843 if (!mrsas_issue_polled(sc, cmd)) 3844 retcode = 0; 3845 else 3846 retcode = 1; 3847 3848 #if VD_EXT_DEBUG 3849 printf("Number of LDs %d\n", ld_list_mem->ldCount); 3850 #endif 3851 3852 /* Get the instance LD list */ 3853 if ((retcode == 0) && 3854 (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) { 3855 sc->CurLdCount = ld_list_mem->ldCount; 3856 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 3857 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 3858 if (ld_list_mem->ldList[ld_index].state != 0) { 3859 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3860 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3861 } 3862 } 3863 } 3864 mrsas_free_tmp_dcmd(tcmd); 3865 mrsas_release_mfi_cmd(cmd); 3866 free(tcmd, M_MRSAS); 3867 return (retcode); 3868 } 3869 3870 /* 3871 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 3872 * Adapter soft state Temp command Size of alloction 3873 * 3874 * Allocates DMAable memory for a temporary internal command. The allocated 3875 * memory is initialized to all zeros upon successful loading of the dma 3876 * mapped memory. 
3877 */ 3878 int 3879 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 3880 struct mrsas_tmp_dcmd *tcmd, int size) 3881 { 3882 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3883 1, 0, 3884 BUS_SPACE_MAXADDR_32BIT, 3885 BUS_SPACE_MAXADDR, 3886 NULL, NULL, 3887 size, 3888 1, 3889 size, 3890 BUS_DMA_ALLOCNOW, 3891 NULL, NULL, 3892 &tcmd->tmp_dcmd_tag)) { 3893 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 3894 return (ENOMEM); 3895 } 3896 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 3897 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 3898 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 3899 return (ENOMEM); 3900 } 3901 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 3902 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 3903 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 3904 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 3905 return (ENOMEM); 3906 } 3907 memset(tcmd->tmp_dcmd_mem, 0, size); 3908 return (0); 3909 } 3910 3911 /* 3912 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 3913 * temporary dcmd pointer 3914 * 3915 * Deallocates memory of the temporary command for use in the construction of 3916 * the internal DCMD. 3917 */ 3918 void 3919 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 3920 { 3921 if (tmp->tmp_dcmd_phys_addr) 3922 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 3923 if (tmp->tmp_dcmd_mem != NULL) 3924 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 3925 if (tmp->tmp_dcmd_tag != NULL) 3926 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 3927 } 3928 3929 /* 3930 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 3931 * Adapter soft state Previously issued cmd to be aborted 3932 * 3933 * This function is used to abort previously issued commands, such as AEN and 3934 * RAID map sync map commands. The abort command is sent as a DCMD internal 3935 * command and subsequently the driver will wait for a return status. 
The 3936 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 3937 */ 3938 static int 3939 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 3940 struct mrsas_mfi_cmd *cmd_to_abort) 3941 { 3942 struct mrsas_mfi_cmd *cmd; 3943 struct mrsas_abort_frame *abort_fr; 3944 u_int8_t retcode = 0; 3945 unsigned long total_time = 0; 3946 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3947 3948 cmd = mrsas_get_mfi_cmd(sc); 3949 if (!cmd) { 3950 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 3951 return (1); 3952 } 3953 abort_fr = &cmd->frame->abort; 3954 3955 /* Prepare and issue the abort frame */ 3956 abort_fr->cmd = MFI_CMD_ABORT; 3957 abort_fr->cmd_status = 0xFF; 3958 abort_fr->flags = 0; 3959 abort_fr->abort_context = cmd_to_abort->index; 3960 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 3961 abort_fr->abort_mfi_phys_addr_hi = 0; 3962 3963 cmd->sync_cmd = 1; 3964 cmd->cmd_status = 0xFF; 3965 3966 if (mrsas_issue_dcmd(sc, cmd)) { 3967 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 3968 return (1); 3969 } 3970 /* Wait for this cmd to complete */ 3971 sc->chan = (void *)&cmd; 3972 while (1) { 3973 if (cmd->cmd_status == 0xFF) { 3974 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3975 } else 3976 break; 3977 total_time++; 3978 if (total_time >= max_wait) { 3979 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 3980 retcode = 1; 3981 break; 3982 } 3983 } 3984 3985 cmd->sync_cmd = 0; 3986 mrsas_release_mfi_cmd(cmd); 3987 return (retcode); 3988 } 3989 3990 /* 3991 * mrsas_complete_abort: Completes aborting a command input: 3992 * Adapter soft state Cmd that was issued to abort another cmd 3993 * 3994 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 3995 * change after sending the command. This function is called from 3996 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
3997 */ 3998 void 3999 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4000 { 4001 if (cmd->sync_cmd) { 4002 cmd->sync_cmd = 0; 4003 cmd->cmd_status = 0; 4004 sc->chan = (void *)&cmd; 4005 wakeup_one((void *)&sc->chan); 4006 } 4007 return; 4008 } 4009 4010 /* 4011 * mrsas_aen_handler: AEN processing callback function from thread context 4012 * input: Adapter soft state 4013 * 4014 * Asynchronous event handler 4015 */ 4016 void 4017 mrsas_aen_handler(struct mrsas_softc *sc) 4018 { 4019 union mrsas_evt_class_locale class_locale; 4020 int doscan = 0; 4021 u_int32_t seq_num; 4022 int error; 4023 4024 if (sc == NULL) { 4025 printf("invalid instance!\n"); 4026 return; 4027 } 4028 if (sc->evt_detail_mem) { 4029 switch (sc->evt_detail_mem->code) { 4030 case MR_EVT_PD_INSERTED: 4031 mrsas_get_pd_list(sc); 4032 mrsas_bus_scan_sim(sc, sc->sim_1); 4033 doscan = 0; 4034 break; 4035 case MR_EVT_PD_REMOVED: 4036 mrsas_get_pd_list(sc); 4037 mrsas_bus_scan_sim(sc, sc->sim_1); 4038 doscan = 0; 4039 break; 4040 case MR_EVT_LD_OFFLINE: 4041 case MR_EVT_CFG_CLEARED: 4042 case MR_EVT_LD_DELETED: 4043 mrsas_bus_scan_sim(sc, sc->sim_0); 4044 doscan = 0; 4045 break; 4046 case MR_EVT_LD_CREATED: 4047 mrsas_get_ld_list(sc); 4048 mrsas_bus_scan_sim(sc, sc->sim_0); 4049 doscan = 0; 4050 break; 4051 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4052 case MR_EVT_FOREIGN_CFG_IMPORTED: 4053 case MR_EVT_LD_STATE_CHANGE: 4054 doscan = 1; 4055 break; 4056 default: 4057 doscan = 0; 4058 break; 4059 } 4060 } else { 4061 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4062 return; 4063 } 4064 if (doscan) { 4065 mrsas_get_pd_list(sc); 4066 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4067 mrsas_bus_scan_sim(sc, sc->sim_1); 4068 mrsas_get_ld_list(sc); 4069 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 4070 mrsas_bus_scan_sim(sc, sc->sim_0); 4071 } 4072 seq_num = sc->evt_detail_mem->seq_num + 1; 4073 4074 /* Register AEN with FW for latest sequence number plus 1 */ 
4075 class_locale.members.reserved = 0; 4076 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4077 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4078 4079 if (sc->aen_cmd != NULL) 4080 return; 4081 4082 mtx_lock(&sc->aen_lock); 4083 error = mrsas_register_aen(sc, seq_num, 4084 class_locale.word); 4085 mtx_unlock(&sc->aen_lock); 4086 4087 if (error) 4088 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 4089 4090 } 4091 4092 4093 /* 4094 * mrsas_complete_aen: Completes AEN command 4095 * input: Adapter soft state 4096 * Cmd that was issued to abort another cmd 4097 * 4098 * This function will be called from ISR and will continue event processing from 4099 * thread context by enqueuing task in ev_tq (callback function 4100 * "mrsas_aen_handler"). 4101 */ 4102 void 4103 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4104 { 4105 /* 4106 * Don't signal app if it is just an aborted previously registered 4107 * aen 4108 */ 4109 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 4110 sc->mrsas_aen_triggered = 1; 4111 mtx_lock(&sc->aen_lock); 4112 if (sc->mrsas_poll_waiting) { 4113 sc->mrsas_poll_waiting = 0; 4114 selwakeup(&sc->mrsas_select); 4115 } 4116 mtx_unlock(&sc->aen_lock); 4117 } else 4118 cmd->abort_aen = 0; 4119 4120 sc->aen_cmd = NULL; 4121 mrsas_release_mfi_cmd(cmd); 4122 4123 if (!sc->remove_in_progress) 4124 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 4125 4126 return; 4127 } 4128 4129 static device_method_t mrsas_methods[] = { 4130 DEVMETHOD(device_probe, mrsas_probe), 4131 DEVMETHOD(device_attach, mrsas_attach), 4132 DEVMETHOD(device_detach, mrsas_detach), 4133 DEVMETHOD(device_suspend, mrsas_suspend), 4134 DEVMETHOD(device_resume, mrsas_resume), 4135 DEVMETHOD(bus_print_child, bus_generic_print_child), 4136 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 4137 {0, 0} 4138 }; 4139 4140 static driver_t mrsas_driver = { 4141 "mrsas", 4142 mrsas_methods, 4143 sizeof(struct mrsas_softc) 4144 }; 4145 
4146 static devclass_t mrsas_devclass; 4147 4148 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 4149 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 4150