1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <dev/mrsas/mrsas.h> 44 #include <dev/mrsas/mrsas_ioctl.h> 45 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 49 #include <sys/sysctl.h> 50 #include <sys/types.h> 51 #include <sys/sysent.h> 52 #include <sys/kthread.h> 53 #include <sys/taskqueue.h> 54 #include <sys/smp.h> 55 56 57 /* 58 * Function prototypes 59 */ 60 static d_open_t mrsas_open; 61 static d_close_t mrsas_close; 62 static d_read_t mrsas_read; 63 static d_write_t mrsas_write; 64 static d_ioctl_t mrsas_ioctl; 65 static d_poll_t mrsas_poll; 66 67 static void mrsas_ich_startup(void *arg); 68 static struct mrsas_mgmt_info mrsas_mgmt_info; 69 static struct mrsas_ident *mrsas_find_ident(device_t); 70 static int mrsas_setup_msix(struct mrsas_softc *sc); 71 static int mrsas_allocate_msix(struct mrsas_softc *sc); 72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 73 static void mrsas_flush_cache(struct mrsas_softc *sc); 74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 75 static void mrsas_ocr_thread(void *arg); 76 static int mrsas_get_map_info(struct mrsas_softc *sc); 77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 78 static int mrsas_sync_map_info(struct mrsas_softc *sc); 79 static int mrsas_get_pd_list(struct mrsas_softc *sc); 80 static int mrsas_get_ld_list(struct mrsas_softc *sc); 81 static int mrsas_setup_irq(struct mrsas_softc *sc); 82 static int mrsas_alloc_mem(struct mrsas_softc *sc); 83 static int mrsas_init_fw(struct mrsas_softc *sc); 84 static int mrsas_setup_raidmap(struct mrsas_softc *sc); 85 static void megasas_setup_jbod_map(struct mrsas_softc *sc); 86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); 87 static int mrsas_clear_intr(struct mrsas_softc *sc); 88 static int 
mrsas_get_ctrl_info(struct mrsas_softc *sc); 89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); 90 static int 91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 92 struct mrsas_mfi_cmd *cmd_to_abort); 93 static struct mrsas_softc * 94 mrsas_get_softc_instance(struct cdev *dev, 95 u_long cmd, caddr_t arg); 96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 97 u_int8_t 98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 99 struct mrsas_mfi_cmd *mfi_cmd); 100 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); 101 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 102 int mrsas_init_adapter(struct mrsas_softc *sc); 103 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 104 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 105 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 106 int mrsas_ioc_init(struct mrsas_softc *sc); 107 int mrsas_bus_scan(struct mrsas_softc *sc); 108 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 109 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 110 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); 111 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); 112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 113 int 114 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 115 struct mrsas_mfi_cmd *cmd); 116 int 117 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 118 int size); 119 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 120 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 121 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 122 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 123 void mrsas_disable_intr(struct mrsas_softc *sc); 124 void mrsas_enable_intr(struct mrsas_softc *sc); 125 void mrsas_free_ioc_cmd(struct mrsas_softc *sc); 126 void 
mrsas_free_mem(struct mrsas_softc *sc); 127 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 128 void mrsas_isr(void *arg); 129 void mrsas_teardown_intr(struct mrsas_softc *sc); 130 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 131 void mrsas_kill_hba(struct mrsas_softc *sc); 132 void mrsas_aen_handler(struct mrsas_softc *sc); 133 void 134 mrsas_write_reg(struct mrsas_softc *sc, int offset, 135 u_int32_t value); 136 void 137 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 138 u_int32_t req_desc_hi); 139 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 140 void 141 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 142 struct mrsas_mfi_cmd *cmd, u_int8_t status); 143 void 144 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, 145 u_int8_t extStatus); 146 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); 147 148 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd 149 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 150 151 extern int mrsas_cam_attach(struct mrsas_softc *sc); 152 extern void mrsas_cam_detach(struct mrsas_softc *sc); 153 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 154 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 155 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 156 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 157 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 158 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); 159 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 160 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 161 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 162 extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 163 extern void mrsas_xpt_release(struct mrsas_softc *sc); 164 extern MRSAS_REQUEST_DESCRIPTOR_UNION * 165 
mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);

/* Root of the driver's sysctl tree: hw.mrsas.* */
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 * One entry per supported controller.  A value of 0xffff in the
 * subvendor/subdevice fields acts as a wildcard (see mrsas_find_ident()).
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor ID; 0xffff = any */
	uint16_t subdevice;	/* PCI subsystem device ID; 0xffff = any */
	const char *desc;	/* probe description string */
} MRSAS_CTLR_ID;

/* Supported controllers; the all-zero entry terminates the table. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 * Registered via make_dev() in mrsas_ich_startup(); the ioctl entry is the
 * management-application interface.
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

/* malloc(9) type for all driver allocations (e.g. sc->ld_drv_map[]). */
MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member of
 * struct cdev.  We set this variable to point to our softc in our attach
 * routine when we create the /dev entry.
 */
/*
 * cdev open entry point.  No per-open state is kept; the softc lookup is
 * retained for symmetry with the other cdev routines.  Always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;	/* fetched but unused; open always succeeds */
	return (0);
}

/* cdev close entry point; nothing to tear down. */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/* cdev read entry point; reads are not supported and return no data. */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}
/* cdev write entry point; writes are accepted and discarded. */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * Register Read/Write Functions
 *
 * All controller MMIO access goes through these two helpers using the
 * bus_space tag/handle captured in mrsas_attach().
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}

/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
/*
 * Mask all controller interrupts and note the fact in the softc so the ISR
 * can ignore spurious invocations.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;	/* mask every interrupt source */
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * Clear any pending interrupt status, then unmask the reply interrupt.
 * Each write is followed by a read-back so the posted PCI write reaches
 * the device before we proceed.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	/* Ack anything already latched in the status register. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask only the reply interrupt (0 bits are enabled). */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * Classify a pending interrupt.
 * Returns 1 when the interrupt is ours (FW state change or reply), after
 * acking a state-change interrupt; returns 0 when the interrupt belongs to
 * another device on a shared line.  A FAULTed firmware wakes the OCR thread.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write-1-to-clear, then read back to flush the posted write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
/*
 * Scan device_table for an entry matching the PCI IDs of @dev.
 * 0xffff in a table subvendor/subdevice field matches anything.
 * Returns the matching entry, or NULL when the device is not ours.
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

/*
 * PCI probe entry point.  Prints the driver banner once (first controller
 * only) and claims the device with a priority slightly below the default so
 * a more specific driver can still win.
 */
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;	/* print the version banner only once */
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.  Uses the device's own sysctl
 * context when available, otherwise creates a private one rooted at
 * hw.mrsas.<unit>.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	if (sysctl_tree == NULL) {
		/* No device tree: fall back to a driver-private context. */
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}

/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:				Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 * Defaults are set first, then overridden by the global hw.mrsas.* and
 * per-unit dev.mrsas.<unit>.* tunables, in that order.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balance pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info cmd:	Allocates memory to get event log information.
 *								Used to get sequence number at driver load time.
 * input:						Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 * Creates the tag, allocates the buffer and loads its bus address into
 * sc->el_info_phys_addr (via mrsas_addr_cb).
 *
 * NOTE(review): on the later failure paths the already-created tag/map are
 * not torn down here, and the caller (mrsas_get_seq_num) does not call
 * mrsas_free_evt_log_info_cmd() on failure either — confirm whether a
 * partial allocation can leak.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: 32-bit DMA only */
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,		/* maxsize */
	    1,				/* single segment */
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_info_cmd:	Free memory for Event log info command
 * input:					Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
 */
/*
 * Unload, free and destroy the event-log-info DMA resources in reverse
 * order of their creation in mrsas_alloc_evt_log_info_cmd().  Safe to call
 * with a partially-built allocation: each step is guarded.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}

/*
 * mrsas_get_seq_num:	Get latest event sequence number
 * @sc:					Adapter soft state
 * @eli:				Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * mrsas_issue_blocked_cmd() error (ETIMEDOUT schedules an OCR and leaves the
 * MFI command outstanding rather than releasing it).
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the GET_INFO DCMD: one read SGE pointing at el_info_mem. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		/* Timed out: let the OCR thread reset the controller. */
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}

/*
 * mrsas_register_aen:		Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:			Starting sequence number
 * @class_locale:		Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;	/* start listening from this seq no. */
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * Re-check for a racing registration; if one appeared, drop ours.
	 * NOTE(review): this check is not under a lock here — confirm the
	 * callers serialize AEN registration.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen:	Subscribes to AEN during driver load time
 * @instance:		Adapter soft state
 *
 * Fetches the newest event sequence number from firmware and registers for
 * all events (MR_EVT_LOCALE_ALL / MR_EVT_CLASS_DEBUG) after it.
 * Returns 0 on success, -1 if the sequence number cannot be read.
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;

	/* Get the latest sequence number from FW */

	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);

}

/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:					adapter soft state
 *
 * For each vector: allocate the IRQ resource (rids are 1-based) and hook
 * mrsas_isr with a per-vector context.  On any failure, tears down whatever
 * was set up so far and returns FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;	/* MSI-X rids start at 1 */
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_allocate_msix:	Setup MSI-x vectors
 * @sc:					adapter soft state
 *
 * Asks the PCI layer for sc->msix_vectors message-signalled interrupts
 * (pci_alloc_msix may grant fewer; the granted count is written back).
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
		    " of vectors\n", sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_attach:	PCI entry point
 * input:			pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 *
 * Failure unwinding is handled by the attach_fail* label chain in reverse
 * order of setup.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/*
	 * NOTE(review): this tests the I/O-port enable bit even though the
	 * driver maps a memory BAR below — confirm PCIM_CMD_PORTEN (vs.
	 * PCIM_CMD_MEMEN) is intended here.
	 */
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on.
 */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* NOTE(review): 'bar' is read but never used afterwards. */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Initialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Start the online-controller-reset (fault-monitor) thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Failure unwinding: each label undoes the steps before it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

/*
 * Interrupt config hook
 *
 * Deferred part of attach, run once interrupts are available: creates the
 * /dev/mrsas<unit> cdev (and the Linux-emulator alias on unit 0), registers
 * the controller in mrsas_mgmt_info, enables interrupts and starts AEN.
 */
static void
mrsas_ich_startup(void *arg)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema,
	    MRSAS_MAX_MFI_CMDS - 5,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup is done: release the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}

/*
 * mrsas_detach:	De-allocates and teardown resources
 * input:			pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Fail new IOCTLs and let the OCR thread see we are going away. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	/*
	 * NOTE(review): mrsas_cdev is not NULL-checked here, although
	 * mrsas_ich_startup() checks it before use — confirm make_dev()
	 * cannot have failed by the time detach runs.
	 */
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait for any in-flight OCR (online controller reset) to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Wait for the OCR kthread to observe remove_in_progress and exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Quiesce and shut down the controller, then release resources. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

/*
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/* JBOD map buffers (allocated in megasas_setup_jbod_map()) */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to teardown and release bus
 * interrupt resource.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		/* Legacy INTx: a single handler/resource at index 0. */
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		/* MSI-X: tear down and release every allocated vector. */
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}

}

/*
 * mrsas_suspend: Suspend entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/*
 * mrsas_resume: Resume entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/**
 * mrsas_get_softc_instance: Find softc instance based on cmd type
 *
 * This function will return softc instance based on cmd type.
 * In some case, application fire ioctl on required management instance and
 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
 * case, else get the softc instance from host_no provided by application in
 * user data.
1310 */ 1311 1312 static struct mrsas_softc * 1313 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1314 { 1315 struct mrsas_softc *sc = NULL; 1316 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1317 1318 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1319 sc = dev->si_drv1; 1320 } else { 1321 /* 1322 * get the Host number & the softc from data sent by the 1323 * Application 1324 */ 1325 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1326 if (sc == NULL) 1327 printf("There is no Controller number %d\n", 1328 user_ioc->host_no); 1329 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1330 mrsas_dprint(sc, MRSAS_FAULT, 1331 "Invalid Controller number %d\n", user_ioc->host_no); 1332 } 1333 1334 return sc; 1335 } 1336 1337 /* 1338 * mrsas_ioctl: IOCtl commands entry point. 1339 * 1340 * This function is the entry point for IOCtls from the OS. It calls the 1341 * appropriate function for processing depending on the command received. 1342 */ 1343 static int 1344 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, 1345 struct thread *td) 1346 { 1347 struct mrsas_softc *sc; 1348 int ret = 0, i = 0; 1349 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; 1350 1351 sc = mrsas_get_softc_instance(dev, cmd, arg); 1352 if (!sc) 1353 return ENOENT; 1354 1355 if (sc->remove_in_progress) { 1356 mrsas_dprint(sc, MRSAS_INFO, 1357 "Driver remove or shutdown called.\n"); 1358 return ENOENT; 1359 } 1360 mtx_lock_spin(&sc->ioctl_lock); 1361 if (!sc->reset_in_progress) { 1362 mtx_unlock_spin(&sc->ioctl_lock); 1363 goto do_ioctl; 1364 } 1365 mtx_unlock_spin(&sc->ioctl_lock); 1366 while (sc->reset_in_progress) { 1367 i++; 1368 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1369 mrsas_dprint(sc, MRSAS_INFO, 1370 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1371 } 1372 pause("mr_ioctl", hz); 1373 } 1374 1375 do_ioctl: 1376 switch (cmd) { 1377 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: 1378 #ifdef COMPAT_FREEBSD32 1379 case 
MRSAS_IOC_FIRMWARE_PASS_THROUGH32: 1380 #endif 1381 /* 1382 * Decrement the Ioctl counting Semaphore before getting an 1383 * mfi command 1384 */ 1385 sema_wait(&sc->ioctl_count_sema); 1386 1387 ret = mrsas_passthru(sc, (void *)arg, cmd); 1388 1389 /* Increment the Ioctl counting semaphore value */ 1390 sema_post(&sc->ioctl_count_sema); 1391 1392 break; 1393 case MRSAS_IOC_SCAN_BUS: 1394 ret = mrsas_bus_scan(sc); 1395 break; 1396 1397 case MRSAS_IOC_GET_PCI_INFO: 1398 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; 1399 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); 1400 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); 1401 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); 1402 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); 1403 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); 1404 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," 1405 "pci device no: %d, pci function no: %d," 1406 "pci domain ID: %d\n", 1407 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, 1408 pciDrvInfo->functionNumber, pciDrvInfo->domainID); 1409 ret = 0; 1410 break; 1411 1412 default: 1413 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); 1414 ret = ENOENT; 1415 } 1416 1417 return (ret); 1418 } 1419 1420 /* 1421 * mrsas_poll: poll entry point for mrsas driver fd 1422 * 1423 * This function is the entry point for poll from the OS. It waits for some AEN 1424 * events to be triggered from the controller and notifies back. 
1425 */ 1426 static int 1427 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) 1428 { 1429 struct mrsas_softc *sc; 1430 int revents = 0; 1431 1432 sc = dev->si_drv1; 1433 1434 if (poll_events & (POLLIN | POLLRDNORM)) { 1435 if (sc->mrsas_aen_triggered) { 1436 revents |= poll_events & (POLLIN | POLLRDNORM); 1437 } 1438 } 1439 if (revents == 0) { 1440 if (poll_events & (POLLIN | POLLRDNORM)) { 1441 mtx_lock(&sc->aen_lock); 1442 sc->mrsas_poll_waiting = 1; 1443 selrecord(td, &sc->mrsas_select); 1444 mtx_unlock(&sc->aen_lock); 1445 } 1446 } 1447 return revents; 1448 } 1449 1450 /* 1451 * mrsas_setup_irq: Set up interrupt 1452 * input: Adapter instance soft state 1453 * 1454 * This function sets up interrupts as a bus resource, with flags indicating 1455 * resource permitting contemporaneous sharing and for resource to activate 1456 * atomically. 1457 */ 1458 static int 1459 mrsas_setup_irq(struct mrsas_softc *sc) 1460 { 1461 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) 1462 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); 1463 1464 else { 1465 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); 1466 sc->irq_context[0].sc = sc; 1467 sc->irq_context[0].MSIxIndex = 0; 1468 sc->irq_id[0] = 0; 1469 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, 1470 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); 1471 if (sc->mrsas_irq[0] == NULL) { 1472 device_printf(sc->mrsas_dev, "Cannot allocate legcay" 1473 "interrupt\n"); 1474 return (FAIL); 1475 } 1476 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], 1477 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, 1478 &sc->irq_context[0], &sc->intr_handle[0])) { 1479 device_printf(sc->mrsas_dev, "Cannot set up legacy" 1480 "interrupt\n"); 1481 return (FAIL); 1482 } 1483 } 1484 return (0); 1485 } 1486 1487 /* 1488 * mrsas_isr: ISR entry point 1489 * input: argument pointer 1490 * 1491 * This function is the interrupt service routine entry point. 
 * There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked (e.g. around OCR); drop anything that slips in. */
	if (sc->mask_interrupts)
		return;

	/* Legacy INTx may be shared: only proceed if the interrupt is ours. */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Point at this queue's current position in the reply ring. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  An unused
	 * slot is all-ones (the driver resets consumed slots to 0xFF below),
	 * so all-ones terminates the walk.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		/* SMIDs are 1-based; mpt_cmd_list is 0-based. */
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Release the load-balance slot taken at submit time. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY) ||
				    (sc->device_id == MRSAS_INTRUDER) ||
				    (sc->device_id == MRSAS_INTRUDER_24))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY) ||
		    (sc->device_id == MRSAS_INTRUDER) ||
		    (sc->device_id == MRSAS_INTRUDER_24)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map firmware status to a CAM status.
 * input: completed MPT command, MFI status, MFI extended status
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	/* extStatus is currently unused in this mapping. */
	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/*
		 * NOTE(review): these cases OR into ccb_h.status without
		 * first assigning a base value — presumably the CCB status
		 * was pre-initialized by the submit path; confirm.
		 */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}

/*
 * mrsas_alloc_mem: Allocate DMAable memory
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. DMA
 * tag describes constraints of DMA mapping. Memory allocated is mapped into
 * Kernel virtual address. Callback argument is physical memory address.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag
	 * NOTE(review): created with a NULL parent instead of
	 * bus_get_dma_tag(sc->mrsas_dev) — confirm this is intended.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXPHYS,			/* maxsize */
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply ring per MSI-X vector (at least one). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAXPHYS,
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument, machine dependent type
 * that describes DMA segments, number of segments, error code
 *
 * This function is for the driver to receive mapping information resultant of
 * the bus_dmamap_load(). The information is actually not being used, but the
 * address is saved anyway.
 * NOTE(review): 'error' is ignored here; callers appear to rely on
 * bus_dmamap_load()'s own return value — confirm.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
1976 */ 1977 static int 1978 mrsas_setup_raidmap(struct mrsas_softc *sc) 1979 { 1980 int i; 1981 1982 for (i = 0; i < 2; i++) { 1983 sc->ld_drv_map[i] = 1984 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 1985 /* Do Error handling */ 1986 if (!sc->ld_drv_map[i]) { 1987 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 1988 1989 if (i == 1) 1990 free(sc->ld_drv_map[0], M_MRSAS); 1991 /* ABORT driver initialization */ 1992 goto ABORT; 1993 } 1994 } 1995 1996 for (int i = 0; i < 2; i++) { 1997 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1998 4, 0, 1999 BUS_SPACE_MAXADDR_32BIT, 2000 BUS_SPACE_MAXADDR, 2001 NULL, NULL, 2002 sc->max_map_sz, 2003 1, 2004 sc->max_map_sz, 2005 BUS_DMA_ALLOCNOW, 2006 NULL, NULL, 2007 &sc->raidmap_tag[i])) { 2008 device_printf(sc->mrsas_dev, 2009 "Cannot allocate raid map tag.\n"); 2010 return (ENOMEM); 2011 } 2012 if (bus_dmamem_alloc(sc->raidmap_tag[i], 2013 (void **)&sc->raidmap_mem[i], 2014 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 2015 device_printf(sc->mrsas_dev, 2016 "Cannot allocate raidmap memory.\n"); 2017 return (ENOMEM); 2018 } 2019 bzero(sc->raidmap_mem[i], sc->max_map_sz); 2020 2021 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 2022 sc->raidmap_mem[i], sc->max_map_sz, 2023 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 2024 BUS_DMA_NOWAIT)) { 2025 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 2026 return (ENOMEM); 2027 } 2028 if (!sc->raidmap_mem[i]) { 2029 device_printf(sc->mrsas_dev, 2030 "Cannot allocate memory for raid map.\n"); 2031 return (ENOMEM); 2032 } 2033 } 2034 2035 if (!mrsas_get_map_info(sc)) 2036 mrsas_sync_map_info(sc); 2037 2038 return (0); 2039 2040 ABORT: 2041 return (1); 2042 } 2043 2044 /** 2045 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 2046 * @sc: Adapter soft state 2047 * 2048 * Return 0 on success. 
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/* Map holds one MR_PD_CFG_SEQ per physical device (first one is
	 * embedded in MR_PD_CFG_SEQ_NUM_SYNC, hence the "- 1"). */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* FW must advertise the JBOD sequence-number fast-path capability. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers persist across OCR; only allocate them once. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	/* Two DMA-able copies so FW and driver can ping-pong maps. */
	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Fast path is usable only if both sync passes succeed. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}

/*
 * mrsas_init_fw: Initialize Firmware
 * input: Adapter soft state
 *
 * Calls transition_to_ready() to make sure Firmware is in operational state and
 * calls mrsas_init_adapter()
to send IOC_INIT command to Firmware. It 2122 * issues internal commands to get the controller info after the IOC_INIT 2123 * command response is received by Firmware. Note: code relating to 2124 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2125 * is left here as placeholder. 2126 */ 2127 static int 2128 mrsas_init_fw(struct mrsas_softc *sc) 2129 { 2130 2131 int ret, loop, ocr = 0; 2132 u_int32_t max_sectors_1; 2133 u_int32_t max_sectors_2; 2134 u_int32_t tmp_sectors; 2135 u_int32_t scratch_pad_2; 2136 int msix_enable = 0; 2137 int fw_msix_count = 0; 2138 2139 /* Make sure Firmware is ready */ 2140 ret = mrsas_transition_to_ready(sc, ocr); 2141 if (ret != SUCCESS) { 2142 return (ret); 2143 } 2144 /* MSI-x index 0- reply post host index register */ 2145 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2146 /* Check if MSI-X is supported while in ready state */ 2147 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2148 2149 if (msix_enable) { 2150 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2151 outbound_scratch_pad_2)); 2152 2153 /* Check max MSI-X vectors */ 2154 if (sc->device_id == MRSAS_TBOLT) { 2155 sc->msix_vectors = (scratch_pad_2 2156 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2157 fw_msix_count = sc->msix_vectors; 2158 } else { 2159 /* Invader/Fury supports 96 MSI-X vectors */ 2160 sc->msix_vectors = ((scratch_pad_2 2161 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2162 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2163 fw_msix_count = sc->msix_vectors; 2164 2165 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2166 loop++) { 2167 sc->msix_reg_offset[loop] = 2168 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2169 (loop * 0x10); 2170 } 2171 } 2172 2173 /* Don't bother allocating more MSI-X vectors than cpus */ 2174 sc->msix_vectors = min(sc->msix_vectors, 2175 mp_ncpus); 2176 2177 /* Allocate MSI-x vectors */ 2178 if (mrsas_allocate_msix(sc) == SUCCESS) 2179 sc->msix_enable = 1; 
2180 else 2181 sc->msix_enable = 0; 2182 2183 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2184 "Online CPU %d Current MSIX <%d>\n", 2185 fw_msix_count, mp_ncpus, sc->msix_vectors); 2186 } 2187 if (mrsas_init_adapter(sc) != SUCCESS) { 2188 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2189 return (1); 2190 } 2191 /* Allocate internal commands for pass-thru */ 2192 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2193 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2194 return (1); 2195 } 2196 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2197 if (!sc->ctrl_info) { 2198 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2199 return (1); 2200 } 2201 /* 2202 * Get the controller info from FW, so that the MAX VD support 2203 * availability can be decided. 2204 */ 2205 if (mrsas_get_ctrl_info(sc)) { 2206 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2207 return (1); 2208 } 2209 sc->secure_jbod_support = 2210 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2211 2212 if (sc->secure_jbod_support) 2213 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2214 2215 if (sc->use_seqnum_jbod_fp) 2216 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); 2217 2218 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2219 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! 
" 2220 "There seems to be some problem in the controller\n" 2221 "Please contact to the SUPPORT TEAM if the problem persists\n"); 2222 } 2223 megasas_setup_jbod_map(sc); 2224 2225 /* For pass-thru, get PD/LD list and controller info */ 2226 memset(sc->pd_list, 0, 2227 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2228 if (mrsas_get_pd_list(sc) != SUCCESS) { 2229 device_printf(sc->mrsas_dev, "Get PD list failed.\n"); 2230 return (1); 2231 } 2232 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2233 if (mrsas_get_ld_list(sc) != SUCCESS) { 2234 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); 2235 return (1); 2236 } 2237 /* 2238 * Compute the max allowed sectors per IO: The controller info has 2239 * two limits on max sectors. Driver should use the minimum of these 2240 * two. 2241 * 2242 * 1 << stripe_sz_ops.min = max sectors per strip 2243 * 2244 * Note that older firmwares ( < FW ver 30) didn't report information to 2245 * calculate max_sectors_1. So the number ended up as zero always. 
2246 */ 2247 tmp_sectors = 0; 2248 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2249 sc->ctrl_info->max_strips_per_io; 2250 max_sectors_2 = sc->ctrl_info->max_request_size; 2251 tmp_sectors = min(max_sectors_1, max_sectors_2); 2252 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2253 2254 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2255 sc->max_sectors_per_req = tmp_sectors; 2256 2257 sc->disableOnlineCtrlReset = 2258 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2259 sc->UnevenSpanSupport = 2260 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2261 if (sc->UnevenSpanSupport) { 2262 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2263 sc->UnevenSpanSupport); 2264 2265 if (MR_ValidateMapInfo(sc)) 2266 sc->fast_path_io = 1; 2267 else 2268 sc->fast_path_io = 0; 2269 } 2270 return (0); 2271 } 2272 2273 /* 2274 * mrsas_init_adapter: Initializes the adapter/controller 2275 * input: Adapter soft state 2276 * 2277 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2278 * ROC/controller. The FW register is read to determined the number of 2279 * commands that is supported. All memory allocations for IO is based on 2280 * max_cmd. Appropriate calculations are performed in this function. 
2281 */ 2282 int 2283 mrsas_init_adapter(struct mrsas_softc *sc) 2284 { 2285 uint32_t status; 2286 u_int32_t max_cmd, scratch_pad_2; 2287 int ret; 2288 int i = 0; 2289 2290 /* Read FW status register */ 2291 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2292 2293 /* Get operational params from status register */ 2294 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; 2295 2296 /* Decrement the max supported by 1, to correlate with FW */ 2297 sc->max_fw_cmds = sc->max_fw_cmds - 1; 2298 max_cmd = sc->max_fw_cmds; 2299 2300 /* Determine allocation size of command frames */ 2301 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2; 2302 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd; 2303 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); 2304 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1)); 2305 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2306 outbound_scratch_pad_2)); 2307 /* 2308 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 2309 * Firmware support extended IO chain frame which is 4 time more 2310 * than legacy Firmware. 
Legacy Firmware - Frame size is (8 * 128) = 2311 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K 2312 */ 2313 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) 2314 sc->max_chain_frame_sz = 2315 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2316 * MEGASAS_1MB_IO; 2317 else 2318 sc->max_chain_frame_sz = 2319 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2320 * MEGASAS_256K_IO; 2321 2322 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd; 2323 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2324 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; 2325 2326 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); 2327 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; 2328 2329 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n", 2330 sc->max_num_sge, sc->max_chain_frame_sz); 2331 2332 /* Used for pass thru MFI frame (DCMD) */ 2333 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; 2334 2335 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2336 sizeof(MPI2_SGE_IO_UNION)) / 16; 2337 2338 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2339 2340 for (i = 0; i < count; i++) 2341 sc->last_reply_idx[i] = 0; 2342 2343 ret = mrsas_alloc_mem(sc); 2344 if (ret != SUCCESS) 2345 return (ret); 2346 2347 ret = mrsas_alloc_mpt_cmds(sc); 2348 if (ret != SUCCESS) 2349 return (ret); 2350 2351 ret = mrsas_ioc_init(sc); 2352 if (ret != SUCCESS) 2353 return (ret); 2354 2355 return (0); 2356 } 2357 2358 /* 2359 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command 2360 * input: Adapter soft state 2361 * 2362 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. 
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/*
	 * Allocate IOC INIT command.  The first 1024 bytes hold the MFI
	 * init frame; the MPI2 IOC INIT request is laid out right after
	 * it (see mrsas_ioc_init()).
	 */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/* Tear down in reverse order of mrsas_alloc_ioc_cmd(). */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/* MPI2 IOC INIT request sits 1024 bytes into the DMA buffer,
	 * after the MFI init frame (layout set by mrsas_alloc_ioc_cmd()). */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* MFI wrapper frame that carries the MPI2 request to FW. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* 0xFF = still pending; FW overwrites */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24)) {
		init_frame->driver_operations.mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW, if the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Physical address of the embedded MPI2 IOC INIT request. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The IOC INIT buffers are single-use; release them either way. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}

/*
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
 * array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind the partially filled array on failure. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve each command's IO frame, chain frame and sense buffer out
	 * of the large DMA regions allocated earlier.  Frame 0 of the IO
	 * request region is reserved (SMIDs start at 1), hence the offset.
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;	/* SMID; 0 is reserved */
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		/*
		 * NOTE(review): on dmamap_create failure the commands
		 * allocated above are not freed here; the caller's attach
		 * failure path is presumably responsible — verify.
		 */
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/* One reply queue region of reply_q_depth entries per MSI-x vector. */
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter softstate
 * request descriptor address low
 * request descriptor address high
 *
 * This functions fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * pci_lock serializes the low/high queue-port write pair so two
	 * CPUs cannot interleave halves of different descriptors.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_transition_to_ready: Move FW to Ready state input:
 * Adapter instance soft state
 *
 * During the initialization, FW passes can potentially be in any one of several
 * possible states. If the FW in operational, waiting-for-handshake states,
 * driver must take steps to bring it to ready state. Otherwise, it has to
 * wait for the ready state.
 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full register value, used below to detect any change. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* Only an OCR caller may try to recover from FAULT. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll until FW acknowledges the reset request. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd: Get a cmd from free command pool
 * input: Adapter soft state
 *
 * This function removes an MFI command from the command list, or returns
 * NULL when the pool is empty.  The pool is protected by mfi_cmd_pool_lock.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
 * input: Adapter Context.
 *
 * This function will check FW status register and flag do_timeout_reset flag.
 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
 * trigger reset.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit on shutdown or once the HBA has been declared dead. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "%s started due to %s!\n",
			    sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* reset_in_progress/reset_count guarded by the
			 * ioctl spin lock; IOCTL path reads them. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM while the controller is reset. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc, sc->do_timedout_reset);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}

/*
 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
 * input: Adapter Context.
 *
 * This function will clear the reply descriptors so that, post OCR, driver
 * and FW do not see stale history.
2824 */ 2825 void 2826 mrsas_reset_reply_desc(struct mrsas_softc *sc) 2827 { 2828 int i, count; 2829 pMpi2ReplyDescriptorsUnion_t reply_desc; 2830 2831 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2832 for (i = 0; i < count; i++) 2833 sc->last_reply_idx[i] = 0; 2834 2835 reply_desc = sc->reply_desc_mem; 2836 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 2837 reply_desc->Words = MRSAS_ULONG_MAX; 2838 } 2839 } 2840 2841 /* 2842 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 2843 * input: Adapter Context. 2844 * 2845 * This function will run from thread context so that it can sleep. 1. Do not 2846 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 2847 * to complete for 180 seconds. 3. If #2 does not find any outstanding 2848 * command Controller is in working state, so skip OCR. Otherwise, do 2849 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the 2850 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post 2851 * OCR, Re-fire Management command and move Controller to Operation state. 
 */
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	union mrsas_evt_class_locale class_locale;

	/* Step 1: never attempt OCR on a controller already declared dead. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
	    sc->mrsas_fw_fault_check_delay * hz);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		/* sim_lock dropped: mrsas_cmd_done() re-enters CAM. */
		mtx_unlock(&sc->sim_lock);
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				mrsas_atomic_dec(&sc->fw_outstanding);
			}
		}
		mtx_lock(&sc->sim_lock);

		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/* Magic unlock sequence for the diag register;
			 * the key values must be written in this order. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for FW to pass its early-boot states. */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			/* Release MFI/MPT pairs of pre-reset sync commands. */
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					mrsas_release_mfi_cmd(mfi_cmd);
					mrsas_release_mpt_cmd(mpt_cmd);
				}
			}

			sc->aen_cmd = NULL;

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			/* Re-pull RAID and JBOD maps from the fresh FW. */
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			megasas_setup_jbod_map(sc);

			/* PD/LD list failures are non-fatal; events refresh them. */
			memset(sc->pd_list, 0,
			    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
			if (mrsas_get_pd_list(sc) != SUCCESS) {
				device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
				    "Will get the latest PD LIST after OCR on event.\n");
			}
			memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
			if (mrsas_get_ld_list(sc) != SUCCESS) {
				device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
				    "Will get the latest LD LIST after OCR on event.\n");
			}
			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Register AEN with FW for last sequence number */
			class_locale.members.reserved = 0;
			class_locale.members.locale = MR_EVT_LOCALE_ALL;
			class_locale.members.class = MR_EVT_CLASS_DEBUG;

			if (mrsas_register_aen(sc, sc->last_seq_num,
			    class_locale.word)) {
				device_printf(sc->mrsas_dev,
				    "ERROR: AEN registration FAILED from OCR !!! "
				    "Further events from the controller cannot be notified."
				    "Either there is some problem in the controller"
				    "or the controller does not support AEN.\n"
				    "Please contact to the SUPPORT TEAM if the problem persists\n");
			}
			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* No outstanding commands: controller is healthy, skip OCR. */
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}

/*
 * mrsas_kill_hba: Kill HBA when OCR is not supported
 * input: Adapter Context.
 *
 * This function will kill HBA when OCR is not supported.
 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter unrecoverable before touching the hardware. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Give in-flight register accesses a moment to settle. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	mrsas_complete_outstanding_ioctls(sc);
}

/**
 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
 * input:				Controller softc
 *
 * Walks the whole MPT command list; every MPT command that carries an
 * attached synchronous MFI frame (sync_cmd_idx valid) is completed through
 * mrsas_complete_mptmfi_passthru() once per reply queue, so callers blocked
 * in mrsas_issue_blocked_cmd() are woken up.
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		/* MRSAS_ULONG_MAX marks "no MFI frame attached". */
		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/* ABORT frames are completed via their parent cmd. */
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.status);
			}
		}
	}
}

/*
 * mrsas_wait_for_outstanding:	Wait for outstanding commands
 * input:			Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding commands to be
 * completed.
 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;


	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		/* A driver teardown in progress beats waiting any longer. */
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		/* A DCMD timeout also forces an adapter reset. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drain every reply queue ourselves while we wait. */
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
		}
		/* Re-check once per second. */
		DELAY(1000 * 1000);
	}

	/* Commands still pending after the full wait: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/*
 * mrsas_release_mfi_cmd:	Return a cmd to free command pool
 * input:			Command packet for return to free cmd pool
 *
 * This function returns the MFI command to the command list.
 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	cmd->ccb_ptr = NULL;
	cmd->cmd_id.frame_count = 0;
	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_controller_info:	Returns FW's controller structure
 * input:			Adapter soft state
 *				Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure. This
 * information is mainly used to find out the maximum IO transfer per command
 * supported by the FW.
 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* cleared only once the DCMD completed */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites this on completion */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		/* Copy the DMA buffer into the softc-owned copy. */
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/*
	 * On timeout the frame may still be owned by FW: schedule an OCR
	 * instead of releasing the command back into the pool.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_update_ext_vd_details:	Update details w.r.t Extended VD
 * input:
 *	sc - Controller's softc
 *
 * Derives the driver- and firmware-supported VD/PD counts and the various
 * RAID map sizes from the freshly fetched controller info.
 */
static void
mrsas_update_ext_vd_details(struct mrsas_softc *sc)
{
	sc->max256vdSupport =
	    sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
	/* Below is additional check to address future FW enhancement */
	if (sc->ctrl_info->max_lds > 64)
		sc->max256vdSupport = 1;

	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	if (sc->max256vdSupport) {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	} else {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	}

	/* The map structs embed one MR_LD_SPAN_MAP, hence "count - 1". */
	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) *
	    (sc->fw_supported_vd_count - 1));
	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) *
	    (sc->drv_supported_vd_count - 1));

	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);

	if (sc->max256vdSupport)
		sc->current_map_sz = sc->new_map_sz;
	else
		sc->current_map_sz = sc->old_map_sz;
}

/*
 * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
 * input:			Adapter soft state
 *
 * Allocates DMAable memory for the controller info internal command.
 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ctlr_info_size,		/* maxsize */
	    1,				/* nsegments */
	    ctlr_info_size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		/*
		 * NOTE(review): the tag created above is not destroyed on
		 * this or the following error path -- presumably cleaned up
		 * later via mrsas_free_ctlr_info_cmd(); verify.
		 */
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}

/*
 * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
 * input:			Adapter soft state
 *
 * Deallocates memory of the get controller info cmd.
 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Unload, free and destroy in the reverse order of allocation. */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}

/*
 * mrsas_issue_polled:	Issues a polling command
 * inputs:		Adapter soft state
 *			Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware. MFI requires
 * the cmd_status to be set to 0xFF before posting.
 * The maximum wait time of
 * the poll response timer is 180 seconds.
 */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = SUCCESS;

	/* 0xFF marks "no status yet"; FW overwrites it on completion. */
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* Still 0xFF after the full wait: the command timed out. */
	if (frame_hdr->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
 * input:		Adapter soft state mfi cmd pointer
 *
 * This function is called by mrsas_issued_blocked_cmd() and
 * mrsas_issued_polled(), to build the MPT command and then fire the command
 * to Firmware.
3409 */ 3410 int 3411 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3412 { 3413 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3414 3415 req_desc = mrsas_build_mpt_cmd(sc, cmd); 3416 if (!req_desc) { 3417 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 3418 return (1); 3419 } 3420 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 3421 3422 return (0); 3423 } 3424 3425 /* 3426 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd 3427 * input: Adapter soft state mfi cmd to build 3428 * 3429 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru 3430 * command and prepares the MPT command to send to Firmware. 3431 */ 3432 MRSAS_REQUEST_DESCRIPTOR_UNION * 3433 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3434 { 3435 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3436 u_int16_t index; 3437 3438 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 3439 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 3440 return NULL; 3441 } 3442 index = cmd->cmd_id.context.smid; 3443 3444 req_desc = mrsas_get_request_desc(sc, index - 1); 3445 if (!req_desc) 3446 return NULL; 3447 3448 req_desc->addr.Words = 0; 3449 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3450 3451 req_desc->SCSIIO.SMID = index; 3452 3453 return (req_desc); 3454 } 3455 3456 /* 3457 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command 3458 * input: Adapter soft state mfi cmd pointer 3459 * 3460 * The MPT command and the io_request are setup as a passthru command. The SGE 3461 * chain address is set to frame_phys_addr of the MFI command. 
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the MPT completion can find the MFI frame. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		/* Clear the flags of the last SGE in the main message. */
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA address of the MFI frame itself. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}

/*
 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
 * input:			Adapter soft state Command to be issued
 *
 * This function waits on an event for the command to be returned from the ISR.
 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
 * internal and ioctl commands.
 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the sleep channel used below is &sc->chan itself;
	 * the value stored here (the address of the local 'cmd' variable)
	 * does not appear to be read anywhere -- verify.
	 */
	sc->chan = (void *)&cmd;

	while (1) {
		if (cmd->cmd_status == 0xFF) {
			/* Sleep up to 1s at a time; woken by mrsas_wakeup(). */
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	/* Still 0xFF: FW never delivered a completion for this frame. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_complete_mptmfi_passthru:	Completes a command
 * input:	@sc:			Adapter soft state
 *		@cmd:			Command to be completed
 *		@status:		cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt is
 * received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH -- non-IOCTL SCSI IO handled like DCMD below. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map is validated. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				/* Flip to the buffer FW just filled in. */
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-register the pended map-update command with FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {

			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/*
 * mrsas_wakeup:	Completes an internal command
 * input:		Adapter soft state
 *			Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 * timer is started. This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF means "no status reported"; treat it as success. */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}

/*
 * mrsas_shutdown_ctlr:	Instructs FW to shutdown the controller input:
 * Adapter soft state Shutdown/Hibernate
 *
 * This function issues a DCMD internal command to Firmware to initiate shutdown
 * of the controller.
3707 */ 3708 static void 3709 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 3710 { 3711 struct mrsas_mfi_cmd *cmd; 3712 struct mrsas_dcmd_frame *dcmd; 3713 3714 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3715 return; 3716 3717 cmd = mrsas_get_mfi_cmd(sc); 3718 if (!cmd) { 3719 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 3720 return; 3721 } 3722 if (sc->aen_cmd) 3723 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 3724 if (sc->map_update_cmd) 3725 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 3726 if (sc->jbod_seq_cmd) 3727 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); 3728 3729 dcmd = &cmd->frame->dcmd; 3730 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3731 3732 dcmd->cmd = MFI_CMD_DCMD; 3733 dcmd->cmd_status = 0x0; 3734 dcmd->sge_count = 0; 3735 dcmd->flags = MFI_FRAME_DIR_NONE; 3736 dcmd->timeout = 0; 3737 dcmd->pad_0 = 0; 3738 dcmd->data_xfer_len = 0; 3739 dcmd->opcode = opcode; 3740 3741 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 3742 3743 mrsas_issue_blocked_cmd(sc, cmd); 3744 mrsas_release_mfi_cmd(cmd); 3745 3746 return; 3747 } 3748 3749 /* 3750 * mrsas_flush_cache: Requests FW to flush all its caches input: 3751 * Adapter soft state 3752 * 3753 * This function is issues a DCMD internal command to Firmware to initiate 3754 * flushing of all caches. 
 */
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	/* Flush both the controller cache and the disk caches. */
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

/*
 * megasas_sync_pd_seq_num:	Sync JBOD map sequence numbers with FW
 * input:			Adapter soft state
 *				pend - true to leave the command pended with
 *				       FW (completed via ISR), false to poll
 *
 * Issues MR_DCMD_SYSTEM_PD_MAP_GET_INFO to read (or, when 'pend' is set,
 * keep registered) the FW's PD sequence-number map used by the JBOD code.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* The struct already embeds one MR_PD_CFG_SEQ; hence "- 1". */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered: pick the buffer by the low bit of the map id. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/* Leave the command registered with FW; ISR completes it. */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			/*
			 * NOTE(review): 'cmd' is not released on this error
			 * path -- confirm whether that is intentional.
			 */
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/*
	 * On timeout the frame may still be owned by FW; schedule an OCR
	 * instead of releasing the command.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_get_map_info:	Load and validate RAID map
 * input:		Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 * and validate RAID map. It returns 0 if successful, 1 other- wise.
 */
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
	uint8_t retcode = 0;

	sc->fast_path_io = 0;
	if (!mrsas_get_ld_map_info(sc)) {
		retcode = MR_ValidateMapInfo(sc);
		if (retcode == 0) {
			/* Map fetched and valid: fast path IO may be used. */
			sc->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * mrsas_get_ld_map_info:	Get FW's ld_map structure
 * input:			Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure.
3897 */ 3898 static int 3899 mrsas_get_ld_map_info(struct mrsas_softc *sc) 3900 { 3901 int retcode = 0; 3902 struct mrsas_mfi_cmd *cmd; 3903 struct mrsas_dcmd_frame *dcmd; 3904 void *map; 3905 bus_addr_t map_phys_addr = 0; 3906 3907 cmd = mrsas_get_mfi_cmd(sc); 3908 if (!cmd) { 3909 device_printf(sc->mrsas_dev, 3910 "Cannot alloc for ld map info cmd.\n"); 3911 return 1; 3912 } 3913 dcmd = &cmd->frame->dcmd; 3914 3915 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 3916 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 3917 if (!map) { 3918 device_printf(sc->mrsas_dev, 3919 "Failed to alloc mem for ld map info.\n"); 3920 mrsas_release_mfi_cmd(cmd); 3921 return (ENOMEM); 3922 } 3923 memset(map, 0, sizeof(sc->max_map_sz)); 3924 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3925 3926 dcmd->cmd = MFI_CMD_DCMD; 3927 dcmd->cmd_status = 0xFF; 3928 dcmd->sge_count = 1; 3929 dcmd->flags = MFI_FRAME_DIR_READ; 3930 dcmd->timeout = 0; 3931 dcmd->pad_0 = 0; 3932 dcmd->data_xfer_len = sc->current_map_sz; 3933 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3934 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3935 dcmd->sgl.sge32[0].length = sc->current_map_sz; 3936 3937 retcode = mrsas_issue_polled(sc, cmd); 3938 if (retcode == ETIMEDOUT) 3939 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 3940 else 3941 mrsas_release_mfi_cmd(cmd); 3942 3943 return (retcode); 3944 } 3945 3946 /* 3947 * mrsas_sync_map_info: Get FW's ld_map structure input: 3948 * Adapter instance soft state 3949 * 3950 * Issues an internal command (DCMD) to get the FW's controller PD list 3951 * structure. 
 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for sync map info cmd\n");
		return 1;
	}
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	/* NOTE(review): size_sync_info is computed but never used. */
	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the sync list in the other of the two raid-map buffers. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One target-id/sequence-number pair per LD. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->mbox.b[0] = num_lds;
	/* Pend with FW: the completion arrives when the map changes. */
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}

/*
 * mrsas_get_pd_list:	Returns FW's PD list structure
 * input:		Adapter soft state
 *
 * Issues an internal command
 * (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out about system
 * supported by Firmware.
 */
static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct MR_PD_LIST *pd_list_mem;
	struct MR_PD_ADDRESS *pd_addr;
	bus_addr_t pd_list_phys_addr = 0;
	struct mrsas_tmp_dcmd *tcmd;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD list cmd\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/*
	 * NOTE(review): this M_NOWAIT allocation is not checked for NULL
	 * before being handed to mrsas_alloc_tmp_dcmd() -- verify.
	 */
	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc dmamap for get PD list cmd\n");
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	} else {
		pd_list_mem = tcmd->tmp_dcmd_mem;
		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Query only the PDs exposed to the host. */
	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Get the instance PD list */
	pd_count = MRSAS_MAX_PD;
	pd_addr = pd_list_mem->addr;
	if (pd_list_mem->count < pd_count) {
		memset(sc->local_pd_list, 0,
		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
		/* Stage into local_pd_list, indexed by device id. */
		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
			sc->local_pd_list[pd_addr->deviceId].driveType =
			    pd_addr->scsiDevType;
			sc->local_pd_list[pd_addr->deviceId].driveState =
			    MR_PD_STATE_SYSTEM;
			pd_addr++;
		}
		/*
		 * Use mutex/spinlock if pd_list component size increase more than
		 * 32 bit.
		 */
		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
		do_ocr = 0;
	}
dcmd_timeout:
	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);

	/*
	 * On timeout the frame may still be owned by FW; schedule an OCR
	 * instead of releasing the command.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_get_ld_list:	Returns FW's LD list structure
 * input:		Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out about supported by
 * the FW.
4114 */ 4115 static int 4116 mrsas_get_ld_list(struct mrsas_softc *sc) 4117 { 4118 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 4119 u_int8_t do_ocr = 1; 4120 struct mrsas_mfi_cmd *cmd; 4121 struct mrsas_dcmd_frame *dcmd; 4122 struct MR_LD_LIST *ld_list_mem; 4123 bus_addr_t ld_list_phys_addr = 0; 4124 struct mrsas_tmp_dcmd *tcmd; 4125 4126 cmd = mrsas_get_mfi_cmd(sc); 4127 if (!cmd) { 4128 device_printf(sc->mrsas_dev, 4129 "Cannot alloc for get LD list cmd\n"); 4130 return 1; 4131 } 4132 dcmd = &cmd->frame->dcmd; 4133 4134 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4135 ld_list_size = sizeof(struct MR_LD_LIST); 4136 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 4137 device_printf(sc->mrsas_dev, 4138 "Cannot alloc dmamap for get LD list cmd\n"); 4139 mrsas_release_mfi_cmd(cmd); 4140 mrsas_free_tmp_dcmd(tcmd); 4141 free(tcmd, M_MRSAS); 4142 return (ENOMEM); 4143 } else { 4144 ld_list_mem = tcmd->tmp_dcmd_mem; 4145 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4146 } 4147 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4148 4149 if (sc->max256vdSupport) 4150 dcmd->mbox.b[0] = 1; 4151 4152 dcmd->cmd = MFI_CMD_DCMD; 4153 dcmd->cmd_status = 0xFF; 4154 dcmd->sge_count = 1; 4155 dcmd->flags = MFI_FRAME_DIR_READ; 4156 dcmd->timeout = 0; 4157 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 4158 dcmd->opcode = MR_DCMD_LD_GET_LIST; 4159 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 4160 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 4161 dcmd->pad_0 = 0; 4162 4163 retcode = mrsas_issue_polled(sc, cmd); 4164 if (retcode == ETIMEDOUT) 4165 goto dcmd_timeout; 4166 4167 #if VD_EXT_DEBUG 4168 printf("Number of LDs %d\n", ld_list_mem->ldCount); 4169 #endif 4170 4171 /* Get the instance LD list */ 4172 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { 4173 sc->CurLdCount = ld_list_mem->ldCount; 4174 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4175 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 4176 
if (ld_list_mem->ldList[ld_index].state != 0) { 4177 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4178 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4179 } 4180 } 4181 do_ocr = 0; 4182 } 4183 dcmd_timeout: 4184 mrsas_free_tmp_dcmd(tcmd); 4185 free(tcmd, M_MRSAS); 4186 4187 if (do_ocr) 4188 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4189 else 4190 mrsas_release_mfi_cmd(cmd); 4191 4192 return (retcode); 4193 } 4194 4195 /* 4196 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 4197 * Adapter soft state Temp command Size of alloction 4198 * 4199 * Allocates DMAable memory for a temporary internal command. The allocated 4200 * memory is initialized to all zeros upon successful loading of the dma 4201 * mapped memory. 4202 */ 4203 int 4204 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 4205 struct mrsas_tmp_dcmd *tcmd, int size) 4206 { 4207 if (bus_dma_tag_create(sc->mrsas_parent_tag, 4208 1, 0, 4209 BUS_SPACE_MAXADDR_32BIT, 4210 BUS_SPACE_MAXADDR, 4211 NULL, NULL, 4212 size, 4213 1, 4214 size, 4215 BUS_DMA_ALLOCNOW, 4216 NULL, NULL, 4217 &tcmd->tmp_dcmd_tag)) { 4218 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 4219 return (ENOMEM); 4220 } 4221 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 4222 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 4223 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 4224 return (ENOMEM); 4225 } 4226 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 4227 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 4228 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 4229 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 4230 return (ENOMEM); 4231 } 4232 memset(tcmd->tmp_dcmd_mem, 0, size); 4233 return (0); 4234 } 4235 4236 /* 4237 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 4238 * temporary dcmd pointer 4239 * 4240 * Deallocates memory of the temporary command for use in the construction of 4241 * the 
internal DCMD. 4242 */ 4243 void 4244 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 4245 { 4246 if (tmp->tmp_dcmd_phys_addr) 4247 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 4248 if (tmp->tmp_dcmd_mem != NULL) 4249 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 4250 if (tmp->tmp_dcmd_tag != NULL) 4251 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 4252 } 4253 4254 /* 4255 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 4256 * Adapter soft state Previously issued cmd to be aborted 4257 * 4258 * This function is used to abort previously issued commands, such as AEN and 4259 * RAID map sync map commands. The abort command is sent as a DCMD internal 4260 * command and subsequently the driver will wait for a return status. The 4261 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 4262 */ 4263 static int 4264 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 4265 struct mrsas_mfi_cmd *cmd_to_abort) 4266 { 4267 struct mrsas_mfi_cmd *cmd; 4268 struct mrsas_abort_frame *abort_fr; 4269 u_int8_t retcode = 0; 4270 unsigned long total_time = 0; 4271 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 4272 4273 cmd = mrsas_get_mfi_cmd(sc); 4274 if (!cmd) { 4275 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 4276 return (1); 4277 } 4278 abort_fr = &cmd->frame->abort; 4279 4280 /* Prepare and issue the abort frame */ 4281 abort_fr->cmd = MFI_CMD_ABORT; 4282 abort_fr->cmd_status = 0xFF; 4283 abort_fr->flags = 0; 4284 abort_fr->abort_context = cmd_to_abort->index; 4285 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 4286 abort_fr->abort_mfi_phys_addr_hi = 0; 4287 4288 cmd->sync_cmd = 1; 4289 cmd->cmd_status = 0xFF; 4290 4291 if (mrsas_issue_dcmd(sc, cmd)) { 4292 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 4293 return (1); 4294 } 4295 /* Wait for this cmd to complete */ 4296 sc->chan = (void *)&cmd; 4297 while (1) { 4298 if (cmd->cmd_status == 0xFF) { 4299 
tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 4300 } else 4301 break; 4302 total_time++; 4303 if (total_time >= max_wait) { 4304 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 4305 retcode = 1; 4306 break; 4307 } 4308 } 4309 4310 cmd->sync_cmd = 0; 4311 mrsas_release_mfi_cmd(cmd); 4312 return (retcode); 4313 } 4314 4315 /* 4316 * mrsas_complete_abort: Completes aborting a command input: 4317 * Adapter soft state Cmd that was issued to abort another cmd 4318 * 4319 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 4320 * change after sending the command. This function is called from 4321 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 4322 */ 4323 void 4324 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4325 { 4326 if (cmd->sync_cmd) { 4327 cmd->sync_cmd = 0; 4328 cmd->cmd_status = 0; 4329 sc->chan = (void *)&cmd; 4330 wakeup_one((void *)&sc->chan); 4331 } 4332 return; 4333 } 4334 4335 /* 4336 * mrsas_aen_handler: AEN processing callback function from thread context 4337 * input: Adapter soft state 4338 * 4339 * Asynchronous event handler 4340 */ 4341 void 4342 mrsas_aen_handler(struct mrsas_softc *sc) 4343 { 4344 union mrsas_evt_class_locale class_locale; 4345 int doscan = 0; 4346 u_int32_t seq_num; 4347 int error, fail_aen = 0; 4348 4349 if (sc == NULL) { 4350 printf("invalid instance!\n"); 4351 return; 4352 } 4353 if (sc->evt_detail_mem) { 4354 switch (sc->evt_detail_mem->code) { 4355 case MR_EVT_PD_INSERTED: 4356 fail_aen = mrsas_get_pd_list(sc); 4357 if (!fail_aen) 4358 mrsas_bus_scan_sim(sc, sc->sim_1); 4359 else 4360 goto skip_register_aen; 4361 doscan = 0; 4362 break; 4363 case MR_EVT_PD_REMOVED: 4364 fail_aen = mrsas_get_pd_list(sc); 4365 if (!fail_aen) 4366 mrsas_bus_scan_sim(sc, sc->sim_1); 4367 else 4368 goto skip_register_aen; 4369 doscan = 0; 4370 break; 4371 case MR_EVT_LD_OFFLINE: 4372 case MR_EVT_CFG_CLEARED: 4373 case 
MR_EVT_LD_DELETED: 4374 mrsas_bus_scan_sim(sc, sc->sim_0); 4375 doscan = 0; 4376 break; 4377 case MR_EVT_LD_CREATED: 4378 fail_aen = mrsas_get_ld_list(sc); 4379 if (!fail_aen) 4380 mrsas_bus_scan_sim(sc, sc->sim_0); 4381 else 4382 goto skip_register_aen; 4383 doscan = 0; 4384 break; 4385 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4386 case MR_EVT_FOREIGN_CFG_IMPORTED: 4387 case MR_EVT_LD_STATE_CHANGE: 4388 doscan = 1; 4389 break; 4390 default: 4391 doscan = 0; 4392 break; 4393 } 4394 } else { 4395 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4396 return; 4397 } 4398 if (doscan) { 4399 fail_aen = mrsas_get_pd_list(sc); 4400 if (!fail_aen) { 4401 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4402 mrsas_bus_scan_sim(sc, sc->sim_1); 4403 } else 4404 goto skip_register_aen; 4405 4406 fail_aen = mrsas_get_ld_list(sc); 4407 if (!fail_aen) { 4408 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 4409 mrsas_bus_scan_sim(sc, sc->sim_0); 4410 } else 4411 goto skip_register_aen; 4412 } 4413 seq_num = sc->evt_detail_mem->seq_num + 1; 4414 4415 /* Register AEN with FW for latest sequence number plus 1 */ 4416 class_locale.members.reserved = 0; 4417 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4418 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4419 4420 if (sc->aen_cmd != NULL) 4421 return; 4422 4423 mtx_lock(&sc->aen_lock); 4424 error = mrsas_register_aen(sc, seq_num, 4425 class_locale.word); 4426 mtx_unlock(&sc->aen_lock); 4427 4428 if (error) 4429 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 4430 4431 skip_register_aen: 4432 return; 4433 4434 } 4435 4436 4437 /* 4438 * mrsas_complete_aen: Completes AEN command 4439 * input: Adapter soft state 4440 * Cmd that was issued to abort another cmd 4441 * 4442 * This function will be called from ISR and will continue event processing from 4443 * thread context by enqueuing task in ev_tq (callback function 4444 * "mrsas_aen_handler"). 
4445 */ 4446 void 4447 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4448 { 4449 /* 4450 * Don't signal app if it is just an aborted previously registered 4451 * aen 4452 */ 4453 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 4454 sc->mrsas_aen_triggered = 1; 4455 mtx_lock(&sc->aen_lock); 4456 if (sc->mrsas_poll_waiting) { 4457 sc->mrsas_poll_waiting = 0; 4458 selwakeup(&sc->mrsas_select); 4459 } 4460 mtx_unlock(&sc->aen_lock); 4461 } else 4462 cmd->abort_aen = 0; 4463 4464 sc->aen_cmd = NULL; 4465 mrsas_release_mfi_cmd(cmd); 4466 4467 if (!sc->remove_in_progress) 4468 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 4469 4470 return; 4471 } 4472 4473 static device_method_t mrsas_methods[] = { 4474 DEVMETHOD(device_probe, mrsas_probe), 4475 DEVMETHOD(device_attach, mrsas_attach), 4476 DEVMETHOD(device_detach, mrsas_detach), 4477 DEVMETHOD(device_suspend, mrsas_suspend), 4478 DEVMETHOD(device_resume, mrsas_resume), 4479 DEVMETHOD(bus_print_child, bus_generic_print_child), 4480 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 4481 {0, 0} 4482 }; 4483 4484 static driver_t mrsas_driver = { 4485 "mrsas", 4486 mrsas_methods, 4487 sizeof(struct mrsas_softc) 4488 }; 4489 4490 static devclass_t mrsas_devclass; 4491 4492 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 4493 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 4494