/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/endian.h>

/*
 * Function prototypes
 */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;

static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev,
    u_long cmd, caddr_t arg);
u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int mrsas_init_adapter(struct mrsas_softc *sc);
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int mrsas_reset_targets(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_disable_intr(struct mrsas_softc *sc);
void mrsas_enable_intr(struct mrsas_softc *sc);
void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void mrsas_free_mem(struct mrsas_softc *sc);
void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void mrsas_isr(void *arg);
void mrsas_teardown_intr(struct mrsas_softc *sc);
void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void mrsas_kill_hba(struct mrsas_softc *sc);
void mrsas_aen_handler(struct mrsas_softc *sc);
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);

MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
	(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);

extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);

void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
    union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
    u_int32_t data_length, u_int8_t *sense);
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);

SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;
	const char *desc;
}	MRSAS_CTLR_ID;

MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}

int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}

u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
{
	u_int32_t i = 0, ret_val;

	if (sc->is_aero) {
		do {
			ret_val = mrsas_read_reg(sc, offset);
			i++;
		} while (ret_val == 0 && i < 3);
	} else
		ret_val = mrsas_read_reg(sc, offset);

	return ret_val;
}

/*
 * Register Read/Write Functions
 *
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}

/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

void
mrsas_enable_intr(struct mrsas_softc *sc)
{
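	/*
	 * Enable sequence, as implemented below: first acknowledge any
	 * pending interrupt by writing ~0 to the outbound status register,
	 * then unmask by writing the complement of the enable mask. Each
	 * write is followed by a read to flush the posted write to the
	 * controller.
	 */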
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;

	sc->mask_interrupts = 0;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status;

	/* Read received interrupt */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl: setup sysctl values for mrsas
 * input: Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
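	 * (Illustrative usage, assuming unit 0: `sysctl dev.mrsas.0.mrsas_debug=0x1f`;
	 * the node lands under hw.mrsas.<unit> instead if the device sysctl
	 * tree was unavailable and the fallback branch below was taken.)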
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of OCRs from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in milliseconds.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "OCR in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
	    &sc->drv_stream_detection, 0,
	    "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}

/*
 * mrsas_get_tunables: get tunable parameters.
 * input: Adapter instance soft state
 *
 * Get tunable parameters. This helps to debug the driver at boot time.
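 * (Illustrative: the global debug level can be preset from the loader
 * environment, e.g. hw.mrsas.debug_level="31" in /boot/loader.conf, and
 * per-unit via dev.mrsas.0.debug_level, matching the TUNABLE_INT_FETCH
 * calls below.)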
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
	    (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balancing tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
 * Used to get sequence number at driver load time.
 * input: Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,
	    1,
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_log_info_cmd: Free memory for event log info command
 * input: Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}

/*
 * mrsas_get_seq_num: Get latest event sequence number
 * @sc: Adapter soft state
 * @eli: Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * The driver fetches the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
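 * The sequence number returned here is what mrsas_start_aen() passes
 * (plus one) to mrsas_register_aen() to subscribe to subsequent events.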
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into the caller's buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}

/*
 * mrsas_register_aen: Register for asynchronous event notification
 * @sc: Adapter soft state
 * @seq_num: Starting sequence number
 * @class_locale: Class of the event
 *
 * This function subscribes to events beyond @seq_num
 * and of type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is a superset of both old and current, and
	 * re-issue it to the FW.
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such a hierarchy; they
		 * are bitmap values.
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes the
			 * current request. Nothing to do.
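			 *
			 * Worked example: if the pending AEN was registered
			 * with class PROGRESS (-1) and locale 0xffff, then
			 * any new request whose class is >= -1 and whose
			 * locale bits are already within 0xffff is covered,
			 * so no re-registration is needed.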
			 */
			return 0;
		} else {
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store a reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app.
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen: Subscribes to AEN during driver load time
 * @instance: Adapter soft state
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;

	/* Get the latest sequence number from FW */

	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);
}

/*
 * mrsas_setup_msix: Set up MSI-X interrupt resources and handlers
 * @sc: adapter soft state
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[i], RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}
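
/*
 * Note on ordering: mrsas_allocate_msix() below obtains the vectors from the
 * PCI layer via pci_alloc_msix(); only after that can mrsas_setup_msix()
 * above allocate each SYS_RES_IRQ resource (rid = vector index + 1) and
 * attach mrsas_isr to it.
 */
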
/*
 * mrsas_allocate_msix: Allocate MSI-X vectors from the PCI layer
 * @sc: adapter soft state
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
		    " of vectors\n", sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_attach: PCI entry point
 * input: pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/*
	 * Force the busmaster enable bit on.
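	 * (Firmware/BIOS may leave it clear; the HBA cannot DMA without it.)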
	 */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero systems, registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize linked lists */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation and AEN setup to the ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSI-X vectors are allocated and FW init FAILED then release MSI-X */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

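/*
 * Attach defers cdev creation and AEN registration to the interrupt config
 * hook below: both need working interrupts, and config_intrhook_establish()
 * guarantees the callback runs once interrupts are available at boot.
 */
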
/*
 * Interrupt config hook
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore to bound the number of
	 * concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to the mrsas_mgmt_info structure so that it can
	 * be exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
		    sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller "
		    "or the controller does not support AEN.\n"
		    "Please contact the SUPPORT TEAM if the problem persists\n");
	}
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}

/*
 * mrsas_detach: De-allocates and tears down resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index.
	 * We let this array be a sparse array.
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

static int
mrsas_shutdown(device_t dev)
{
	struct mrsas_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;
	if (!KERNEL_PANICKED()) {
		if (sc->ocr_thread_active)
			wakeup(&sc->ocr_chan);
		i = 0;
		while (sc->reset_in_progress && i < 15) {
			i++;
			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
				mrsas_dprint(sc, MRSAS_INFO,
				    "[%2d]waiting for OCR to be finished "
				    "from %s\n", i, __func__);
			}
			pause("mr_shutdown", hz);
		}
		if (sc->reset_in_progress) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "gave up waiting for OCR to be finished\n");
		}
	}

	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	return (0);
}

/*
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
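 * Each DMA buffer below is torn down in three steps, mirroring its
 * allocation: bus_dmamap_unload(), bus_dmamem_free(), then
 * bus_dma_tag_destroy().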
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to tear down and release the
 * bus interrupt resources.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}

}

/*
 * mrsas_suspend: Suspend entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled in when the driver gains hibernation support */
	return (0);
}

/*
 * mrsas_resume: Resume entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled in when the driver gains hibernation support */
	return (0);
}

/**
 * mrsas_get_softc_instance: Find softc instance based on cmd type
 *
 * This function returns the softc instance based on cmd type.
 * In some cases the application fires the ioctl on the management node
 * without providing a host_no; use cdev->si_drv1 to get the softc instance
 * in that case. Otherwise, take the softc instance from the host_no the
 * application supplies in the user data.
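 *
 * For example, MRSAS_IOC_GET_PCI_INFO arrives on a specific /dev/mrsas<N>
 * node, so si_drv1 identifies the controller, whereas the firmware
 * pass-through ioctls carry host_no inside struct mrsas_iocpacket.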
 */
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
{
	struct mrsas_softc *sc = NULL;
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
		sc = dev->si_drv1;
	} else {
		/*
		 * get the Host number & the softc from data sent by the
		 * Application
		 */
		if (user_ioc->host_no >= mrsas_mgmt_info.max_index) {
			/* Validate host_no before using it to index sc_ptr. */
			printf("Invalid Controller number %d\n",
			    user_ioc->host_no);
			return (NULL);
		}
		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
		if (sc == NULL)
			printf("There is no Controller number %d\n",
			    user_ioc->host_no);
	}

	return sc;
}

/*
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
		    "HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d, "
		    "pci device no: %d, pci function no: %d, "
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}

/*
 * mrsas_poll: poll entry point for mrsas driver fd
 *
 * This function is the entry point for poll from the OS.
 * It waits for AEN events to be triggered from the controller and notifies
 * back.
 */
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mrsas_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mrsas_aen_triggered) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
	}
	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			mtx_lock(&sc->aen_lock);
			sc->mrsas_poll_waiting = 1;
			selrecord(td, &sc->mrsas_select);
			mtx_unlock(&sc->aen_lock);
		}
	}
	return revents;
}

/*
 * mrsas_setup_irq: Set up interrupt
 * input: Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags that permit
 * sharing the resource and activate it atomically.
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");

	else {
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			device_printf(sc->mrsas_dev, "Cannot allocate legacy "
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy "
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr: ISR entry point
 * input: argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf("Entered ISR while OCR is going active.\n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply requests and
 * clear the response interrupt. Processing a reply request entails walking
 * through the reply descriptor array for the command requests pending
 * completion from firmware. We look at the Function field to determine the
 * command type and perform the appropriate action. Before we return, we
 * clear the response interrupt.
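 *
 * Descriptor addressing, as computed below: each MSI-X vector owns a
 * reply_alloc_sz-byte slice of reply_desc_mem, and last_reply_idx[] is the
 * per-vector consumer index into that slice.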
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, there is no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
			wakeup_one((void *)&sc->ocr_chan);
			break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall through and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer RAID 1/10 fast path failed,
				 * mark the IO as failed to the SCSI layer.
				 * Overwrite the current status with the
				 * failed status and make sure that if any
				 * command fails, the driver returns a fail
				 * status to CAM.
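				 *
				 * (Both halves of a RAID 1/10 write carry a
				 * pointer to their peer_cmd; whichever
				 * command completes second reaps both, which
				 * is why fw_outstanding is decremented twice
				 * on this path.)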
1715 */ 1716 cmd_mpt->cmd_completed = 1; 1717 r1_cmd = cmd_mpt->peer_cmd; 1718 if (r1_cmd->cmd_completed) { 1719 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) { 1720 status = r1_cmd->io_request->RaidContext.raid_context.status; 1721 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus; 1722 data_length = r1_cmd->io_request->DataLength; 1723 sense = r1_cmd->sense; 1724 } 1725 r1_cmd->ccb_ptr = NULL; 1726 if (r1_cmd->callout_owner) { 1727 callout_stop(&r1_cmd->cm_callout); 1728 r1_cmd->callout_owner = false; 1729 } 1730 mrsas_release_mpt_cmd(r1_cmd); 1731 mrsas_atomic_dec(&sc->fw_outstanding); 1732 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1733 extStatus, le32toh(data_length), sense); 1734 mrsas_cmd_done(sc, cmd_mpt); 1735 mrsas_atomic_dec(&sc->fw_outstanding); 1736 } 1737 } 1738 break; 1739 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ 1740 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 1741 /* 1742 * Make sure NOT TO release the mfi command from the called 1743 * function's context if it is fired with issue_polled call. 1744 * And also make sure that the issue_polled call should only be 1745 * used if INTERRUPT IS DISABLED. 1746 */ 1747 if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 1748 mrsas_release_mfi_cmd(cmd_mfi); 1749 else 1750 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); 1751 break; 1752 } 1753 1754 sc->last_reply_idx[MSIxIndex]++; 1755 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) 1756 sc->last_reply_idx[MSIxIndex] = 0; 1757 1758 desc->Words = ~((uint64_t)0x00); /* set it back to all 1759 * 0xFFFFFFFFs */ 1760 num_completed++; 1761 threshold_reply_count++; 1762 1763 /* Get the next reply descriptor */ 1764 if (!sc->last_reply_idx[MSIxIndex]) { 1765 desc = sc->reply_desc_mem; 1766 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); 1767 } else 1768 desc++; 1769 1770 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1771 desc_val.word = desc->Words; 1772 1773 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1774 1775 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1776 break; 1777 1778 /* 1779 * Write to reply post index after completing threshold reply 1780 * count and still there are more replies in reply queue 1781 * pending to be completed. 
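		 *
		 * A worked example (illustrative): with combined MSI-X
		 * registers, vector 10 posts to
		 * msix_reg_offset[10 / 8] = msix_reg_offset[1] and writes
		 * ((10 & 0x7) << 24) | last_reply_idx[10], i.e. the queue
		 * number in the top byte and the reply index in the low
		 * bits.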
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map FW command status to CAM status.
 * input: command packet, CCB pointer, status, extended status,
 *        data length, sense buffer
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath
 * IO.  It checks the command status and maps the appropriate CAM status for
 * the CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
    u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, sense, 18);
			ccb_ptr->csio.sense_len = 18;
			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		if (ccb_ptr->ccb_h.target_lun)
			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		ccb_ptr->csio.scsi_status = status;
	}
	return;
}

/*
 * mrsas_alloc_mem: Allocate DMAable memory
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory.
 * The DMA tag describes the constraints of the DMA mapping.  The allocated
 * memory is mapped into kernel virtual address space, and the callback
 * argument receives the physical memory address.
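 *
 * Every buffer below follows the same bus_dma triple; a condensed sketch
 * (hypothetical names, not additional driver code) of the repeated pattern
 * is:
 *
 *	if (bus_dma_tag_create(parent, align, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size,
 *	    BUS_DMA_ALLOCNOW, NULL, NULL, &tag) ||
 *	    bus_dmamem_alloc(tag, &vaddr, BUS_DMA_NOWAIT, &map) ||
 *	    bus_dmamap_load(tag, map, vaddr, size, mrsas_addr_cb,
 *	    &paddr, BUS_DMA_NOWAIT))
 *		return (ENOMEM);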
1875 */ 1876 static int 1877 mrsas_alloc_mem(struct mrsas_softc *sc) 1878 { 1879 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, 1880 evt_detail_size, count, pd_info_size; 1881 1882 /* 1883 * Allocate parent DMA tag 1884 */ 1885 if (bus_dma_tag_create( 1886 bus_get_dma_tag(sc->mrsas_dev), /* parent */ 1887 1, /* alignment */ 1888 0, /* boundary */ 1889 BUS_SPACE_MAXADDR, /* lowaddr */ 1890 BUS_SPACE_MAXADDR, /* highaddr */ 1891 NULL, NULL, /* filter, filterarg */ 1892 BUS_SPACE_MAXSIZE, /* maxsize */ 1893 BUS_SPACE_UNRESTRICTED, /* nsegments */ 1894 BUS_SPACE_MAXSIZE, /* maxsegsize */ 1895 0, /* flags */ 1896 NULL, NULL, /* lockfunc, lockarg */ 1897 &sc->mrsas_parent_tag /* tag */ 1898 )) { 1899 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); 1900 return (ENOMEM); 1901 } 1902 /* 1903 * Allocate for version buffer 1904 */ 1905 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); 1906 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1907 1, 0, 1908 BUS_SPACE_MAXADDR_32BIT, 1909 BUS_SPACE_MAXADDR, 1910 NULL, NULL, 1911 verbuf_size, 1912 1, 1913 verbuf_size, 1914 BUS_DMA_ALLOCNOW, 1915 NULL, NULL, 1916 &sc->verbuf_tag)) { 1917 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); 1918 return (ENOMEM); 1919 } 1920 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, 1921 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { 1922 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); 1923 return (ENOMEM); 1924 } 1925 bzero(sc->verbuf_mem, verbuf_size); 1926 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, 1927 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, 1928 BUS_DMA_NOWAIT)) { 1929 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); 1930 return (ENOMEM); 1931 } 1932 /* 1933 * Allocate IO Request Frames 1934 */ 1935 io_req_size = sc->io_frames_alloc_sz; 1936 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1937 16, 0, 1938 BUS_SPACE_MAXADDR_32BIT, 1939 BUS_SPACE_MAXADDR, 1940 NULL, NULL, 1941 io_req_size, 1942 1, 1943 io_req_size, 1944 BUS_DMA_ALLOCNOW, 1945 NULL, NULL, 1946 &sc->io_request_tag)) { 1947 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); 1948 return (ENOMEM); 1949 } 1950 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, 1951 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { 1952 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); 1953 return (ENOMEM); 1954 } 1955 bzero(sc->io_request_mem, io_req_size); 1956 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, 1957 sc->io_request_mem, io_req_size, mrsas_addr_cb, 1958 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { 1959 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); 1960 return (ENOMEM); 1961 } 1962 /* 1963 * Allocate Chain Frames 1964 */ 1965 chain_frame_size = sc->chain_frames_alloc_sz; 1966 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1967 4, 0, 1968 BUS_SPACE_MAXADDR_32BIT, 1969 BUS_SPACE_MAXADDR, 1970 NULL, NULL, 1971 chain_frame_size, 1972 1, 1973 chain_frame_size, 1974 BUS_DMA_ALLOCNOW, 1975 NULL, NULL, 1976 &sc->chain_frame_tag)) { 1977 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); 1978 return (ENOMEM); 1979 } 1980 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, 1981 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { 1982 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); 1983 return (ENOMEM); 1984 } 1985 bzero(sc->chain_frame_mem, chain_frame_size); 1986 if (bus_dmamap_load(sc->chain_frame_tag, 
sc->chain_frame_dmamap, 1987 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, 1988 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { 1989 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); 1990 return (ENOMEM); 1991 } 1992 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 1993 /* 1994 * Allocate Reply Descriptor Array 1995 */ 1996 reply_desc_size = sc->reply_alloc_sz * count; 1997 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1998 16, 0, 1999 BUS_SPACE_MAXADDR_32BIT, 2000 BUS_SPACE_MAXADDR, 2001 NULL, NULL, 2002 reply_desc_size, 2003 1, 2004 reply_desc_size, 2005 BUS_DMA_ALLOCNOW, 2006 NULL, NULL, 2007 &sc->reply_desc_tag)) { 2008 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); 2009 return (ENOMEM); 2010 } 2011 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, 2012 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { 2013 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); 2014 return (ENOMEM); 2015 } 2016 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, 2017 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, 2018 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { 2019 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); 2020 return (ENOMEM); 2021 } 2022 /* 2023 * Allocate Sense Buffer Array. Keep in lower 4GB 2024 */ 2025 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; 2026 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2027 64, 0, 2028 BUS_SPACE_MAXADDR_32BIT, 2029 BUS_SPACE_MAXADDR, 2030 NULL, NULL, 2031 sense_size, 2032 1, 2033 sense_size, 2034 BUS_DMA_ALLOCNOW, 2035 NULL, NULL, 2036 &sc->sense_tag)) { 2037 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); 2038 return (ENOMEM); 2039 } 2040 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, 2041 BUS_DMA_NOWAIT, &sc->sense_dmamap)) { 2042 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); 2043 return (ENOMEM); 2044 } 2045 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, 2046 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, 2047 BUS_DMA_NOWAIT)) { 2048 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); 2049 return (ENOMEM); 2050 } 2051 2052 /* 2053 * Allocate for Event detail structure 2054 */ 2055 evt_detail_size = sizeof(struct mrsas_evt_detail); 2056 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2057 1, 0, 2058 BUS_SPACE_MAXADDR_32BIT, 2059 BUS_SPACE_MAXADDR, 2060 NULL, NULL, 2061 evt_detail_size, 2062 1, 2063 evt_detail_size, 2064 BUS_DMA_ALLOCNOW, 2065 NULL, NULL, 2066 &sc->evt_detail_tag)) { 2067 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); 2068 return (ENOMEM); 2069 } 2070 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, 2071 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { 2072 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); 2073 return (ENOMEM); 2074 } 2075 bzero(sc->evt_detail_mem, evt_detail_size); 2076 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, 2077 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, 2078 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { 2079 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); 2080 return (ENOMEM); 2081 } 2082 2083 /* 2084 * Allocate for PD INFO structure 2085 */ 2086 pd_info_size = sizeof(struct mrsas_pd_info); 2087 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2088 1, 0, 2089 BUS_SPACE_MAXADDR_32BIT, 2090 BUS_SPACE_MAXADDR, 2091 NULL, NULL, 2092 pd_info_size, 2093 1, 2094 pd_info_size, 2095 BUS_DMA_ALLOCNOW, 2096 NULL, NULL, 
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument, machine dependent type
 *        that describes DMA segments, number of segments, error code
 *
 * This function receives the mapping information resulting from
 * bus_dmamap_load().  Only the physical address of the first segment is
 * kept; the segment count and error code are ignored.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
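 *
 * The maps are double-buffered (indices 0 and 1): the firmware fills one
 * copy while the driver consumes the other, alternating on every map sync.
 * (Summary of the scheme; see mrsas_sync_map_info() and MR_ValidateMapInfo()
 * for the authoritative logic.)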
 */
static int
mrsas_setup_raidmap(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < 2; i++) {
		sc->ld_drv_map[i] =
		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
		/* Error handling */
		if (!sc->ld_drv_map[i]) {
			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");

			if (i == 1)
				free(sc->ld_drv_map[0], M_MRSAS);
			/* ABORT driver initialization */
			goto ABORT;
		}
	}

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    sc->max_map_sz,
		    1,
		    sc->max_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->raidmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raid map tag.\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->raidmap_tag[i],
		    (void **)&sc->raidmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raidmap memory.\n");
			return (ENOMEM);
		}
		bzero(sc->raidmap_mem[i], sc->max_map_sz);

		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
		    sc->raidmap_mem[i], sc->max_map_sz,
		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
			return (ENOMEM);
		}
		if (!sc->raidmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for raid map.\n");
			return (ENOMEM);
		}
	}

	if (!mrsas_get_map_info(sc))
		mrsas_sync_map_info(sc);

	return (0);

ABORT:
	return (1);
}

/**
 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
 * @sc: Adapter soft state
 *
 * Sets sc->use_seqnum_jbod_fp when the JBOD map is supported by the
 * firmware and the sequence numbers could be synced; clears it otherwise.
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	if (sc->use_seqnum_jbod_fp)
		device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}

/*
 * mrsas_init_fw: Initialize Firmware
 * input: Adapter soft state
 *
 * Calls transition_to_ready() to make sure Firmware is in an operational
 * state and calls mrsas_init_adapter() to send the IOC_INIT command to
 * Firmware.  It issues internal commands to get the controller info after
 * the IOC_INIT command response is received from Firmware.  Note: the code
 * relating to get_pdlist, get_ld_list and max_sectors is currently not
 * being used; it is left here as a placeholder.
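 *
 * As a hedged example of the scratch pad decoding done below: on Ventura
 * or Aero parts the maximum RAID map size is pulled out of scratch_pad_3
 * with
 *
 *	sc->maxRaidMapSize = (scratch_pad_3 >>
 *	    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & MR_MAX_RAID_MAP_SIZE_MASK;
 *
 * and MSI-X support is probed from bit 26 of outbound_scratch_pad.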
 */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{

	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
	int msix_enable = 0;
	int fw_msix_count = 0;
	int i, j;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	if (sc->is_ventura || sc->is_aero) {
		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
#if VD_EXT_DEBUG
		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
#endif
		sc->maxRaidMapSize = ((scratch_pad_3 >>
		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
		    MR_MAX_RAID_MAP_SIZE_MASK);
	}
	/* MSI-x index 0 - reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/* Check if MSI-X is supported while in ready state */
	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
			    ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
				sc->msix_combined = true;
			/*
			 * Save the reply post index registers 1-15 to local
			 * memory.  Index 0 is already saved from register
			 * offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
			 */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vectors, "
		    "Online CPUs %d, Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used by all MPT based adapters.
	 */
	if (sc->msix_combined) {
		sc->msix_reg_offset[0] =
		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
	}
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialization failed.\n");
		return (1);
	}

	if (sc->is_ventura || sc->is_aero) {
		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_4));
		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
	}

	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED\n");

	if (sc->use_seqnum_jbod_fp)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map\n");

	if (sc->support_morethan256jbod)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext\n");

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
		    "There seems to be some problem in the controller\n"
		    "Please contact the SUPPORT TEAM if the problem persists\n");
	}
	megasas_setup_jbod_map(sc);

	memset(sc->target_list, 0,
	    MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
		sc->target_list[i].target_id = 0xffff;

	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	if (mrsas_get_pd_list(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
		return (1);
	}
	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	if (mrsas_get_ld_list(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Get LD list failed.\n");
		return (1);
	}

	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
		    MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
		if (!sc->streamDetectByLD) {
			device_printf(sc->mrsas_dev,
			    "unable to allocate stream detection for pool of LDs\n");
			return (1);
		}
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
			if (!sc->streamDetectByLD[i]) {
				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
				for (j = 0; j < i; ++j)
					free(sc->streamDetectByLD[j], M_MRSAS);
				free(sc->streamDetectByLD, M_MRSAS);
				sc->streamDetectByLD = NULL;
				return (1);
			}
			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
		}
	}

	/*
	 * Compute the max allowed sectors per IO: The controller info has
	 * two limits on max sectors.  The driver should use the minimum of
	 * these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares (< FW ver 30) didn't report the
	 * information needed to calculate max_sectors_1, so the number
	 * always ended up as zero.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}

	device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n",
	    sc->max_fw_cmds, sc->max_scsi_cmds);
	return (0);
}

/*
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller.  The FW register is read to determine the number of
 * commands that are supported.  All memory allocations for IO are based on
 * max_cmd.  Appropriate calculations are performed in this function.
 */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;

	/* Determine allocation size of command frames */
	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));

	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x, "
	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x, "
	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
	    sc->reply_q_depth, sc->request_alloc_sz,
	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);

	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
	 * Firmware supports an extended IO chain frame that is 4 times
	 * larger than the legacy one.
	 * Legacy Firmware - frame size is (8 * 128)     = 1K
	 * 1M IO Firmware  - frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO,
	    "max sge: 0x%x, max chain frame size: 0x%x, "
	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
	    sc->max_num_sge,
	    sc->max_chain_frame_sz, sc->max_fw_cmds,
	    sc->chain_frames_alloc_sz);

	/* Used for pass-thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}

/*
 * mrsas_alloc_ioc_cmd: Allocates memory for the IOC Init command
 * input: Adapter soft state
 *
 * Allocates memory for the IOC Init cmd sent to FW to initialize the
 * ROC/controller.
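 *
 * Layout of the single DMA allocation (as used by mrsas_ioc_init()): the
 * first 1024 bytes hold the MFI init frame, and the MPI2_IOC_INIT_REQUEST
 * message lives at offset 1024:
 *
 *	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
 *	IOCInitMsg = (pMpi2IOCInitRequest_t)((char *)sc->ioc_init_mem + 1024);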
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates the memory of the IOC Init cmd.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ?
sc->msix_vectors : 0); 2724 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 2725 2726 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; 2727 init_frame->cmd = MFI_CMD_INIT; 2728 init_frame->cmd_status = 0xFF; 2729 init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 2730 2731 /* driver support Extended MSIX */ 2732 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 2733 init_frame->driver_operations. 2734 mfi_capabilities.support_additional_msix = 1; 2735 } 2736 if (sc->verbuf_mem) { 2737 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", 2738 MRSAS_VERSION); 2739 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; 2740 init_frame->driver_ver_hi = 0; 2741 } 2742 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; 2743 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; 2744 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; 2745 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 2746 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; 2747 2748 init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg); 2749 2750 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; 2751 init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr); 2752 init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t)); 2753 2754 req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem); 2755 req_desc.MFAIo.RequestFlags = 2756 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2757 2758 mrsas_disable_intr(sc); 2759 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); 2760 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high); 2761 2762 /* 2763 * Poll response timer to wait for Firmware response. While this 2764 * timer with the DELAY call could block CPU, the time interval for 2765 * this is only 1 millisecond. 2766 */ 2767 if (init_frame->cmd_status == 0xFF) { 2768 for (i = 0; i < (max_wait * 1000); i++) { 2769 if (init_frame->cmd_status == 0xFF) 2770 DELAY(1000); 2771 else 2772 break; 2773 } 2774 } 2775 if (init_frame->cmd_status == 0) 2776 mrsas_dprint(sc, MRSAS_OCR, 2777 "IOC INIT response received from FW.\n"); 2778 else { 2779 if (init_frame->cmd_status == 0xFF) 2780 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); 2781 else 2782 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); 2783 retcode = 1; 2784 } 2785 2786 if (sc->is_aero) { 2787 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2788 outbound_scratch_pad_2)); 2789 sc->atomic_desc_support = (scratch_pad_2 & 2790 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; 2791 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n", 2792 sc->atomic_desc_support ? "Yes" : "No"); 2793 } 2794 2795 mrsas_free_ioc_cmd(sc); 2796 return (retcode); 2797 } 2798 2799 /* 2800 * mrsas_alloc_mpt_cmds: Allocates the command packets 2801 * input: Adapter instance soft state 2802 * 2803 * This function allocates the internal commands for IOs. Each command that is 2804 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An 2805 * array is allocated with mrsas_mpt_cmd context. The free commands are 2806 * maintained in a linked list (cmd pool). SMID value range is from 1 to 2807 * max_fw_cmds. 
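 *
 * The SMID is simply the array index plus one, so a completion can map a
 * reply back to its command with (sketch mirroring mrsas_complete_cmd()):
 *
 *	smid = le16toh(reply_desc->SMID);
 *	cmd = sc->mpt_cmd_list[smid - 1];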
2808 */ 2809 int 2810 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) 2811 { 2812 int i, j; 2813 u_int32_t max_fw_cmds, count; 2814 struct mrsas_mpt_cmd *cmd; 2815 pMpi2ReplyDescriptorsUnion_t reply_desc; 2816 u_int32_t offset, chain_offset, sense_offset; 2817 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; 2818 u_int8_t *io_req_base, *chain_frame_base, *sense_base; 2819 2820 max_fw_cmds = sc->max_fw_cmds; 2821 2822 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); 2823 if (!sc->req_desc) { 2824 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); 2825 return (ENOMEM); 2826 } 2827 memset(sc->req_desc, 0, sc->request_alloc_sz); 2828 2829 /* 2830 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. 2831 * Allocate the dynamic array first and then allocate individual 2832 * commands. 2833 */ 2834 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds, 2835 M_MRSAS, M_NOWAIT); 2836 if (!sc->mpt_cmd_list) { 2837 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); 2838 return (ENOMEM); 2839 } 2840 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds); 2841 for (i = 0; i < max_fw_cmds; i++) { 2842 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), 2843 M_MRSAS, M_NOWAIT); 2844 if (!sc->mpt_cmd_list[i]) { 2845 for (j = 0; j < i; j++) 2846 free(sc->mpt_cmd_list[j], M_MRSAS); 2847 free(sc->mpt_cmd_list, M_MRSAS); 2848 sc->mpt_cmd_list = NULL; 2849 return (ENOMEM); 2850 } 2851 } 2852 2853 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2854 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2855 chain_frame_base = (u_int8_t *)sc->chain_frame_mem; 2856 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; 2857 sense_base = (u_int8_t *)sc->sense_mem; 2858 sense_base_phys = (bus_addr_t)sc->sense_phys_addr; 2859 for (i = 0; i < max_fw_cmds; i++) { 2860 cmd = sc->mpt_cmd_list[i]; 2861 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 2862 chain_offset = sc->max_chain_frame_sz * i; 2863 sense_offset = MRSAS_SENSE_LEN * i; 2864 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); 2865 cmd->index = i + 1; 2866 cmd->ccb_ptr = NULL; 2867 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2868 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); 2869 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 2870 cmd->sc = sc; 2871 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); 2872 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 2873 cmd->io_request_phys_addr = io_req_base_phys + offset; 2874 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); 2875 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; 2876 cmd->sense = sense_base + sense_offset; 2877 cmd->sense_phys_addr = sense_base_phys + sense_offset; 2878 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { 2879 return (FAIL); 2880 } 2881 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 2882 } 2883 2884 /* Initialize reply descriptor array to 0xFFFFFFFF */ 2885 reply_desc = sc->reply_desc_mem; 2886 count = sc->msix_vectors > 0 ? 
	    sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_write_64bit_req_desc: Writes 64 bit request descriptor to FW
 * input: Adapter softstate
 *        request descriptor address low
 *        request descriptor address high
 */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    le32toh(req_desc_lo));
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    le32toh(req_desc_hi));
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter softstate
 *        request descriptor address low
 *        request descriptor address high
 *
 * This function fires the command to Firmware by writing to the
 * inbound_single_queue_port (atomic descriptors) or to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	if (sc->atomic_desc_support)
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
		    le32toh(req_desc_lo));
	else
		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
}

/*
 * mrsas_transition_to_ready: Move FW to Ready state
 * input: Adapter instance soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states.  If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state.  Otherwise,
 * it simply waits for the FW to reach the ready state.
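 *
 * The state is sampled from the low bits of the outbound scratch pad,
 * e.g. (as done in the body below):
 *
 *	fw_state = mrsas_read_reg_with_retries(sc,
 *	    offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK;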
2939 */ 2940 int 2941 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) 2942 { 2943 int i; 2944 u_int8_t max_wait; 2945 u_int32_t val, fw_state; 2946 u_int32_t cur_state __unused; 2947 u_int32_t abs_state, curr_abs_state; 2948 2949 val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2950 fw_state = val & MFI_STATE_MASK; 2951 max_wait = MRSAS_RESET_WAIT_TIME; 2952 2953 if (fw_state != MFI_STATE_READY) 2954 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); 2955 2956 while (fw_state != MFI_STATE_READY) { 2957 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2958 switch (fw_state) { 2959 case MFI_STATE_FAULT: 2960 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); 2961 if (ocr) { 2962 cur_state = MFI_STATE_FAULT; 2963 break; 2964 } else 2965 return -ENODEV; 2966 case MFI_STATE_WAIT_HANDSHAKE: 2967 /* Set the CLR bit in inbound doorbell */ 2968 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2969 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); 2970 cur_state = MFI_STATE_WAIT_HANDSHAKE; 2971 break; 2972 case MFI_STATE_BOOT_MESSAGE_PENDING: 2973 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2974 MFI_INIT_HOTPLUG); 2975 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 2976 break; 2977 case MFI_STATE_OPERATIONAL: 2978 /* 2979 * Bring it to READY state; assuming max wait 10 2980 * secs 2981 */ 2982 mrsas_disable_intr(sc); 2983 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); 2984 for (i = 0; i < max_wait * 1000; i++) { 2985 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1) 2986 DELAY(1000); 2987 else 2988 break; 2989 } 2990 cur_state = MFI_STATE_OPERATIONAL; 2991 break; 2992 case MFI_STATE_UNDEFINED: 2993 /* 2994 * This state should not last for more than 2 2995 * seconds 2996 */ 2997 cur_state = MFI_STATE_UNDEFINED; 2998 break; 2999 case MFI_STATE_BB_INIT: 3000 cur_state = MFI_STATE_BB_INIT; 3001 break; 3002 case MFI_STATE_FW_INIT: 3003 cur_state = MFI_STATE_FW_INIT; 3004 break; 3005 case MFI_STATE_FW_INIT_2: 3006 cur_state = MFI_STATE_FW_INIT_2; 3007 break; 3008 case MFI_STATE_DEVICE_SCAN: 3009 cur_state = MFI_STATE_DEVICE_SCAN; 3010 break; 3011 case MFI_STATE_FLUSH_CACHE: 3012 cur_state = MFI_STATE_FLUSH_CACHE; 3013 break; 3014 default: 3015 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); 3016 return -ENODEV; 3017 } 3018 3019 /* 3020 * The cur_state should not last for more than max_wait secs 3021 */ 3022 for (i = 0; i < (max_wait * 1000); i++) { 3023 fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3024 outbound_scratch_pad)) & MFI_STATE_MASK); 3025 curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3026 outbound_scratch_pad)); 3027 if (abs_state == curr_abs_state) 3028 DELAY(1000); 3029 else 3030 break; 3031 } 3032 3033 /* 3034 * Return error if fw_state hasn't changed after max_wait 3035 */ 3036 if (curr_abs_state == abs_state) { 3037 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " 3038 "in %d secs\n", fw_state, max_wait); 3039 return -ENODEV; 3040 } 3041 } 3042 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); 3043 return 0; 3044 } 3045 3046 /* 3047 * mrsas_get_mfi_cmd: Get a cmd from free command pool 3048 * input: Adapter soft state 3049 * 3050 * This function removes an MFI command from the command list. 
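 *
 * Typical usage (sketch; callers must tolerate a NULL return when the
 * pool is exhausted):
 *
 *	cmd = mrsas_get_mfi_cmd(sc);
 *	if (!cmd)
 *		return (ENOMEM);
 *	... fill cmd->frame ...
 *	mrsas_issue_blocked_cmd(sc, cmd);
 *	mrsas_release_mfi_cmd(cmd);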
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
 * input: Adapter Context.
 *
 * This function checks the FW status register and the do_timedout_reset
 * flag.  It performs an OCR or kills the adapter if the FW is in fault
 * state or an IO timeout has triggered a reset.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
		    mrsas_atomic_read(&sc->target_reset_outstanding)) {
			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				    "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/*
			 * If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiating OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiating OCR "
					    "because of %s!\n", sc->do_timedout_reset ?
					    "DCMD IO Timeout" : "FW fault");

				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if
				 * it is running.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}

/*
 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
 * input: Adapter Context.
 *
 * This function clears the reply descriptors so that, after OCR, the
 * driver and FW do not carry over stale history.
 */
void
mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
	int i, count;
	pMpi2ReplyDescriptorsUnion_t reply_desc;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	reply_desc = sc->reply_desc_mem;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
}

/*
 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
 * input: Adapter Context.
 *
 * This function runs from thread context so that it can sleep.
 * 1. Do not handle OCR if FW is in HW critical error.
 * 2. Wait up to 180 seconds for outstanding commands to complete.
 * 3. If #2 does not find any outstanding command, the controller is in a
 *    working state, so skip OCR.  Otherwise, do OCR/kill adapter based on
 *    the flag disableOnlineCtrlReset.
 * 4. At the start of the OCR, return all SCSI commands that have a ccb_ptr
 *    back to the CAM layer.
 * 5. After the OCR, re-fire management commands and move the controller to
 *    the operational state.
3202 */ 3203 int 3204 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) 3205 { 3206 int retval = SUCCESS, i, j, retry = 0; 3207 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 3208 union ccb *ccb; 3209 struct mrsas_mfi_cmd *mfi_cmd; 3210 struct mrsas_mpt_cmd *mpt_cmd; 3211 union mrsas_evt_class_locale class_locale; 3212 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3213 3214 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3215 device_printf(sc->mrsas_dev, 3216 "mrsas: Hardware critical error, returning FAIL.\n"); 3217 return FAIL; 3218 } 3219 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3220 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 3221 mrsas_disable_intr(sc); 3222 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", 3223 sc->mrsas_fw_fault_check_delay * hz); 3224 3225 /* First try waiting for commands to complete */ 3226 if (mrsas_wait_for_outstanding(sc, reset_reason)) { 3227 mrsas_dprint(sc, MRSAS_OCR, 3228 "resetting adapter from %s.\n", 3229 __func__); 3230 /* Now return commands back to the CAM layer */ 3231 mtx_unlock(&sc->sim_lock); 3232 for (i = 0; i < sc->max_fw_cmds; i++) { 3233 mpt_cmd = sc->mpt_cmd_list[i]; 3234 3235 if (mpt_cmd->peer_cmd) { 3236 mrsas_dprint(sc, MRSAS_OCR, 3237 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n", 3238 i, mpt_cmd, mpt_cmd->peer_cmd); 3239 } 3240 3241 if (mpt_cmd->ccb_ptr) { 3242 if (mpt_cmd->callout_owner) { 3243 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 3244 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3245 mrsas_cmd_done(sc, mpt_cmd); 3246 } else { 3247 mpt_cmd->ccb_ptr = NULL; 3248 mrsas_release_mpt_cmd(mpt_cmd); 3249 } 3250 } 3251 } 3252 3253 mrsas_atomic_set(&sc->fw_outstanding, 0); 3254 3255 mtx_lock(&sc->sim_lock); 3256 3257 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3258 outbound_scratch_pad)); 3259 abs_state = status_reg & MFI_STATE_MASK; 3260 reset_adapter = status_reg & MFI_RESET_ADAPTER; 3261 if (sc->disableOnlineCtrlReset || 3262 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 3263 /* Reset not supported, kill adapter */ 3264 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); 3265 mrsas_kill_hba(sc); 3266 retval = FAIL; 3267 goto out; 3268 } 3269 /* Now try to reset the chip */ 3270 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 3271 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3272 MPI2_WRSEQ_FLUSH_KEY_VALUE); 3273 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3274 MPI2_WRSEQ_1ST_KEY_VALUE); 3275 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3276 MPI2_WRSEQ_2ND_KEY_VALUE); 3277 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3278 MPI2_WRSEQ_3RD_KEY_VALUE); 3279 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3280 MPI2_WRSEQ_4TH_KEY_VALUE); 3281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3282 MPI2_WRSEQ_5TH_KEY_VALUE); 3283 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3284 MPI2_WRSEQ_6TH_KEY_VALUE); 3285 3286 /* Check that the diag write enable (DRWE) bit is on */ 3287 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3288 fusion_host_diag)); 3289 retry = 0; 3290 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3291 DELAY(100 * 1000); 3292 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3293 fusion_host_diag)); 3294 if (retry++ == 100) { 3295 mrsas_dprint(sc, MRSAS_OCR, 3296 "Host diag unlock failed!\n"); 3297 break; 3298 } 3299 } 3300 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* If not an IOCTL then release the command else re-fire */
					if (!mfi_cmd->sync_cmd) {
						mrsas_release_mfi_cmd(mfi_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			megasas_setup_jbod_map(sc);

			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
					memset(sc->streamDetectByLD[j], 0, sizeof(LD_STREAM_DETECT));
					sc->streamDetectByLD[j]->mruBitMap = MR_STREAM_BITMAP;
				}
			}

			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Register AEN with FW for last sequence number */
			class_locale.members.reserved = 0;
			class_locale.members.locale = MR_EVT_LOCALE_ALL;
			class_locale.members.class = MR_EVT_CLASS_DEBUG;

			mtx_unlock(&sc->sim_lock);
			if (mrsas_register_aen(sc, sc->last_seq_num,
			    class_locale.word)) {
				device_printf(sc->mrsas_dev,
				    "ERROR: AEN registration FAILED from OCR !!! "
				    "Further events from the controller cannot be notified. "
				    "Either there is some problem in the controller "
				    "or the controller does not support AEN.\n"
				    "Please contact the SUPPORT TEAM if the problem persists\n");
			}
			mtx_lock(&sc->sim_lock);

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}

/*
 * mrsas_kill_hba: Kill HBA when OCR is not supported
 * input: Adapter Context.
 *
 * This function will kill the HBA when OCR is not supported.
 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	mrsas_complete_outstanding_ioctls(sc);
}

/**
 * mrsas_complete_outstanding_ioctls - Complete pending IOCTLs after kill_hba
 * input: Controller softc
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.raid_context.status);
			}
		}
	}
}

/*
 * mrsas_wait_for_outstanding: Wait for outstanding commands
 * input: Adapter Context.
 *
 * This function will wait for up to 180 seconds for outstanding commands to
 * complete.
 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ?
			    sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "pending commands remain after waiting, "
		    "will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/*
 * mrsas_release_mfi_cmd: Return a cmd to the free command pool
 * input: Command packet to return to the free cmd pool
 *
 * This function returns the MFI & MPT command to the command list.
 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the mpt command (if at all it is allocated)
	 * associated with the mfi command.
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from the mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid - 1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_ctrl_info: Returns FW's controller structure
 * input: Adapter soft state
 *        Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure. This
 * information is mainly used to find out the maximum IO transfer per command
 * supported by the FW.
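 *
 * For orientation, every internal DCMD in this file follows the same shape
 * (a condensed sketch of the pattern used below, not a verbatim excerpt):
 *
 *	cmd = mrsas_get_mfi_cmd(sc);            /- reserve an MFI command
 *	dcmd = &cmd->frame->dcmd;               /- fill the DCMD frame
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->cmd_status = 0xFF;                /- FW overwrites on completion
 *	dcmd->opcode = htole32(opcode);         /- e.g. MR_DCMD_CTRL_GET_INFO
 *	retcode = mrsas_issue_polled(sc, cmd);  /- or mrsas_issue_blocked_cmd()
 *	mrsas_release_mfi_cmd(cmd);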
3593 */ 3594 static int 3595 mrsas_get_ctrl_info(struct mrsas_softc *sc) 3596 { 3597 int retcode = 0; 3598 u_int8_t do_ocr = 1; 3599 struct mrsas_mfi_cmd *cmd; 3600 struct mrsas_dcmd_frame *dcmd; 3601 3602 cmd = mrsas_get_mfi_cmd(sc); 3603 3604 if (!cmd) { 3605 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 3606 return -ENOMEM; 3607 } 3608 dcmd = &cmd->frame->dcmd; 3609 3610 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 3611 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 3612 mrsas_release_mfi_cmd(cmd); 3613 return -ENOMEM; 3614 } 3615 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3616 3617 dcmd->cmd = MFI_CMD_DCMD; 3618 dcmd->cmd_status = 0xFF; 3619 dcmd->sge_count = 1; 3620 dcmd->flags = MFI_FRAME_DIR_READ; 3621 dcmd->timeout = 0; 3622 dcmd->pad_0 = 0; 3623 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info)); 3624 dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO); 3625 dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF); 3626 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info)); 3627 3628 if (!sc->mask_interrupts) 3629 retcode = mrsas_issue_blocked_cmd(sc, cmd); 3630 else 3631 retcode = mrsas_issue_polled(sc, cmd); 3632 3633 if (retcode == ETIMEDOUT) 3634 goto dcmd_timeout; 3635 else { 3636 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 3637 le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties); 3638 le32_to_cpus(&sc->ctrl_info->adapterOperations2); 3639 le32_to_cpus(&sc->ctrl_info->adapterOperations3); 3640 le16_to_cpus(&sc->ctrl_info->adapterOperations4); 3641 } 3642 3643 do_ocr = 0; 3644 mrsas_update_ext_vd_details(sc); 3645 3646 sc->use_seqnum_jbod_fp = 3647 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; 3648 sc->support_morethan256jbod = 3649 sc->ctrl_info->adapterOperations4.supportPdMapTargetId; 3650 3651 sc->disableOnlineCtrlReset = 3652 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3653 3654 dcmd_timeout: 3655 mrsas_free_ctlr_info_cmd(sc); 3656 3657 if (do_ocr) 3658 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 3659 3660 if (!sc->mask_interrupts) 3661 mrsas_release_mfi_cmd(cmd); 3662 3663 return (retcode); 3664 } 3665 3666 /* 3667 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD 3668 * input: 3669 * sc - Controller's softc 3670 */ 3671 static void 3672 mrsas_update_ext_vd_details(struct mrsas_softc *sc) 3673 { 3674 u_int32_t ventura_map_sz = 0; 3675 sc->max256vdSupport = 3676 sc->ctrl_info->adapterOperations3.supportMaxExtLDs; 3677 3678 /* Below is additional check to address future FW enhancement */ 3679 if (sc->ctrl_info->max_lds > 64) 3680 sc->max256vdSupport = 1; 3681 3682 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS 3683 * MRSAS_MAX_DEV_PER_CHANNEL; 3684 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS 3685 * MRSAS_MAX_DEV_PER_CHANNEL; 3686 if (sc->max256vdSupport) { 3687 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 3688 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3689 } else { 3690 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 3691 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3692 } 3693 3694 if (sc->maxRaidMapSize) { 3695 ventura_map_sz = sc->maxRaidMapSize * 3696 MR_MIN_MAP_SIZE; 3697 sc->current_map_sz = ventura_map_sz; 3698 sc->max_map_sz = ventura_map_sz; 3699 } else { 3700 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + 3701 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); 3702 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); 3703 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); 3704 if 
(sc->max256vdSupport)
			sc->current_map_sz = sc->new_map_sz;
		else
			sc->current_map_sz = sc->old_map_sz;
	}

	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
#if VD_EXT_DEBUG
	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x\n",
	    sc->maxRaidMapSize);
	device_printf(sc->mrsas_dev,
	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
	    "fusion->drv_map_sz = 0x%x, size of driver raid map 0x%lx\n",
	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
#endif
}

/*
 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
 * input: Adapter soft state
 *
 * Allocates DMAable memory for the controller info internal command.
 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ctlr_info_size,
	    1,
	    ctlr_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}

/*
 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
 * input: Adapter soft state
 *
 * Deallocates memory of the get controller info cmd.
 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}

/*
 * mrsas_issue_polled: Issues a polling command
 * inputs: Adapter soft state
 *         Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware. MFI requires
 * the cmd_status to be set to 0xFF before posting. The maximum wait time of
 * the poll response timer is 180 seconds.
 */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = SUCCESS;

	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * Poll response timer to wait for Firmware response.
	 * While this polling with DELAY() can block the CPU, each wait
	 * interval is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (frame_hdr->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_issue_dcmd: Issues an MFI pass-thru cmd
 * input: Adapter soft state
 *        MFI cmd pointer
 *
 * This function is called by mrsas_issue_blocked_cmd() and
 * mrsas_issue_polled() to build the MPT command and then fire the command
 * to Firmware.
 */
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = mrsas_build_mpt_cmd(sc, cmd);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
		return (1);
	}
	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);

	return (0);
}

/*
 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
 * input: Adapter soft state
 *        MFI cmd to build
 *
 * This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru
 * command and prepares the MPT command to send to Firmware.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int16_t index;

	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
		return NULL;
	}
	index = cmd->cmd_id.context.smid;

	req_desc = mrsas_get_request_desc(sc, index - 1);
	if (!req_desc)
		return NULL;

	req_desc->addr.Words = 0;
	req_desc->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = htole16(index);

	return (req_desc);
}

/*
 * mrsas_build_mptmfi_passthru: Builds an MPT-MFI passthru command
 * input: Adapter soft state
 *        MFI cmd pointer
 *
 * The MPT command and the io_request are set up as a passthru command. The SGE
 * chain address is set to frame_phys_addr of the MFI command.
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
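	 * (This mirrors MFI_FRAME_DONT_POST_IN_REPLY_QUEUE from the MFI frame
	 * header into mpt_cmd->flags, so a polled command is not completed a
	 * second time through the reply-queue path; see the check just below.)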
3911 */ 3912 3913 if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 3914 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3915 3916 io_req = mpt_cmd->io_request; 3917 3918 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 3919 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; 3920 3921 sgl_ptr_end += sc->max_sge_in_main_msg - 1; 3922 sgl_ptr_end->Flags = 0; 3923 } 3924 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; 3925 3926 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3927 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; 3928 io_req->ChainOffset = sc->chain_offset_mfi_pthru; 3929 3930 mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr); 3931 3932 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3933 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3934 3935 mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz); 3936 3937 return (0); 3938 } 3939 3940 /* 3941 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds 3942 * input: Adapter soft state Command to be issued 3943 * 3944 * This function waits on an event for the command to be returned from the ISR. 3945 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing 3946 * internal and ioctl commands. 3947 */ 3948 int 3949 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3950 { 3951 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3952 unsigned long total_time = 0; 3953 int retcode = SUCCESS; 3954 3955 /* Initialize cmd_status */ 3956 cmd->cmd_status = 0xFF; 3957 3958 /* Build MPT-MFI command for issue to FW */ 3959 if (mrsas_issue_dcmd(sc, cmd)) { 3960 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3961 return (1); 3962 } 3963 sc->chan = (void *)&cmd; 3964 3965 while (1) { 3966 if (cmd->cmd_status == 0xFF) { 3967 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3968 } else 3969 break; 3970 3971 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL 3972 * command */ 3973 total_time++; 3974 if (total_time >= max_wait) { 3975 device_printf(sc->mrsas_dev, 3976 "Internal command timed out after %d seconds.\n", max_wait); 3977 retcode = 1; 3978 break; 3979 } 3980 } 3981 } 3982 3983 if (cmd->cmd_status == 0xFF) { 3984 device_printf(sc->mrsas_dev, "DCMD timed out after %d " 3985 "seconds from %s\n", max_wait, __func__); 3986 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", 3987 cmd->frame->dcmd.opcode); 3988 retcode = ETIMEDOUT; 3989 } 3990 return (retcode); 3991 } 3992 3993 /* 3994 * mrsas_complete_mptmfi_passthru: Completes a command 3995 * input: @sc: Adapter soft state 3996 * @cmd: Command to be completed 3997 * @status: cmd completion status 3998 * 3999 * This function is called from mrsas_complete_cmd() after an interrupt is 4000 * received from Firmware, and io_request->Function is 4001 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. 
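 *
 * Routing summary (a paraphrase of the switch below, not new behavior):
 * IOCTL-issued SCSI IO frames wake their sleeping issuer; LD map and JBOD
 * sequence-number DCMDs update the driver's map state under raidmap_lock
 * and may re-arm themselves; MR_DCMD_CTRL_EVENT_WAIT completions are handed
 * to mrsas_complete_aen(); aborts go to mrsas_complete_abort(); everything
 * else is completed via mrsas_wakeup().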
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev,
					    "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if we got an event notification */
		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* A command issued to abort another cmd returned */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/*
 * mrsas_wakeup: Completes an internal command
 * input: Adapter soft state
 *        Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 * timer is started. This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
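 *
 * The handshake is (a condensed sketch of the existing code, not new
 * logic): the issuer in mrsas_issue_blocked_cmd() sleeps while
 * cmd->cmd_status is still 0xFF,
 *
 *	while (cmd->cmd_status == 0xFF)
 *		tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
 *
 * and this function stores the real completion status before issuing
 * wakeup_one() on the same &sc->chan channel.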
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}

/*
 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
 * input: Adapter soft state
 *        Shutdown/Hibernate opcode
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * shutdown of the controller.
 */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
		return;
	}
	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
	if (sc->jbod_seq_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

/*
 * mrsas_flush_cache: Requests FW to flush all its caches
 * input: Adapter soft state
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * flushing of all caches.
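 *
 * The mailbox byte selects what gets flushed; this driver always requests
 * both, i.e. dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE
 * (the controller's own cache and the disks' write caches).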
 */
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for jbod map sync cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, le32toh(pd_sync->count));
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}

/*
 * mrsas_get_map_info: Load and validate RAID map
 * input: Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 * and validate the RAID map. It returns 0 if successful, 1 otherwise.
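 *
 * Note on buffering: sc->raidmap_mem[] holds two generations of the map.
 * mrsas_get_ld_map_info() below reads into index (sc->map_id & 1), while
 * mrsas_sync_map_info() builds its sync payload in the other half, index
 * ((sc->map_id - 1) & 1), so a fetch and a sync never share a buffer.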
 */
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
	uint8_t retcode = 0;

	sc->fast_path_io = 0;
	if (!mrsas_get_ld_map_info(sc)) {
		retcode = MR_ValidateMapInfo(sc);
		if (retcode == 0) {
			sc->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * mrsas_get_ld_map_info: Get FW's ld_map structure
 * input: Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's LD RAID map structure.
 */
static int
mrsas_get_ld_map_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	void *map;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
	if (!map) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for ld map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	/* Zero the whole map buffer (sc->max_map_sz bytes), not just the size field */
	memset(map, 0, sc->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}

/*
 * mrsas_sync_map_info: Sync the driver's LD map with FW
 * input: Adapter instance soft state
 *
 * Issues an internal command (DCMD) that writes the driver's current LD
 * target map back to Firmware.
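 *
 * A condensed sketch of the payload built below (one entry per LD, pulled
 * from the current driver map):
 *
 *	for (i = 0; i < num_lds; i++, ld_sync++) {
 *		raid = MR_LdRaidGet(i, map);
 *		ld_sync->targetId = MR_GetLDTgtId(i, map);
 *		ld_sync->seqNum = raid->seqNum;
 *	}
 *
 * MRSAS_DCMD_MBOX_PEND_FLAG in mbox.b[1] asks FW to keep the DCMD pending,
 * so its eventual completion effectively notifies the driver of the next
 * map change (see the LD map handling in mrsas_complete_mptmfi_passthru()).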
4371 */ 4372 static int 4373 mrsas_sync_map_info(struct mrsas_softc *sc) 4374 { 4375 int retcode = 0, i; 4376 struct mrsas_mfi_cmd *cmd; 4377 struct mrsas_dcmd_frame *dcmd; 4378 uint32_t num_lds; 4379 MR_LD_TARGET_SYNC *target_map = NULL; 4380 MR_DRV_RAID_MAP_ALL *map; 4381 MR_LD_RAID *raid; 4382 MR_LD_TARGET_SYNC *ld_sync; 4383 bus_addr_t map_phys_addr = 0; 4384 4385 cmd = mrsas_get_mfi_cmd(sc); 4386 if (!cmd) { 4387 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); 4388 return ENOMEM; 4389 } 4390 map = sc->ld_drv_map[sc->map_id & 1]; 4391 num_lds = map->raidMap.ldCount; 4392 4393 dcmd = &cmd->frame->dcmd; 4394 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4395 4396 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; 4397 memset(target_map, 0, sc->max_map_sz); 4398 4399 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 4400 4401 ld_sync = (MR_LD_TARGET_SYNC *) target_map; 4402 4403 for (i = 0; i < num_lds; i++, ld_sync++) { 4404 raid = MR_LdRaidGet(i, map); 4405 ld_sync->targetId = MR_GetLDTgtId(i, map); 4406 ld_sync->seqNum = raid->seqNum; 4407 } 4408 4409 dcmd->cmd = MFI_CMD_DCMD; 4410 dcmd->cmd_status = 0xFF; 4411 dcmd->sge_count = 1; 4412 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); 4413 dcmd->timeout = 0; 4414 dcmd->pad_0 = 0; 4415 dcmd->data_xfer_len = htole32(sc->current_map_sz); 4416 dcmd->mbox.b[0] = num_lds; 4417 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 4418 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); 4419 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); 4420 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); 4421 4422 sc->map_update_cmd = cmd; 4423 if (mrsas_issue_dcmd(sc, cmd)) { 4424 device_printf(sc->mrsas_dev, 4425 "Fail to send sync map info command.\n"); 4426 return (1); 4427 } 4428 return (retcode); 4429 } 4430 4431 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO 4432 * dcmd.mbox.s[0] - deviceId for this physical drive 4433 * dcmd.sge IN - ptr to returned MR_PD_INFO structure 4434 * Desc: Firmware return the physical drive info structure 4435 * 4436 */ 4437 static void 4438 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id) 4439 { 4440 int retcode; 4441 u_int8_t do_ocr = 1; 4442 struct mrsas_mfi_cmd *cmd; 4443 struct mrsas_dcmd_frame *dcmd; 4444 4445 cmd = mrsas_get_mfi_cmd(sc); 4446 4447 if (!cmd) { 4448 device_printf(sc->mrsas_dev, 4449 "Cannot alloc for get PD info cmd\n"); 4450 return; 4451 } 4452 dcmd = &cmd->frame->dcmd; 4453 4454 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info)); 4455 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4456 4457 dcmd->mbox.s[0] = htole16(device_id); 4458 dcmd->cmd = MFI_CMD_DCMD; 4459 dcmd->cmd_status = 0xFF; 4460 dcmd->sge_count = 1; 4461 dcmd->flags = MFI_FRAME_DIR_READ; 4462 dcmd->timeout = 0; 4463 dcmd->pad_0 = 0; 4464 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info)); 4465 dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO); 4466 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF); 4467 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info)); 4468 4469 if (!sc->mask_interrupts) 4470 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4471 else 4472 retcode = mrsas_issue_polled(sc, cmd); 4473 4474 if (retcode == ETIMEDOUT) 4475 goto dcmd_timeout; 4476 4477 sc->target_list[device_id].interface_type = 4478 le16toh(sc->pd_info_mem->state.ddf.pdType.intf); 4479 4480 do_ocr = 0; 4481 4482 dcmd_timeout: 4483 4484 if (do_ocr) 4485 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4486 4487 if (!sc->mask_interrupts) 
		mrsas_release_mfi_cmd(cmd);
}

/*
 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure.
 * sc: Adapter's soft state
 * target_id: Unique target ID per controller (managed by the driver)
 *            for system PDs, target IDs range from 0 to (MRSAS_MAX_PD - 1)
 *            for VDs, target IDs range from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 * return: void
 * Description: This function will be called whenever a system PD or VD is created.
 */
static void mrsas_add_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	sc->target_list[target_id].target_id = target_id;

	device_printf(sc->mrsas_dev,
	    "%s created target ID: 0x%x\n",
	    (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
	    (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
	/*
	 * Fire the DCMD to get pd_info only when interrupts are enabled,
	 * and only for system PDs.
	 */
	if (!sc->mask_interrupts && sc->pd_info_mem &&
	    (target_id < MRSAS_MAX_PD))
		mrsas_get_pd_info(sc, target_id);
}

/*
 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure.
 * sc: Adapter's soft state
 * target_id: Unique target ID per controller (managed by the driver)
 *            for system PDs, target IDs range from 0 to (MRSAS_MAX_PD - 1)
 *            for VDs, target IDs range from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 * return: void
 * Description: This function will be called whenever a system PD or VD is deleted.
 */
static void mrsas_remove_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	sc->target_list[target_id].target_id = 0xffff;
	device_printf(sc->mrsas_dev,
	    "%s deleted target ID: 0x%x\n",
	    (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
	    (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
}

/*
 * mrsas_get_pd_list: Returns FW's PD list structure
 * input: Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out the system PDs
 * exposed by Firmware.
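 *
 * Besides fetching the list, the code below reconciles driver state with
 * FW: every exposed PD is added through mrsas_add_target() if not already
 * tracked, and targets whose drives left MR_PD_STATE_SYSTEM are dropped
 * through mrsas_remove_target().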
 */
static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct MR_PD_LIST *pd_list_mem;
	struct MR_PD_ADDRESS *pd_addr;
	bus_addr_t pd_list_phys_addr = 0;
	struct mrsas_tmp_dcmd *tcmd;
	u_int16_t dev_id;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD list cmd\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc dmamap for get PD list cmd\n");
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	} else {
		pd_list_mem = tcmd->tmp_dcmd_mem;
		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Get the instance PD list */
	pd_count = MRSAS_MAX_PD;
	pd_addr = pd_list_mem->addr;
	if (le32toh(pd_list_mem->count) < pd_count) {
		memset(sc->local_pd_list, 0,
		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
			dev_id = le16toh(pd_addr->deviceId);
			sc->local_pd_list[dev_id].tid = dev_id;
			sc->local_pd_list[dev_id].driveType =
			    le16toh(pd_addr->scsiDevType);
			sc->local_pd_list[dev_id].driveState =
			    MR_PD_STATE_SYSTEM;
			if (sc->target_list[dev_id].target_id == 0xffff)
				mrsas_add_target(sc, dev_id);
			pd_addr++;
		}
		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
			if ((sc->local_pd_list[pd_index].driveState !=
			    MR_PD_STATE_SYSTEM) &&
			    (sc->target_list[pd_index].target_id !=
			    0xffff)) {
				mrsas_remove_target(sc, pd_index);
			}
		}
		/*
		 * Use a mutex/spinlock if the pd_list component size grows
		 * beyond 32 bits.
		 */
		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
		do_ocr = 0;
	}
dcmd_timeout:
	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_get_ld_list: Returns FW's LD list structure
 * input: Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure. This information is mainly used to find out the LDs supported
 * by the FW.
 */
static int
mrsas_get_ld_list(struct mrsas_softc *sc)
{
	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct MR_LD_LIST *ld_list_mem;
	bus_addr_t ld_list_phys_addr = 0;
	struct mrsas_tmp_dcmd *tcmd;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get LD list cmd\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	ld_list_size = sizeof(struct MR_LD_LIST);
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc dmamap for get LD list cmd\n");
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	} else {
		ld_list_mem = tcmd->tmp_dcmd_mem;
		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (sc->max256vdSupport)
		dcmd->mbox.b[0] = 1;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
	dcmd->pad_0 = 0;

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

#if VD_EXT_DEBUG
	printf("Number of LDs %d\n", ld_list_mem->ldCount);
#endif

	/* Get the instance LD list */
	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
			drv_tgt_id = ids + MRSAS_MAX_PD;
			if (ld_list_mem->ldList[ld_index].state != 0) {
				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
				if (sc->target_list[drv_tgt_id].target_id ==
				    0xffff)
					mrsas_add_target(sc, drv_tgt_id);
			} else {
				if (sc->target_list[drv_tgt_id].target_id !=
				    0xffff)
					mrsas_remove_target(sc,
					    drv_tgt_id);
			}
		}

		do_ocr = 0;
	}
dcmd_timeout:
	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_alloc_tmp_dcmd: Allocates memory for a temporary command
 * input: Adapter soft state
 *        Temp command
 *        Size of allocation
 *
 * Allocates DMAable memory for a temporary internal command. The allocated
 * memory is initialized to all zeros upon successful loading of the dma
 * mapped memory.
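 *
 * Typical usage, condensed from mrsas_get_pd_list()/mrsas_get_ld_list()
 * above (error handling elided):
 *
 *	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
 *	if (mrsas_alloc_tmp_dcmd(sc, tcmd, size) != SUCCESS)
 *		return (ENOMEM);
 *	dcmd->sgl.sge32[0].phys_addr = htole32(tcmd->tmp_dcmd_phys_addr);
 *	... issue the DCMD, read results from tcmd->tmp_dcmd_mem ...
 *	mrsas_free_tmp_dcmd(tcmd);
 *	free(tcmd, M_MRSAS);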
4757 */ 4758 int 4759 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 4760 struct mrsas_tmp_dcmd *tcmd, int size) 4761 { 4762 if (bus_dma_tag_create(sc->mrsas_parent_tag, 4763 1, 0, 4764 BUS_SPACE_MAXADDR_32BIT, 4765 BUS_SPACE_MAXADDR, 4766 NULL, NULL, 4767 size, 4768 1, 4769 size, 4770 BUS_DMA_ALLOCNOW, 4771 NULL, NULL, 4772 &tcmd->tmp_dcmd_tag)) { 4773 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 4774 return (ENOMEM); 4775 } 4776 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 4777 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 4778 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 4779 return (ENOMEM); 4780 } 4781 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 4782 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 4783 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 4784 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 4785 return (ENOMEM); 4786 } 4787 memset(tcmd->tmp_dcmd_mem, 0, size); 4788 return (0); 4789 } 4790 4791 /* 4792 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 4793 * temporary dcmd pointer 4794 * 4795 * Deallocates memory of the temporary command for use in the construction of 4796 * the internal DCMD. 4797 */ 4798 void 4799 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 4800 { 4801 if (tmp->tmp_dcmd_phys_addr) 4802 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 4803 if (tmp->tmp_dcmd_mem != NULL) 4804 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 4805 if (tmp->tmp_dcmd_tag != NULL) 4806 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 4807 } 4808 4809 /* 4810 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 4811 * Adapter soft state Previously issued cmd to be aborted 4812 * 4813 * This function is used to abort previously issued commands, such as AEN and 4814 * RAID map sync map commands. The abort command is sent as a DCMD internal 4815 * command and subsequently the driver will wait for a return status. The 4816 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 
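 *
 * In this driver it is used to cancel the long-lived pending DCMDs, e.g.
 * in mrsas_shutdown_ctlr():
 *
 *	if (sc->aen_cmd)
 *		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);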
4817 */ 4818 static int 4819 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 4820 struct mrsas_mfi_cmd *cmd_to_abort) 4821 { 4822 struct mrsas_mfi_cmd *cmd; 4823 struct mrsas_abort_frame *abort_fr; 4824 u_int8_t retcode = 0; 4825 unsigned long total_time = 0; 4826 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 4827 4828 cmd = mrsas_get_mfi_cmd(sc); 4829 if (!cmd) { 4830 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 4831 return (1); 4832 } 4833 abort_fr = &cmd->frame->abort; 4834 4835 /* Prepare and issue the abort frame */ 4836 abort_fr->cmd = MFI_CMD_ABORT; 4837 abort_fr->cmd_status = 0xFF; 4838 abort_fr->flags = 0; 4839 abort_fr->abort_context = cmd_to_abort->index; 4840 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 4841 abort_fr->abort_mfi_phys_addr_hi = 0; 4842 4843 cmd->sync_cmd = 1; 4844 cmd->cmd_status = 0xFF; 4845 4846 if (mrsas_issue_dcmd(sc, cmd)) { 4847 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 4848 return (1); 4849 } 4850 /* Wait for this cmd to complete */ 4851 sc->chan = (void *)&cmd; 4852 while (1) { 4853 if (cmd->cmd_status == 0xFF) { 4854 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 4855 } else 4856 break; 4857 total_time++; 4858 if (total_time >= max_wait) { 4859 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 4860 retcode = 1; 4861 break; 4862 } 4863 } 4864 4865 cmd->sync_cmd = 0; 4866 mrsas_release_mfi_cmd(cmd); 4867 return (retcode); 4868 } 4869 4870 /* 4871 * mrsas_complete_abort: Completes aborting a command input: 4872 * Adapter soft state Cmd that was issued to abort another cmd 4873 * 4874 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 4875 * change after sending the command. This function is called from 4876 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
4877 */ 4878 void 4879 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4880 { 4881 if (cmd->sync_cmd) { 4882 cmd->sync_cmd = 0; 4883 cmd->cmd_status = 0; 4884 sc->chan = (void *)&cmd; 4885 wakeup_one((void *)&sc->chan); 4886 } 4887 return; 4888 } 4889 4890 /* 4891 * mrsas_aen_handler: AEN processing callback function from thread context 4892 * input: Adapter soft state 4893 * 4894 * Asynchronous event handler 4895 */ 4896 void 4897 mrsas_aen_handler(struct mrsas_softc *sc) 4898 { 4899 union mrsas_evt_class_locale class_locale; 4900 int doscan = 0; 4901 u_int32_t seq_num; 4902 int error, fail_aen = 0; 4903 4904 if (sc == NULL) { 4905 printf("invalid instance!\n"); 4906 return; 4907 } 4908 if (sc->remove_in_progress || sc->reset_in_progress) { 4909 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n", 4910 __func__, __LINE__); 4911 return; 4912 } 4913 if (sc->evt_detail_mem) { 4914 switch (sc->evt_detail_mem->code) { 4915 case MR_EVT_PD_INSERTED: 4916 fail_aen = mrsas_get_pd_list(sc); 4917 if (!fail_aen) 4918 mrsas_bus_scan_sim(sc, sc->sim_1); 4919 else 4920 goto skip_register_aen; 4921 break; 4922 case MR_EVT_PD_REMOVED: 4923 fail_aen = mrsas_get_pd_list(sc); 4924 if (!fail_aen) 4925 mrsas_bus_scan_sim(sc, sc->sim_1); 4926 else 4927 goto skip_register_aen; 4928 break; 4929 case MR_EVT_LD_OFFLINE: 4930 case MR_EVT_CFG_CLEARED: 4931 case MR_EVT_LD_DELETED: 4932 mrsas_bus_scan_sim(sc, sc->sim_0); 4933 break; 4934 case MR_EVT_LD_CREATED: 4935 fail_aen = mrsas_get_ld_list(sc); 4936 if (!fail_aen) 4937 mrsas_bus_scan_sim(sc, sc->sim_0); 4938 else 4939 goto skip_register_aen; 4940 break; 4941 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4942 case MR_EVT_FOREIGN_CFG_IMPORTED: 4943 case MR_EVT_LD_STATE_CHANGE: 4944 doscan = 1; 4945 break; 4946 case MR_EVT_CTRL_PROP_CHANGED: 4947 fail_aen = mrsas_get_ctrl_info(sc); 4948 if (fail_aen) 4949 goto skip_register_aen; 4950 break; 4951 default: 4952 break; 4953 } 4954 } else { 4955 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4956 return; 4957 } 4958 if (doscan) { 4959 fail_aen = mrsas_get_pd_list(sc); 4960 if (!fail_aen) { 4961 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4962 mrsas_bus_scan_sim(sc, sc->sim_1); 4963 } else 4964 goto skip_register_aen; 4965 4966 fail_aen = mrsas_get_ld_list(sc); 4967 if (!fail_aen) { 4968 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 4969 mrsas_bus_scan_sim(sc, sc->sim_0); 4970 } else 4971 goto skip_register_aen; 4972 } 4973 seq_num = sc->evt_detail_mem->seq_num + 1; 4974 4975 /* Register AEN with FW for latest sequence number plus 1 */ 4976 class_locale.members.reserved = 0; 4977 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4978 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4979 4980 if (sc->aen_cmd != NULL) 4981 return; 4982 4983 mtx_lock(&sc->aen_lock); 4984 error = mrsas_register_aen(sc, seq_num, 4985 class_locale.word); 4986 mtx_unlock(&sc->aen_lock); 4987 4988 if (error) 4989 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 4990 4991 skip_register_aen: 4992 return; 4993 4994 } 4995 4996 /* 4997 * mrsas_complete_aen: Completes AEN command 4998 * input: Adapter soft state 4999 * Cmd that was issued to abort another cmd 5000 * 5001 * This function will be called from ISR and will continue event processing from 5002 * thread context by enqueuing task in ev_tq (callback function 5003 * "mrsas_aen_handler"). 
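 *
 * Deferral sketch: in interrupt context this function only flags the event,
 * wakes any poll()ers via selwakeup(), releases the MFI command and
 * enqueues sc->ev_task; the taskqueue thread then runs mrsas_aen_handler(),
 * which is free to sleep (bus rescans, blocking DCMDs).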
5004 */ 5005 void 5006 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 5007 { 5008 /* 5009 * Don't signal app if it is just an aborted previously registered 5010 * aen 5011 */ 5012 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 5013 sc->mrsas_aen_triggered = 1; 5014 mtx_lock(&sc->aen_lock); 5015 if (sc->mrsas_poll_waiting) { 5016 sc->mrsas_poll_waiting = 0; 5017 selwakeup(&sc->mrsas_select); 5018 } 5019 mtx_unlock(&sc->aen_lock); 5020 } else 5021 cmd->abort_aen = 0; 5022 5023 sc->aen_cmd = NULL; 5024 mrsas_release_mfi_cmd(cmd); 5025 5026 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 5027 5028 return; 5029 } 5030 5031 static device_method_t mrsas_methods[] = { 5032 DEVMETHOD(device_probe, mrsas_probe), 5033 DEVMETHOD(device_attach, mrsas_attach), 5034 DEVMETHOD(device_detach, mrsas_detach), 5035 DEVMETHOD(device_shutdown, mrsas_shutdown), 5036 DEVMETHOD(device_suspend, mrsas_suspend), 5037 DEVMETHOD(device_resume, mrsas_resume), 5038 DEVMETHOD(bus_print_child, bus_generic_print_child), 5039 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 5040 {0, 0} 5041 }; 5042 5043 static driver_t mrsas_driver = { 5044 "mrsas", 5045 mrsas_methods, 5046 sizeof(struct mrsas_softc) 5047 }; 5048 5049 static devclass_t mrsas_devclass; 5050 5051 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 5052 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 5053