1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <dev/mrsas/mrsas.h> 44 #include <dev/mrsas/mrsas_ioctl.h> 45 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 49 #include <sys/sysctl.h> 50 #include <sys/types.h> 51 #include <sys/sysent.h> 52 #include <sys/kthread.h> 53 #include <sys/taskqueue.h> 54 #include <sys/smp.h> 55 #include <sys/endian.h> 56 57 /* 58 * Function prototypes 59 */ 60 static d_open_t mrsas_open; 61 static d_close_t mrsas_close; 62 static d_read_t mrsas_read; 63 static d_write_t mrsas_write; 64 static d_ioctl_t mrsas_ioctl; 65 static d_poll_t mrsas_poll; 66 67 static void mrsas_ich_startup(void *arg); 68 static struct mrsas_mgmt_info mrsas_mgmt_info; 69 static struct mrsas_ident *mrsas_find_ident(device_t); 70 static int mrsas_setup_msix(struct mrsas_softc *sc); 71 static int mrsas_allocate_msix(struct mrsas_softc *sc); 72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 73 static void mrsas_flush_cache(struct mrsas_softc *sc); 74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 75 static void mrsas_ocr_thread(void *arg); 76 static int mrsas_get_map_info(struct mrsas_softc *sc); 77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 78 static int mrsas_sync_map_info(struct mrsas_softc *sc); 79 static int mrsas_get_pd_list(struct mrsas_softc *sc); 80 static int mrsas_get_ld_list(struct mrsas_softc *sc); 81 static int mrsas_setup_irq(struct mrsas_softc *sc); 82 static int mrsas_alloc_mem(struct mrsas_softc *sc); 83 static int mrsas_init_fw(struct mrsas_softc *sc); 84 static int mrsas_setup_raidmap(struct mrsas_softc *sc); 85 static void megasas_setup_jbod_map(struct mrsas_softc *sc); 86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); 87 static int mrsas_clear_intr(struct mrsas_softc *sc); 88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc); 89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); 90 static int 91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 92 struct mrsas_mfi_cmd *cmd_to_abort); 93 static void 94 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id); 95 static struct mrsas_softc * 96 mrsas_get_softc_instance(struct cdev *dev, 97 u_long cmd, caddr_t arg); 98 u_int32_t 99 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset); 100 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 101 u_int8_t 102 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 103 struct mrsas_mfi_cmd *mfi_cmd); 104 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); 105 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 106 int mrsas_init_adapter(struct mrsas_softc *sc); 107 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 108 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 109 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 110 int mrsas_ioc_init(struct mrsas_softc *sc); 111 int mrsas_bus_scan(struct mrsas_softc *sc); 112 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 113 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 114 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); 115 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); 116 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 117 int mrsas_reset_targets(struct mrsas_softc *sc); 118 int 119 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 120 struct mrsas_mfi_cmd *cmd); 121 int 122 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 123 int size); 124 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 125 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 126 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 127 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 128 void mrsas_disable_intr(struct mrsas_softc *sc); 129 void mrsas_enable_intr(struct mrsas_softc *sc); 130 void mrsas_free_ioc_cmd(struct mrsas_softc *sc); 131 void mrsas_free_mem(struct mrsas_softc *sc); 132 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 133 void mrsas_isr(void *arg); 134 void mrsas_teardown_intr(struct mrsas_softc *sc); 135 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 136 void mrsas_kill_hba(struct mrsas_softc *sc); 137 void mrsas_aen_handler(struct mrsas_softc *sc); 138 void 139 mrsas_write_reg(struct mrsas_softc *sc, int offset, 140 u_int32_t value); 141 void 142 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 143 u_int32_t req_desc_hi); 144 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 145 void 146 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 147 struct mrsas_mfi_cmd *cmd, u_int8_t status); 148 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); 149 150 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd 151 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 152 153 extern int mrsas_cam_attach(struct mrsas_softc *sc); 154 extern void mrsas_cam_detach(struct mrsas_softc *sc); 155 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 156 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 157 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 158 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 159 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); 160 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 161 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 162 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 163 extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 164 extern void mrsas_xpt_release(struct mrsas_softc *sc); 165 extern MRSAS_REQUEST_DESCRIPTOR_UNION * 166 mrsas_get_request_desc(struct mrsas_softc *sc, 167 u_int16_t index); 168 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 169 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); 170 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); 171 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 172 173 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, 174 union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus, 175 u_int32_t data_length, u_int8_t *sense); 176 void 177 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, 178 u_int32_t req_desc_hi); 179 180 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 181 "MRSAS Driver Parameters"); 182 183 /* 184 * PCI device struct and table 185 * 186 */ 187 typedef struct mrsas_ident { 188 uint16_t vendor; 189 uint16_t device; 190 uint16_t subvendor; 191 uint16_t subdevice; 192 const char *desc; 193 } MRSAS_CTLR_ID; 194 195 MRSAS_CTLR_ID device_table[] = { 196 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, 197 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, 198 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, 199 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"}, 200 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"}, 201 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"}, 202 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"}, 203 {0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"}, 204 {0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"}, 205 {0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"}, 206 {0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"}, 207 {0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"}, 208 {0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"}, 209 {0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"}, 210 {0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"}, 211 {0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"}, 212 {0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"}, 213 {0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"}, 214 {0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"}, 215 {0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"}, 216 {0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"}, 217 {0, 0, 0, 0, NULL} 218 }; 219 220 /* 221 * Character device entry points 222 * 223 */ 224 static struct cdevsw mrsas_cdevsw = { 225 .d_version = D_VERSION, 226 .d_open = mrsas_open, 227 .d_close = mrsas_close, 228 .d_read = mrsas_read, 229 .d_write = mrsas_write, 230 .d_ioctl = mrsas_ioctl, 231 .d_poll = mrsas_poll, 232 .d_name = "mrsas", 233 }; 234 235 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); 236 237 /* 238 * In the cdevsw routines, we find our softc by using the si_drv1 member of 239 * struct cdev. We set this variable to point to our softc in our attach 240 * routine when we create the /dev entry. 241 */ 242 int 243 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 244 { 245 struct mrsas_softc *sc; 246 247 sc = dev->si_drv1; 248 return (0); 249 } 250 251 int 252 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 253 { 254 struct mrsas_softc *sc; 255 256 sc = dev->si_drv1; 257 return (0); 258 } 259 260 int 261 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) 262 { 263 struct mrsas_softc *sc; 264 265 sc = dev->si_drv1; 266 return (0); 267 } 268 int 269 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) 270 { 271 struct mrsas_softc *sc; 272 273 sc = dev->si_drv1; 274 return (0); 275 } 276 277 u_int32_t 278 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset) 279 { 280 u_int32_t i = 0, ret_val; 281 282 if (sc->is_aero) { 283 do { 284 ret_val = mrsas_read_reg(sc, offset); 285 i++; 286 } while(ret_val == 0 && i < 3); 287 } else 288 ret_val = mrsas_read_reg(sc, offset); 289 290 return ret_val; 291 } 292 293 /* 294 * Register Read/Write Functions 295 * 296 */ 297 void 298 mrsas_write_reg(struct mrsas_softc *sc, int offset, 299 u_int32_t value) 300 { 301 bus_space_tag_t bus_tag = sc->bus_tag; 302 bus_space_handle_t bus_handle = sc->bus_handle; 303 304 bus_space_write_4(bus_tag, bus_handle, offset, value); 305 } 306 307 u_int32_t 308 mrsas_read_reg(struct mrsas_softc *sc, int offset) 309 { 310 bus_space_tag_t bus_tag = sc->bus_tag; 311 bus_space_handle_t bus_handle = sc->bus_handle; 312 313 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); 314 } 315 316 /* 317 * Interrupt Disable/Enable/Clear Functions 318 * 319 */ 320 void 321 mrsas_disable_intr(struct mrsas_softc *sc) 322 { 323 u_int32_t mask = 0xFFFFFFFF; 324 u_int32_t status; 325 326 sc->mask_interrupts = 1; 327 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); 328 /* Dummy read to force pci flush */ 329 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 330 } 331 332 void 333 mrsas_enable_intr(struct mrsas_softc *sc) 334 { 335 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; 336 u_int32_t status; 337 338 sc->mask_interrupts = 0; 339 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); 340 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 341 342 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); 343 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 344 } 345 346 static int 347 mrsas_clear_intr(struct mrsas_softc *sc) 348 { 349 u_int32_t status; 350 351 /* Read received interrupt */ 352 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 353 354 /* Not our interrupt, so just return */ 355 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 356 return (0); 357 358 /* We got a reply interrupt */ 359 return (1); 360 } 361 362 /* 363 * PCI Support Functions 364 * 365 */ 366 static struct mrsas_ident * 367 mrsas_find_ident(device_t dev) 368 { 369 struct mrsas_ident *pci_device; 370 371 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { 372 if ((pci_device->vendor == pci_get_vendor(dev)) && 373 (pci_device->device == pci_get_device(dev)) && 374 ((pci_device->subvendor == pci_get_subvendor(dev)) || 375 (pci_device->subvendor == 0xffff)) && 376 ((pci_device->subdevice == pci_get_subdevice(dev)) || 377 (pci_device->subdevice == 0xffff))) 378 return (pci_device); 379 } 380 return (NULL); 381 } 382 383 static int 384 mrsas_probe(device_t dev) 385 { 386 static u_int8_t first_ctrl = 1; 387 struct mrsas_ident *id; 388 389 if ((id = mrsas_find_ident(dev)) != NULL) { 390 if (first_ctrl) { 391 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n", 392 MRSAS_VERSION); 393 first_ctrl = 0; 394 } 395 device_set_desc(dev, id->desc); 396 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ 397 return (-30); 398 } 399 return (ENXIO); 400 } 401 402 /* 403 * mrsas_setup_sysctl: setup sysctl values for mrsas 404 * input: Adapter instance soft state 405 * 406 * Setup sysctl entries for mrsas driver. 407 */ 408 static void 409 mrsas_setup_sysctl(struct mrsas_softc *sc) 410 { 411 struct sysctl_ctx_list *sysctl_ctx = NULL; 412 struct sysctl_oid *sysctl_tree = NULL; 413 char tmpstr[80], tmpstr2[80]; 414 415 /* 416 * Setup the sysctl variable so the user can change the debug level 417 * on the fly. 418 */ 419 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", 420 device_get_unit(sc->mrsas_dev)); 421 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); 422 423 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); 424 if (sysctl_ctx != NULL) 425 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); 426 427 if (sysctl_tree == NULL) { 428 sysctl_ctx_init(&sc->sysctl_ctx); 429 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 430 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, 431 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr); 432 if (sc->sysctl_tree == NULL) 433 return; 434 sysctl_ctx = &sc->sysctl_ctx; 435 sysctl_tree = sc->sysctl_tree; 436 } 437 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 438 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, 439 "Disable the use of OCR"); 440 441 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 442 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, 443 strlen(MRSAS_VERSION), "driver version"); 444 445 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 446 OID_AUTO, "reset_count", CTLFLAG_RD, 447 &sc->reset_count, 0, "number of ocr from start of the day"); 448 449 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 450 OID_AUTO, "fw_outstanding", CTLFLAG_RD, 451 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); 452 453 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 454 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 455 &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); 456 457 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 458 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, 459 "Driver debug level"); 460 461 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 462 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 463 0, "Driver IO timeout value in mili-second."); 464 465 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 466 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, 467 &sc->mrsas_fw_fault_check_delay, 468 0, "FW fault check thread delay in seconds. <default is 1 sec>"); 469 470 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 471 OID_AUTO, "reset_in_progress", CTLFLAG_RD, 472 &sc->reset_in_progress, 0, "ocr in progress status"); 473 474 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 475 OID_AUTO, "block_sync_cache", CTLFLAG_RW, 476 &sc->block_sync_cache, 0, 477 "Block SYNC CACHE at driver. <default: 0, send it to FW>"); 478 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 479 OID_AUTO, "stream detection", CTLFLAG_RW, 480 &sc->drv_stream_detection, 0, 481 "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>"); 482 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 483 OID_AUTO, "prp_count", CTLFLAG_RD, 484 &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built"); 485 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 486 OID_AUTO, "SGE holes", CTLFLAG_RD, 487 &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs"); 488 } 489 490 /* 491 * mrsas_get_tunables: get tunable parameters. 492 * input: Adapter instance soft state 493 * 494 * Get tunable parameters. This will help to debug driver at boot time. 495 */ 496 static void 497 mrsas_get_tunables(struct mrsas_softc *sc) 498 { 499 char tmpstr[80]; 500 501 /* XXX default to some debugging for now */ 502 sc->mrsas_debug = 503 (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN); 504 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; 505 sc->mrsas_fw_fault_check_delay = 1; 506 sc->reset_count = 0; 507 sc->reset_in_progress = 0; 508 sc->block_sync_cache = 0; 509 sc->drv_stream_detection = 1; 510 511 /* 512 * Grab the global variables. 513 */ 514 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); 515 516 /* 517 * Grab the global variables. 518 */ 519 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); 520 521 /* Grab the unit-instance variables */ 522 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", 523 device_get_unit(sc->mrsas_dev)); 524 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); 525 } 526 527 /* 528 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. 529 * Used to get sequence number at driver load time. 530 * input: Adapter soft state 531 * 532 * Allocates DMAable memory for the event log info internal command. 533 */ 534 int 535 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) 536 { 537 int el_info_size; 538 539 /* Allocate get event log info command */ 540 el_info_size = sizeof(struct mrsas_evt_log_info); 541 if (bus_dma_tag_create(sc->mrsas_parent_tag, 542 1, 0, 543 BUS_SPACE_MAXADDR_32BIT, 544 BUS_SPACE_MAXADDR, 545 NULL, NULL, 546 el_info_size, 547 1, 548 el_info_size, 549 BUS_DMA_ALLOCNOW, 550 NULL, NULL, 551 &sc->el_info_tag)) { 552 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); 553 return (ENOMEM); 554 } 555 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, 556 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { 557 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); 558 return (ENOMEM); 559 } 560 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, 561 sc->el_info_mem, el_info_size, mrsas_addr_cb, 562 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { 563 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); 564 return (ENOMEM); 565 } 566 memset(sc->el_info_mem, 0, el_info_size); 567 return (0); 568 } 569 570 /* 571 * mrsas_free_evt_info_cmd: Free memory for Event log info command 572 * input: Adapter soft state 573 * 574 * Deallocates memory for the event log info internal command. 575 */ 576 void 577 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 578 { 579 if (sc->el_info_phys_addr) 580 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 581 if (sc->el_info_mem != NULL) 582 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 583 if (sc->el_info_tag != NULL) 584 bus_dma_tag_destroy(sc->el_info_tag); 585 } 586 587 /* 588 * mrsas_get_seq_num: Get latest event sequence number 589 * @sc: Adapter soft state 590 * @eli: Firmware event log sequence number information. 591 * 592 * Firmware maintains a log of all events in a non-volatile area. 593 * Driver get the sequence number using DCMD 594 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 595 */ 596 597 static int 598 mrsas_get_seq_num(struct mrsas_softc *sc, 599 struct mrsas_evt_log_info *eli) 600 { 601 struct mrsas_mfi_cmd *cmd; 602 struct mrsas_dcmd_frame *dcmd; 603 u_int8_t do_ocr = 1, retcode = 0; 604 605 cmd = mrsas_get_mfi_cmd(sc); 606 607 if (!cmd) { 608 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 609 return -ENOMEM; 610 } 611 dcmd = &cmd->frame->dcmd; 612 613 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { 614 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); 615 mrsas_release_mfi_cmd(cmd); 616 return -ENOMEM; 617 } 618 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 619 620 dcmd->cmd = MFI_CMD_DCMD; 621 dcmd->cmd_status = 0x0; 622 dcmd->sge_count = 1; 623 dcmd->flags = htole16(MFI_FRAME_DIR_READ); 624 dcmd->timeout = 0; 625 dcmd->pad_0 = 0; 626 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info)); 627 dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO); 628 dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF); 629 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info)); 630 631 retcode = mrsas_issue_blocked_cmd(sc, cmd); 632 if (retcode == ETIMEDOUT) 633 goto dcmd_timeout; 634 635 do_ocr = 0; 636 /* 637 * Copy the data back into callers buffer 638 */ 639 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); 640 mrsas_free_evt_log_info_cmd(sc); 641 642 dcmd_timeout: 643 if (do_ocr) 644 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 645 else 646 mrsas_release_mfi_cmd(cmd); 647 648 return retcode; 649 } 650 651 /* 652 * mrsas_register_aen: Register for asynchronous event notification 653 * @sc: Adapter soft state 654 * @seq_num: Starting sequence number 655 * @class_locale: Class of the event 656 * 657 * This function subscribes for events beyond the @seq_num 658 * and type @class_locale. 659 * 660 */ 661 static int 662 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, 663 u_int32_t class_locale_word) 664 { 665 int ret_val; 666 struct mrsas_mfi_cmd *cmd; 667 struct mrsas_dcmd_frame *dcmd; 668 union mrsas_evt_class_locale curr_aen; 669 union mrsas_evt_class_locale prev_aen; 670 671 /* 672 * If there an AEN pending already (aen_cmd), check if the 673 * class_locale of that pending AEN is inclusive of the new AEN 674 * request we currently have. If it is, then we don't have to do 675 * anything. In other words, whichever events the current AEN request 676 * is subscribing to, have already been subscribed to. If the old_cmd 677 * is _not_ inclusive, then we have to abort that command, form a 678 * class_locale that is superset of both old and current and re-issue 679 * to the FW 680 */ 681 682 curr_aen.word = class_locale_word; 683 684 if (sc->aen_cmd) { 685 prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]); 686 687 /* 688 * A class whose enum value is smaller is inclusive of all 689 * higher values. If a PROGRESS (= -1) was previously 690 * registered, then a new registration requests for higher 691 * classes need not be sent to FW. They are automatically 692 * included. Locale numbers don't have such hierarchy. They 693 * are bitmap values 694 */ 695 if ((prev_aen.members.class <= curr_aen.members.class) && 696 !((prev_aen.members.locale & curr_aen.members.locale) ^ 697 curr_aen.members.locale)) { 698 /* 699 * Previously issued event registration includes 700 * current request. Nothing to do. 701 */ 702 return 0; 703 } else { 704 curr_aen.members.locale |= prev_aen.members.locale; 705 706 if (prev_aen.members.class < curr_aen.members.class) 707 curr_aen.members.class = prev_aen.members.class; 708 709 sc->aen_cmd->abort_aen = 1; 710 ret_val = mrsas_issue_blocked_abort_cmd(sc, 711 sc->aen_cmd); 712 713 if (ret_val) { 714 printf("mrsas: Failed to abort previous AEN command\n"); 715 return ret_val; 716 } else 717 sc->aen_cmd = NULL; 718 } 719 } 720 cmd = mrsas_get_mfi_cmd(sc); 721 if (!cmd) 722 return ENOMEM; 723 724 dcmd = &cmd->frame->dcmd; 725 726 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); 727 728 /* 729 * Prepare DCMD for aen registration 730 */ 731 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 732 733 dcmd->cmd = MFI_CMD_DCMD; 734 dcmd->cmd_status = 0x0; 735 dcmd->sge_count = 1; 736 dcmd->flags = htole16(MFI_FRAME_DIR_READ); 737 dcmd->timeout = 0; 738 dcmd->pad_0 = 0; 739 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail)); 740 dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT); 741 dcmd->mbox.w[0] = htole32(seq_num); 742 sc->last_seq_num = seq_num; 743 dcmd->mbox.w[1] = htole32(curr_aen.word); 744 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF); 745 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail)); 746 747 if (sc->aen_cmd != NULL) { 748 mrsas_release_mfi_cmd(cmd); 749 return 0; 750 } 751 /* 752 * Store reference to the cmd used to register for AEN. When an 753 * application wants us to register for AEN, we have to abort this 754 * cmd and re-register with a new EVENT LOCALE supplied by that app 755 */ 756 sc->aen_cmd = cmd; 757 758 /* 759 * Issue the aen registration frame 760 */ 761 if (mrsas_issue_dcmd(sc, cmd)) { 762 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); 763 return (1); 764 } 765 return 0; 766 } 767 768 /* 769 * mrsas_start_aen: Subscribes to AEN during driver load time 770 * @instance: Adapter soft state 771 */ 772 static int 773 mrsas_start_aen(struct mrsas_softc *sc) 774 { 775 struct mrsas_evt_log_info eli; 776 union mrsas_evt_class_locale class_locale; 777 778 /* Get the latest sequence number from FW */ 779 780 memset(&eli, 0, sizeof(eli)); 781 782 if (mrsas_get_seq_num(sc, &eli)) 783 return -1; 784 785 /* Register AEN with FW for latest sequence number plus 1 */ 786 class_locale.members.reserved = 0; 787 class_locale.members.locale = MR_EVT_LOCALE_ALL; 788 class_locale.members.class = MR_EVT_CLASS_DEBUG; 789 790 return mrsas_register_aen(sc, eli.newest_seq_num + 1, 791 class_locale.word); 792 793 } 794 795 /* 796 * mrsas_setup_msix: Allocate MSI-x vectors 797 * @sc: adapter soft state 798 */ 799 static int 800 mrsas_setup_msix(struct mrsas_softc *sc) 801 { 802 int i; 803 804 for (i = 0; i < sc->msix_vectors; i++) { 805 sc->irq_context[i].sc = sc; 806 sc->irq_context[i].MSIxIndex = i; 807 sc->irq_id[i] = i + 1; 808 sc->mrsas_irq[i] = bus_alloc_resource_any 809 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] 810 ,RF_ACTIVE); 811 if (sc->mrsas_irq[i] == NULL) { 812 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); 813 goto irq_alloc_failed; 814 } 815 if (bus_setup_intr(sc->mrsas_dev, 816 sc->mrsas_irq[i], 817 INTR_MPSAFE | INTR_TYPE_CAM, 818 NULL, mrsas_isr, &sc->irq_context[i], 819 &sc->intr_handle[i])) { 820 device_printf(sc->mrsas_dev, 821 "Cannot set up MSI-x interrupt handler\n"); 822 goto irq_alloc_failed; 823 } 824 } 825 return SUCCESS; 826 827 irq_alloc_failed: 828 mrsas_teardown_intr(sc); 829 return (FAIL); 830 } 831 832 /* 833 * mrsas_allocate_msix: Setup MSI-x vectors 834 * @sc: adapter soft state 835 */ 836 static int 837 mrsas_allocate_msix(struct mrsas_softc *sc) 838 { 839 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { 840 device_printf(sc->mrsas_dev, "Using MSI-X with %d number" 841 " of vectors\n", sc->msix_vectors); 842 } else { 843 device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); 844 goto irq_alloc_failed; 845 } 846 return SUCCESS; 847 848 irq_alloc_failed: 849 mrsas_teardown_intr(sc); 850 return (FAIL); 851 } 852 853 /* 854 * mrsas_attach: PCI entry point 855 * input: pointer to device struct 856 * 857 * Performs setup of PCI and registers, initializes mutexes and linked lists, 858 * registers interrupts and CAM, and initializes the adapter/controller to 859 * its proper state. 860 */ 861 static int 862 mrsas_attach(device_t dev) 863 { 864 struct mrsas_softc *sc = device_get_softc(dev); 865 uint32_t cmd, error; 866 867 memset(sc, 0, sizeof(struct mrsas_softc)); 868 869 /* Look up our softc and initialize its fields. */ 870 sc->mrsas_dev = dev; 871 sc->device_id = pci_get_device(dev); 872 873 switch (sc->device_id) { 874 case MRSAS_INVADER: 875 case MRSAS_FURY: 876 case MRSAS_INTRUDER: 877 case MRSAS_INTRUDER_24: 878 case MRSAS_CUTLASS_52: 879 case MRSAS_CUTLASS_53: 880 sc->mrsas_gen3_ctrl = 1; 881 break; 882 case MRSAS_VENTURA: 883 case MRSAS_CRUSADER: 884 case MRSAS_HARPOON: 885 case MRSAS_TOMCAT: 886 case MRSAS_VENTURA_4PORT: 887 case MRSAS_CRUSADER_4PORT: 888 sc->is_ventura = true; 889 break; 890 case MRSAS_AERO_10E1: 891 case MRSAS_AERO_10E5: 892 device_printf(dev, "Adapter is in configurable secure mode\n"); 893 case MRSAS_AERO_10E2: 894 case MRSAS_AERO_10E6: 895 sc->is_aero = true; 896 break; 897 case MRSAS_AERO_10E0: 898 case MRSAS_AERO_10E3: 899 case MRSAS_AERO_10E4: 900 case MRSAS_AERO_10E7: 901 device_printf(dev, "Adapter is in non-secure mode\n"); 902 return SUCCESS; 903 } 904 905 mrsas_get_tunables(sc); 906 907 /* 908 * Set up PCI and registers 909 */ 910 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 911 /* Force the busmaster enable bit on. */ 912 cmd |= PCIM_CMD_BUSMASTEREN; 913 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 914 915 /* For Ventura/Aero system registers are mapped to BAR0 */ 916 if (sc->is_ventura || sc->is_aero) 917 sc->reg_res_id = PCIR_BAR(0); /* BAR0 offset */ 918 else 919 sc->reg_res_id = PCIR_BAR(1); /* BAR1 offset */ 920 921 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 922 &(sc->reg_res_id), RF_ACTIVE)) 923 == NULL) { 924 device_printf(dev, "Cannot allocate PCI registers\n"); 925 goto attach_fail; 926 } 927 sc->bus_tag = rman_get_bustag(sc->reg_res); 928 sc->bus_handle = rman_get_bushandle(sc->reg_res); 929 930 /* Intialize mutexes */ 931 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); 932 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); 933 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); 934 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); 935 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); 936 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); 937 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); 938 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); 939 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF); 940 941 /* Intialize linked list */ 942 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); 943 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); 944 945 mrsas_atomic_set(&sc->fw_outstanding, 0); 946 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 947 mrsas_atomic_set(&sc->prp_count, 0); 948 mrsas_atomic_set(&sc->sge_holes, 0); 949 950 sc->io_cmds_highwater = 0; 951 952 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 953 sc->UnevenSpanSupport = 0; 954 955 sc->msix_enable = 0; 956 957 /* Initialize Firmware */ 958 if (mrsas_init_fw(sc) != SUCCESS) { 959 goto attach_fail_fw; 960 } 961 /* Register mrsas to CAM layer */ 962 if ((mrsas_cam_attach(sc) != SUCCESS)) { 963 goto attach_fail_cam; 964 } 965 /* Register IRQs */ 966 if (mrsas_setup_irq(sc) != SUCCESS) { 967 goto attach_fail_irq; 968 } 969 error = mrsas_kproc_create(mrsas_ocr_thread, sc, 970 &sc->ocr_thread, 0, 0, "mrsas_ocr%d", 971 device_get_unit(sc->mrsas_dev)); 972 if (error) { 973 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error); 974 goto attach_fail_ocr_thread; 975 } 976 /* 977 * After FW initialization and OCR thread creation 978 * we will defer the cdev creation, AEN setup on ICH callback 979 */ 980 sc->mrsas_ich.ich_func = mrsas_ich_startup; 981 sc->mrsas_ich.ich_arg = sc; 982 if (config_intrhook_establish(&sc->mrsas_ich) != 0) { 983 device_printf(sc->mrsas_dev, "Config hook is already established\n"); 984 } 985 mrsas_setup_sysctl(sc); 986 return SUCCESS; 987 988 attach_fail_ocr_thread: 989 if (sc->ocr_thread_active) 990 wakeup(&sc->ocr_chan); 991 attach_fail_irq: 992 mrsas_teardown_intr(sc); 993 attach_fail_cam: 994 mrsas_cam_detach(sc); 995 attach_fail_fw: 996 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ 997 if (sc->msix_enable == 1) 998 pci_release_msi(sc->mrsas_dev); 999 mrsas_free_mem(sc); 1000 mtx_destroy(&sc->sim_lock); 1001 mtx_destroy(&sc->aen_lock); 1002 mtx_destroy(&sc->pci_lock); 1003 mtx_destroy(&sc->io_lock); 1004 mtx_destroy(&sc->ioctl_lock); 1005 mtx_destroy(&sc->mpt_cmd_pool_lock); 1006 mtx_destroy(&sc->mfi_cmd_pool_lock); 1007 mtx_destroy(&sc->raidmap_lock); 1008 mtx_destroy(&sc->stream_lock); 1009 attach_fail: 1010 if (sc->reg_res) { 1011 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, 1012 sc->reg_res_id, sc->reg_res); 1013 } 1014 return (ENXIO); 1015 } 1016 1017 /* 1018 * Interrupt config hook 1019 */ 1020 static void 1021 mrsas_ich_startup(void *arg) 1022 { 1023 int i = 0; 1024 struct mrsas_softc *sc = (struct mrsas_softc *)arg; 1025 1026 /* 1027 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs 1028 */ 1029 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS, 1030 IOCTL_SEMA_DESCRIPTION); 1031 1032 /* Create a /dev entry for mrsas controller. */ 1033 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT, 1034 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", 1035 device_get_unit(sc->mrsas_dev)); 1036 1037 if (device_get_unit(sc->mrsas_dev) == 0) { 1038 make_dev_alias_p(MAKEDEV_CHECKNAME, 1039 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev, 1040 "megaraid_sas_ioctl_node"); 1041 } 1042 if (sc->mrsas_cdev) 1043 sc->mrsas_cdev->si_drv1 = sc; 1044 1045 /* 1046 * Add this controller to mrsas_mgmt_info structure so that it can be 1047 * exported to management applications 1048 */ 1049 if (device_get_unit(sc->mrsas_dev) == 0) 1050 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); 1051 1052 mrsas_mgmt_info.count++; 1053 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; 1054 mrsas_mgmt_info.max_index++; 1055 1056 /* Enable Interrupts */ 1057 mrsas_enable_intr(sc); 1058 1059 /* Call DCMD get_pd_info for all system PDs */ 1060 for (i = 0; i < MRSAS_MAX_PD; i++) { 1061 if ((sc->target_list[i].target_id != 0xffff) && 1062 sc->pd_info_mem) 1063 mrsas_get_pd_info(sc, sc->target_list[i].target_id); 1064 } 1065 1066 /* Initiate AEN (Asynchronous Event Notification) */ 1067 if (mrsas_start_aen(sc)) { 1068 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! " 1069 "Further events from the controller will not be communicated.\n" 1070 "Either there is some problem in the controller" 1071 "or the controller does not support AEN.\n" 1072 "Please contact to the SUPPORT TEAM if the problem persists\n"); 1073 } 1074 if (sc->mrsas_ich.ich_arg != NULL) { 1075 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n"); 1076 config_intrhook_disestablish(&sc->mrsas_ich); 1077 sc->mrsas_ich.ich_arg = NULL; 1078 } 1079 } 1080 1081 /* 1082 * mrsas_detach: De-allocates and teardown resources 1083 * input: pointer to device struct 1084 * 1085 * This function is the entry point for device disconnect and detach. 1086 * It performs memory de-allocations, shutdown of the controller and various 1087 * teardown and destroy resource functions. 1088 */ 1089 static int 1090 mrsas_detach(device_t dev) 1091 { 1092 struct mrsas_softc *sc; 1093 int i = 0; 1094 1095 sc = device_get_softc(dev); 1096 sc->remove_in_progress = 1; 1097 1098 /* Destroy the character device so no other IOCTL will be handled */ 1099 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev) 1100 destroy_dev(sc->mrsas_linux_emulator_cdev); 1101 destroy_dev(sc->mrsas_cdev); 1102 1103 /* 1104 * Take the instance off the instance array. Note that we will not 1105 * decrement the max_index. We let this array be sparse array 1106 */ 1107 for (i = 0; i < mrsas_mgmt_info.max_index; i++) { 1108 if (mrsas_mgmt_info.sc_ptr[i] == sc) { 1109 mrsas_mgmt_info.count--; 1110 mrsas_mgmt_info.sc_ptr[i] = NULL; 1111 break; 1112 } 1113 } 1114 1115 if (sc->ocr_thread_active) 1116 wakeup(&sc->ocr_chan); 1117 while (sc->reset_in_progress) { 1118 i++; 1119 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1120 mrsas_dprint(sc, MRSAS_INFO, 1121 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1122 } 1123 pause("mr_shutdown", hz); 1124 } 1125 i = 0; 1126 while (sc->ocr_thread_active) { 1127 i++; 1128 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1129 mrsas_dprint(sc, MRSAS_INFO, 1130 "[%2d]waiting for " 1131 "mrsas_ocr thread to quit ocr %d\n", i, 1132 sc->ocr_thread_active); 1133 } 1134 pause("mr_shutdown", hz); 1135 } 1136 mrsas_flush_cache(sc); 1137 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1138 mrsas_disable_intr(sc); 1139 1140 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { 1141 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 1142 free(sc->streamDetectByLD[i], M_MRSAS); 1143 free(sc->streamDetectByLD, M_MRSAS); 1144 sc->streamDetectByLD = NULL; 1145 } 1146 1147 mrsas_cam_detach(sc); 1148 mrsas_teardown_intr(sc); 1149 mrsas_free_mem(sc); 1150 mtx_destroy(&sc->sim_lock); 1151 mtx_destroy(&sc->aen_lock); 1152 mtx_destroy(&sc->pci_lock); 1153 mtx_destroy(&sc->io_lock); 1154 mtx_destroy(&sc->ioctl_lock); 1155 mtx_destroy(&sc->mpt_cmd_pool_lock); 1156 mtx_destroy(&sc->mfi_cmd_pool_lock); 1157 mtx_destroy(&sc->raidmap_lock); 1158 mtx_destroy(&sc->stream_lock); 1159 1160 /* Wait for all the semaphores to be released */ 1161 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS) 1162 pause("mr_shutdown", hz); 1163 1164 /* Destroy the counting semaphore created for Ioctl */ 1165 sema_destroy(&sc->ioctl_count_sema); 1166 1167 if (sc->reg_res) { 1168 bus_release_resource(sc->mrsas_dev, 1169 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); 1170 } 1171 if (sc->sysctl_tree != NULL) 1172 sysctl_ctx_free(&sc->sysctl_ctx); 1173 1174 return (0); 1175 } 1176 1177 static int 1178 mrsas_shutdown(device_t dev) 1179 { 1180 struct mrsas_softc *sc; 1181 int i; 1182 1183 sc = device_get_softc(dev); 1184 sc->remove_in_progress = 1; 1185 if (!KERNEL_PANICKED()) { 1186 if (sc->ocr_thread_active) 1187 wakeup(&sc->ocr_chan); 1188 i = 0; 1189 while (sc->reset_in_progress && i < 15) { 1190 i++; 1191 if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) { 1192 mrsas_dprint(sc, MRSAS_INFO, 1193 "[%2d]waiting for OCR to be finished " 1194 "from %s\n", i, __func__); 1195 } 1196 pause("mr_shutdown", hz); 1197 } 1198 if (sc->reset_in_progress) { 1199 mrsas_dprint(sc, MRSAS_INFO, 1200 "gave up waiting for OCR to be finished\n"); 1201 } 1202 } 1203 1204 mrsas_flush_cache(sc); 1205 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1206 mrsas_disable_intr(sc); 1207 return (0); 1208 } 1209 1210 /* 1211 * mrsas_free_mem: Frees allocated memory 1212 * input: Adapter instance soft state 1213 * 1214 * This function is called from mrsas_detach() to free previously allocated 1215 * memory. 1216 */ 1217 void 1218 mrsas_free_mem(struct mrsas_softc *sc) 1219 { 1220 int i; 1221 u_int32_t max_fw_cmds; 1222 struct mrsas_mfi_cmd *mfi_cmd; 1223 struct mrsas_mpt_cmd *mpt_cmd; 1224 1225 /* 1226 * Free RAID map memory 1227 */ 1228 for (i = 0; i < 2; i++) { 1229 if (sc->raidmap_phys_addr[i]) 1230 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]); 1231 if (sc->raidmap_mem[i] != NULL) 1232 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]); 1233 if (sc->raidmap_tag[i] != NULL) 1234 bus_dma_tag_destroy(sc->raidmap_tag[i]); 1235 1236 if (sc->ld_drv_map[i] != NULL) 1237 free(sc->ld_drv_map[i], M_MRSAS); 1238 } 1239 for (i = 0; i < 2; i++) { 1240 if (sc->jbodmap_phys_addr[i]) 1241 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]); 1242 if (sc->jbodmap_mem[i] != NULL) 1243 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]); 1244 if (sc->jbodmap_tag[i] != NULL) 1245 bus_dma_tag_destroy(sc->jbodmap_tag[i]); 1246 } 1247 /* 1248 * Free version buffer memory 1249 */ 1250 if (sc->verbuf_phys_addr) 1251 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap); 1252 if (sc->verbuf_mem != NULL) 1253 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap); 1254 if (sc->verbuf_tag != NULL) 1255 bus_dma_tag_destroy(sc->verbuf_tag); 1256 1257 /* 1258 * Free sense buffer memory 1259 */ 1260 if (sc->sense_phys_addr) 1261 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap); 1262 if (sc->sense_mem != NULL) 1263 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap); 1264 if (sc->sense_tag != NULL) 1265 bus_dma_tag_destroy(sc->sense_tag); 1266 1267 /* 1268 * Free chain frame memory 1269 */ 1270 if (sc->chain_frame_phys_addr) 1271 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap); 1272 if (sc->chain_frame_mem != NULL) 1273 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap); 1274 if (sc->chain_frame_tag != NULL) 1275 bus_dma_tag_destroy(sc->chain_frame_tag); 1276 1277 /* 1278 * Free IO Request memory 1279 */ 1280 if (sc->io_request_phys_addr) 1281 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap); 1282 if (sc->io_request_mem != NULL) 1283 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap); 1284 if (sc->io_request_tag != NULL) 1285 bus_dma_tag_destroy(sc->io_request_tag); 1286 1287 /* 1288 * Free Reply Descriptor memory 1289 */ 1290 if (sc->reply_desc_phys_addr) 1291 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap); 1292 if (sc->reply_desc_mem != NULL) 1293 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap); 1294 if (sc->reply_desc_tag != NULL) 1295 bus_dma_tag_destroy(sc->reply_desc_tag); 1296 1297 /* 1298 * Free event detail memory 1299 */ 1300 if (sc->evt_detail_phys_addr) 1301 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap); 1302 if (sc->evt_detail_mem != NULL) 1303 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap); 1304 if (sc->evt_detail_tag != NULL) 1305 bus_dma_tag_destroy(sc->evt_detail_tag); 1306 1307 /* 1308 * Free PD info memory 1309 */ 1310 if (sc->pd_info_phys_addr) 1311 bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap); 1312 if (sc->pd_info_mem != NULL) 1313 bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap); 1314 if (sc->pd_info_tag != NULL) 1315 bus_dma_tag_destroy(sc->pd_info_tag); 1316 1317 /* 1318 * Free MFI frames 1319 */ 1320 if (sc->mfi_cmd_list) { 1321 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1322 mfi_cmd = sc->mfi_cmd_list[i]; 1323 mrsas_free_frame(sc, mfi_cmd); 1324 } 1325 } 1326 if (sc->mficmd_frame_tag != NULL) 1327 bus_dma_tag_destroy(sc->mficmd_frame_tag); 1328 1329 /* 1330 * Free MPT internal command list 1331 */ 1332 max_fw_cmds = sc->max_fw_cmds; 1333 if (sc->mpt_cmd_list) { 1334 for (i = 0; i < max_fw_cmds; i++) { 1335 mpt_cmd = sc->mpt_cmd_list[i]; 1336 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap); 1337 free(sc->mpt_cmd_list[i], M_MRSAS); 1338 } 1339 free(sc->mpt_cmd_list, M_MRSAS); 1340 sc->mpt_cmd_list = NULL; 1341 } 1342 /* 1343 * Free MFI internal command list 1344 */ 1345 1346 if (sc->mfi_cmd_list) { 1347 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1348 free(sc->mfi_cmd_list[i], M_MRSAS); 1349 } 1350 free(sc->mfi_cmd_list, M_MRSAS); 1351 sc->mfi_cmd_list = NULL; 1352 } 1353 /* 1354 * Free request descriptor memory 1355 */ 1356 free(sc->req_desc, M_MRSAS); 1357 sc->req_desc = NULL; 1358 1359 /* 1360 * Destroy parent tag 1361 */ 1362 if (sc->mrsas_parent_tag != NULL) 1363 bus_dma_tag_destroy(sc->mrsas_parent_tag); 1364 1365 /* 1366 * Free ctrl_info memory 1367 */ 1368 if (sc->ctrl_info != NULL) 1369 free(sc->ctrl_info, M_MRSAS); 1370 } 1371 1372 /* 1373 * mrsas_teardown_intr: Teardown interrupt 1374 * input: Adapter instance soft state 1375 * 1376 * This function is called from mrsas_detach() to teardown and release bus 1377 * interrupt resourse. 1378 */ 1379 void 1380 mrsas_teardown_intr(struct mrsas_softc *sc) 1381 { 1382 int i; 1383 1384 if (!sc->msix_enable) { 1385 if (sc->intr_handle[0]) 1386 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); 1387 if (sc->mrsas_irq[0] != NULL) 1388 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1389 sc->irq_id[0], sc->mrsas_irq[0]); 1390 sc->intr_handle[0] = NULL; 1391 } else { 1392 for (i = 0; i < sc->msix_vectors; i++) { 1393 if (sc->intr_handle[i]) 1394 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], 1395 sc->intr_handle[i]); 1396 1397 if (sc->mrsas_irq[i] != NULL) 1398 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1399 sc->irq_id[i], sc->mrsas_irq[i]); 1400 1401 sc->intr_handle[i] = NULL; 1402 } 1403 pci_release_msi(sc->mrsas_dev); 1404 } 1405 1406 } 1407 1408 /* 1409 * mrsas_suspend: Suspend entry point 1410 * input: Device struct pointer 1411 * 1412 * This function is the entry point for system suspend from the OS. 1413 */ 1414 static int 1415 mrsas_suspend(device_t dev) 1416 { 1417 /* This will be filled when the driver will have hibernation support */ 1418 return (0); 1419 } 1420 1421 /* 1422 * mrsas_resume: Resume entry point 1423 * input: Device struct pointer 1424 * 1425 * This function is the entry point for system resume from the OS. 1426 */ 1427 static int 1428 mrsas_resume(device_t dev) 1429 { 1430 /* This will be filled when the driver will have hibernation support */ 1431 return (0); 1432 } 1433 1434 /** 1435 * mrsas_get_softc_instance: Find softc instance based on cmd type 1436 * 1437 * This function will return softc instance based on cmd type. 1438 * In some case, application fire ioctl on required management instance and 1439 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those 1440 * case, else get the softc instance from host_no provided by application in 1441 * user data. 1442 */ 1443 1444 static struct mrsas_softc * 1445 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1446 { 1447 struct mrsas_softc *sc = NULL; 1448 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1449 1450 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1451 sc = dev->si_drv1; 1452 } else { 1453 /* 1454 * get the Host number & the softc from data sent by the 1455 * Application 1456 */ 1457 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1458 if (sc == NULL) 1459 printf("There is no Controller number %d\n", 1460 user_ioc->host_no); 1461 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1462 mrsas_dprint(sc, MRSAS_FAULT, 1463 "Invalid Controller number %d\n", user_ioc->host_no); 1464 } 1465 1466 return sc; 1467 } 1468 1469 /* 1470 * mrsas_ioctl: IOCtl commands entry point. 1471 * 1472 * This function is the entry point for IOCtls from the OS. It calls the 1473 * appropriate function for processing depending on the command received. 1474 */ 1475 static int 1476 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, 1477 struct thread *td) 1478 { 1479 struct mrsas_softc *sc; 1480 int ret = 0, i = 0; 1481 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; 1482 1483 sc = mrsas_get_softc_instance(dev, cmd, arg); 1484 if (!sc) 1485 return ENOENT; 1486 1487 if (sc->remove_in_progress || 1488 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { 1489 mrsas_dprint(sc, MRSAS_INFO, 1490 "Either driver remove or shutdown called or " 1491 "HW is in unrecoverable critical error state.\n"); 1492 return ENOENT; 1493 } 1494 mtx_lock_spin(&sc->ioctl_lock); 1495 if (!sc->reset_in_progress) { 1496 mtx_unlock_spin(&sc->ioctl_lock); 1497 goto do_ioctl; 1498 } 1499 mtx_unlock_spin(&sc->ioctl_lock); 1500 while (sc->reset_in_progress) { 1501 i++; 1502 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1503 mrsas_dprint(sc, MRSAS_INFO, 1504 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1505 } 1506 pause("mr_ioctl", hz); 1507 } 1508 1509 do_ioctl: 1510 switch (cmd) { 1511 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: 1512 #ifdef COMPAT_FREEBSD32 1513 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: 1514 #endif 1515 /* 1516 * Decrement the Ioctl counting Semaphore before getting an 1517 * mfi command 1518 */ 1519 sema_wait(&sc->ioctl_count_sema); 1520 1521 ret = mrsas_passthru(sc, (void *)arg, cmd); 1522 1523 /* Increment the Ioctl counting semaphore value */ 1524 sema_post(&sc->ioctl_count_sema); 1525 1526 break; 1527 case MRSAS_IOC_SCAN_BUS: 1528 ret = mrsas_bus_scan(sc); 1529 break; 1530 1531 case MRSAS_IOC_GET_PCI_INFO: 1532 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; 1533 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); 1534 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); 1535 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); 1536 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); 1537 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); 1538 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," 1539 "pci device no: %d, pci function no: %d," 1540 "pci domain ID: %d\n", 1541 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, 1542 pciDrvInfo->functionNumber, pciDrvInfo->domainID); 1543 ret = 0; 1544 break; 1545 1546 default: 1547 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); 1548 ret = ENOENT; 1549 } 1550 1551 return (ret); 1552 } 1553 1554 /* 1555 * mrsas_poll: poll entry point for mrsas driver fd 1556 * 1557 * This function is the entry point for poll from the OS. It waits for some AEN 1558 * events to be triggered from the controller and notifies back. 1559 */ 1560 static int 1561 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) 1562 { 1563 struct mrsas_softc *sc; 1564 int revents = 0; 1565 1566 sc = dev->si_drv1; 1567 1568 if (poll_events & (POLLIN | POLLRDNORM)) { 1569 if (sc->mrsas_aen_triggered) { 1570 revents |= poll_events & (POLLIN | POLLRDNORM); 1571 } 1572 } 1573 if (revents == 0) { 1574 if (poll_events & (POLLIN | POLLRDNORM)) { 1575 mtx_lock(&sc->aen_lock); 1576 sc->mrsas_poll_waiting = 1; 1577 selrecord(td, &sc->mrsas_select); 1578 mtx_unlock(&sc->aen_lock); 1579 } 1580 } 1581 return revents; 1582 } 1583 1584 /* 1585 * mrsas_setup_irq: Set up interrupt 1586 * input: Adapter instance soft state 1587 * 1588 * This function sets up interrupts as a bus resource, with flags indicating 1589 * resource permitting contemporaneous sharing and for resource to activate 1590 * atomically. 1591 */ 1592 static int 1593 mrsas_setup_irq(struct mrsas_softc *sc) 1594 { 1595 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) 1596 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); 1597 1598 else { 1599 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); 1600 sc->irq_context[0].sc = sc; 1601 sc->irq_context[0].MSIxIndex = 0; 1602 sc->irq_id[0] = 0; 1603 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, 1604 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); 1605 if (sc->mrsas_irq[0] == NULL) { 1606 device_printf(sc->mrsas_dev, "Cannot allocate legcay" 1607 "interrupt\n"); 1608 return (FAIL); 1609 } 1610 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], 1611 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, 1612 &sc->irq_context[0], &sc->intr_handle[0])) { 1613 device_printf(sc->mrsas_dev, "Cannot set up legacy" 1614 "interrupt\n"); 1615 return (FAIL); 1616 } 1617 } 1618 return (0); 1619 } 1620 1621 /* 1622 * mrsas_isr: ISR entry point 1623 * input: argument pointer 1624 * 1625 * This function is the interrupt service routine entry point. There are two 1626 * types of interrupts, state change interrupt and response interrupt. If an 1627 * interrupt is not ours, we just return. 1628 */ 1629 void 1630 mrsas_isr(void *arg) 1631 { 1632 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg; 1633 struct mrsas_softc *sc = irq_context->sc; 1634 int status = 0; 1635 1636 if (sc->mask_interrupts) 1637 return; 1638 1639 if (!sc->msix_vectors) { 1640 status = mrsas_clear_intr(sc); 1641 if (!status) 1642 return; 1643 } 1644 /* If we are resetting, bail */ 1645 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { 1646 printf(" Entered into ISR when OCR is going active. \n"); 1647 mrsas_clear_intr(sc); 1648 return; 1649 } 1650 /* Process for reply request and clear response interrupt */ 1651 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS) 1652 mrsas_clear_intr(sc); 1653 1654 return; 1655 } 1656 1657 /* 1658 * mrsas_complete_cmd: Process reply request 1659 * input: Adapter instance soft state 1660 * 1661 * This function is called from mrsas_isr() to process reply request and clear 1662 * response interrupt. Processing of the reply request entails walking 1663 * through the reply descriptor array for the command request pended from 1664 * Firmware. We look at the Function field to determine the command type and 1665 * perform the appropriate action. Before we return, we clear the response 1666 * interrupt. 1667 */ 1668 int 1669 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) 1670 { 1671 Mpi2ReplyDescriptorsUnion_t *desc; 1672 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1673 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; 1674 struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL; 1675 struct mrsas_mfi_cmd *cmd_mfi; 1676 u_int8_t reply_descript_type, *sense; 1677 u_int16_t smid, num_completed; 1678 u_int8_t status, extStatus; 1679 union desc_value desc_val; 1680 PLD_LOAD_BALANCE_INFO lbinfo; 1681 u_int32_t device_id, data_length; 1682 int threshold_reply_count = 0; 1683 #if TM_DEBUG 1684 MR_TASK_MANAGE_REQUEST *mr_tm_req; 1685 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 1686 #endif 1687 1688 /* If we have a hardware error, not need to continue */ 1689 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 1690 return (DONE); 1691 1692 desc = sc->reply_desc_mem; 1693 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) 1694 + sc->last_reply_idx[MSIxIndex]; 1695 1696 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1697 1698 desc_val.word = desc->Words; 1699 num_completed = 0; 1700 1701 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1702 1703 /* Find our reply descriptor for the command and process */ 1704 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { 1705 smid = le16toh(reply_desc->SMID); 1706 cmd_mpt = sc->mpt_cmd_list[smid - 1]; 1707 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; 1708 1709 status = scsi_io_req->RaidContext.raid_context.status; 1710 extStatus = scsi_io_req->RaidContext.raid_context.exStatus; 1711 sense = cmd_mpt->sense; 1712 data_length = scsi_io_req->DataLength; 1713 1714 switch (scsi_io_req->Function) { 1715 case MPI2_FUNCTION_SCSI_TASK_MGMT: 1716 #if TM_DEBUG 1717 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request; 1718 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *) 1719 &mr_tm_req->TmRequest; 1720 device_printf(sc->mrsas_dev, "TM completion type 0x%X, " 1721 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 1722 #endif 1723 wakeup_one((void *)&sc->ocr_chan); 1724 break; 1725 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ 1726 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; 1727 lbinfo = &sc->load_balance_info[device_id]; 1728 /* R1 load balancing for READ */ 1729 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { 1730 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); 1731 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; 1732 } 1733 /* Fall thru and complete IO */ 1734 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: 1735 if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 1736 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1737 extStatus, le32toh(data_length), sense); 1738 mrsas_cmd_done(sc, cmd_mpt); 1739 mrsas_atomic_dec(&sc->fw_outstanding); 1740 } else { 1741 /* 1742 * If the peer Raid 1/10 fast path failed, 1743 * mark IO as failed to the scsi layer. 1744 * Overwrite the current status by the failed status 1745 * and make sure that if any command fails, 1746 * driver returns fail status to CAM. 1747 */ 1748 cmd_mpt->cmd_completed = 1; 1749 r1_cmd = cmd_mpt->peer_cmd; 1750 if (r1_cmd->cmd_completed) { 1751 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) { 1752 status = r1_cmd->io_request->RaidContext.raid_context.status; 1753 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus; 1754 data_length = r1_cmd->io_request->DataLength; 1755 sense = r1_cmd->sense; 1756 } 1757 r1_cmd->ccb_ptr = NULL; 1758 if (r1_cmd->callout_owner) { 1759 callout_stop(&r1_cmd->cm_callout); 1760 r1_cmd->callout_owner = false; 1761 } 1762 mrsas_release_mpt_cmd(r1_cmd); 1763 mrsas_atomic_dec(&sc->fw_outstanding); 1764 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1765 extStatus, le32toh(data_length), sense); 1766 mrsas_cmd_done(sc, cmd_mpt); 1767 mrsas_atomic_dec(&sc->fw_outstanding); 1768 } 1769 } 1770 break; 1771 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ 1772 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 1773 /* 1774 * Make sure NOT TO release the mfi command from the called 1775 * function's context if it is fired with issue_polled call. 1776 * And also make sure that the issue_polled call should only be 1777 * used if INTERRUPT IS DISABLED. 1778 */ 1779 if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 1780 mrsas_release_mfi_cmd(cmd_mfi); 1781 else 1782 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); 1783 break; 1784 } 1785 1786 sc->last_reply_idx[MSIxIndex]++; 1787 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) 1788 sc->last_reply_idx[MSIxIndex] = 0; 1789 1790 desc->Words = ~((uint64_t)0x00); /* set it back to all 1791 * 0xFFFFFFFFs */ 1792 num_completed++; 1793 threshold_reply_count++; 1794 1795 /* Get the next reply descriptor */ 1796 if (!sc->last_reply_idx[MSIxIndex]) { 1797 desc = sc->reply_desc_mem; 1798 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); 1799 } else 1800 desc++; 1801 1802 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1803 desc_val.word = desc->Words; 1804 1805 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1806 1807 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1808 break; 1809 1810 /* 1811 * Write to reply post index after completing threshold reply 1812 * count and still there are more replies in reply queue 1813 * pending to be completed. 1814 */ 1815 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 1816 if (sc->msix_enable) { 1817 if (sc->msix_combined) 1818 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1819 ((MSIxIndex & 0x7) << 24) | 1820 sc->last_reply_idx[MSIxIndex]); 1821 else 1822 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1823 sc->last_reply_idx[MSIxIndex]); 1824 } else 1825 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1826 reply_post_host_index), sc->last_reply_idx[0]); 1827 1828 threshold_reply_count = 0; 1829 } 1830 } 1831 1832 /* No match, just return */ 1833 if (num_completed == 0) 1834 return (DONE); 1835 1836 /* Clear response interrupt */ 1837 if (sc->msix_enable) { 1838 if (sc->msix_combined) { 1839 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1840 ((MSIxIndex & 0x7) << 24) | 1841 sc->last_reply_idx[MSIxIndex]); 1842 } else 1843 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1844 sc->last_reply_idx[MSIxIndex]); 1845 } else 1846 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1847 reply_post_host_index), sc->last_reply_idx[0]); 1848 1849 return (0); 1850 } 1851 1852 /* 1853 * mrsas_map_mpt_cmd_status: Allocate DMAable memory. 1854 * input: Adapter instance soft state 1855 * 1856 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. 1857 * It checks the command status and maps the appropriate CAM status for the 1858 * CCB. 1859 */ 1860 void 1861 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status, 1862 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense) 1863 { 1864 struct mrsas_softc *sc = cmd->sc; 1865 u_int8_t *sense_data; 1866 1867 switch (status) { 1868 case MFI_STAT_OK: 1869 ccb_ptr->ccb_h.status = CAM_REQ_CMP; 1870 break; 1871 case MFI_STAT_SCSI_IO_FAILED: 1872 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1873 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1874 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data; 1875 if (sense_data) { 1876 /* For now just copy 18 bytes back */ 1877 memcpy(sense_data, sense, 18); 1878 ccb_ptr->csio.sense_len = 18; 1879 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; 1880 } 1881 break; 1882 case MFI_STAT_LD_OFFLINE: 1883 case MFI_STAT_DEVICE_NOT_FOUND: 1884 if (ccb_ptr->ccb_h.target_lun) 1885 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; 1886 else 1887 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; 1888 break; 1889 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1890 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; 1891 break; 1892 default: 1893 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); 1894 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; 1895 ccb_ptr->csio.scsi_status = status; 1896 } 1897 return; 1898 } 1899 1900 /* 1901 * mrsas_alloc_mem: Allocate DMAable memory 1902 * input: Adapter instance soft state 1903 * 1904 * This function creates the parent DMA tag and allocates DMAable memory. DMA 1905 * tag describes constraints of DMA mapping. Memory allocated is mapped into 1906 * Kernel virtual address. Callback argument is physical memory address. 1907 */ 1908 static int 1909 mrsas_alloc_mem(struct mrsas_softc *sc) 1910 { 1911 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, 1912 evt_detail_size, count, pd_info_size; 1913 1914 /* 1915 * Allocate parent DMA tag 1916 */ 1917 if (bus_dma_tag_create(NULL, /* parent */ 1918 1, /* alignment */ 1919 0, /* boundary */ 1920 BUS_SPACE_MAXADDR, /* lowaddr */ 1921 BUS_SPACE_MAXADDR, /* highaddr */ 1922 NULL, NULL, /* filter, filterarg */ 1923 maxphys, /* maxsize */ 1924 sc->max_num_sge, /* nsegments */ 1925 maxphys, /* maxsegsize */ 1926 0, /* flags */ 1927 NULL, NULL, /* lockfunc, lockarg */ 1928 &sc->mrsas_parent_tag /* tag */ 1929 )) { 1930 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); 1931 return (ENOMEM); 1932 } 1933 /* 1934 * Allocate for version buffer 1935 */ 1936 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); 1937 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1938 1, 0, 1939 BUS_SPACE_MAXADDR_32BIT, 1940 BUS_SPACE_MAXADDR, 1941 NULL, NULL, 1942 verbuf_size, 1943 1, 1944 verbuf_size, 1945 BUS_DMA_ALLOCNOW, 1946 NULL, NULL, 1947 &sc->verbuf_tag)) { 1948 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); 1949 return (ENOMEM); 1950 } 1951 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, 1952 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { 1953 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); 1954 return (ENOMEM); 1955 } 1956 bzero(sc->verbuf_mem, verbuf_size); 1957 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, 1958 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, 1959 BUS_DMA_NOWAIT)) { 1960 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); 1961 return (ENOMEM); 1962 } 1963 /* 1964 * Allocate IO Request Frames 1965 */ 1966 io_req_size = sc->io_frames_alloc_sz; 1967 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1968 16, 0, 1969 BUS_SPACE_MAXADDR_32BIT, 1970 BUS_SPACE_MAXADDR, 1971 NULL, NULL, 1972 io_req_size, 1973 1, 1974 io_req_size, 1975 BUS_DMA_ALLOCNOW, 1976 NULL, NULL, 1977 &sc->io_request_tag)) { 1978 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); 1979 return (ENOMEM); 1980 } 1981 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, 1982 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { 1983 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); 1984 return (ENOMEM); 1985 } 1986 bzero(sc->io_request_mem, io_req_size); 1987 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, 1988 sc->io_request_mem, io_req_size, mrsas_addr_cb, 1989 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { 1990 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); 1991 return (ENOMEM); 1992 } 1993 /* 1994 * Allocate Chain Frames 1995 */ 1996 chain_frame_size = sc->chain_frames_alloc_sz; 1997 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1998 4, 0, 1999 BUS_SPACE_MAXADDR_32BIT, 2000 BUS_SPACE_MAXADDR, 2001 NULL, NULL, 2002 chain_frame_size, 2003 1, 2004 chain_frame_size, 2005 BUS_DMA_ALLOCNOW, 2006 NULL, NULL, 2007 &sc->chain_frame_tag)) { 2008 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); 2009 return (ENOMEM); 2010 } 2011 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, 2012 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { 2013 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); 2014 return (ENOMEM); 2015 } 2016 bzero(sc->chain_frame_mem, chain_frame_size); 2017 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, 2018 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, 2019 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { 2020 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); 2021 return (ENOMEM); 2022 } 2023 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2024 /* 2025 * Allocate Reply Descriptor Array 2026 */ 2027 reply_desc_size = sc->reply_alloc_sz * count; 2028 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2029 16, 0, 2030 BUS_SPACE_MAXADDR_32BIT, 2031 BUS_SPACE_MAXADDR, 2032 NULL, NULL, 2033 reply_desc_size, 2034 1, 2035 reply_desc_size, 2036 BUS_DMA_ALLOCNOW, 2037 NULL, NULL, 2038 &sc->reply_desc_tag)) { 2039 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); 2040 return (ENOMEM); 2041 } 2042 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, 2043 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { 2044 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); 2045 return (ENOMEM); 2046 } 2047 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, 2048 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, 2049 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { 2050 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); 2051 return (ENOMEM); 2052 } 2053 /* 2054 * Allocate Sense Buffer Array. Keep in lower 4GB 2055 */ 2056 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; 2057 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2058 64, 0, 2059 BUS_SPACE_MAXADDR_32BIT, 2060 BUS_SPACE_MAXADDR, 2061 NULL, NULL, 2062 sense_size, 2063 1, 2064 sense_size, 2065 BUS_DMA_ALLOCNOW, 2066 NULL, NULL, 2067 &sc->sense_tag)) { 2068 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); 2069 return (ENOMEM); 2070 } 2071 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, 2072 BUS_DMA_NOWAIT, &sc->sense_dmamap)) { 2073 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); 2074 return (ENOMEM); 2075 } 2076 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, 2077 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, 2078 BUS_DMA_NOWAIT)) { 2079 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); 2080 return (ENOMEM); 2081 } 2082 2083 /* 2084 * Allocate for Event detail structure 2085 */ 2086 evt_detail_size = sizeof(struct mrsas_evt_detail); 2087 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2088 1, 0, 2089 BUS_SPACE_MAXADDR_32BIT, 2090 BUS_SPACE_MAXADDR, 2091 NULL, NULL, 2092 evt_detail_size, 2093 1, 2094 evt_detail_size, 2095 BUS_DMA_ALLOCNOW, 2096 NULL, NULL, 2097 &sc->evt_detail_tag)) { 2098 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); 2099 return (ENOMEM); 2100 } 2101 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, 2102 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { 2103 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); 2104 return (ENOMEM); 2105 } 2106 bzero(sc->evt_detail_mem, evt_detail_size); 2107 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, 2108 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, 2109 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { 2110 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); 2111 return (ENOMEM); 2112 } 2113 2114 /* 2115 * Allocate for PD INFO structure 2116 */ 2117 pd_info_size = sizeof(struct mrsas_pd_info); 2118 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2119 1, 0, 2120 BUS_SPACE_MAXADDR_32BIT, 2121 BUS_SPACE_MAXADDR, 2122 NULL, NULL, 2123 pd_info_size, 2124 1, 2125 pd_info_size, 2126 BUS_DMA_ALLOCNOW, 2127 NULL, NULL, 2128 &sc->pd_info_tag)) { 2129 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n"); 2130 return (ENOMEM); 2131 } 2132 if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem, 2133 BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) { 2134 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n"); 2135 return (ENOMEM); 2136 } 2137 bzero(sc->pd_info_mem, pd_info_size); 2138 if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap, 2139 sc->pd_info_mem, pd_info_size, mrsas_addr_cb, 2140 &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) { 2141 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n"); 2142 return (ENOMEM); 2143 } 2144 2145 /* 2146 * Create a dma tag for data buffers; size will be the maximum 2147 * possible I/O size (280kB). 2148 */ 2149 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2150 1, 2151 0, 2152 BUS_SPACE_MAXADDR, 2153 BUS_SPACE_MAXADDR, 2154 NULL, NULL, 2155 maxphys, 2156 sc->max_num_sge, /* nsegments */ 2157 maxphys, 2158 BUS_DMA_ALLOCNOW, 2159 busdma_lock_mutex, 2160 &sc->io_lock, 2161 &sc->data_tag)) { 2162 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); 2163 return (ENOMEM); 2164 } 2165 return (0); 2166 } 2167 2168 /* 2169 * mrsas_addr_cb: Callback function of bus_dmamap_load() 2170 * input: callback argument, machine dependent type 2171 * that describes DMA segments, number of segments, error code 2172 * 2173 * This function is for the driver to receive mapping information resultant of 2174 * the bus_dmamap_load(). The information is actually not being used, but the 2175 * address is saved anyway. 2176 */ 2177 void 2178 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2179 { 2180 bus_addr_t *addr; 2181 2182 addr = arg; 2183 *addr = segs[0].ds_addr; 2184 } 2185 2186 /* 2187 * mrsas_setup_raidmap: Set up RAID map. 2188 * input: Adapter instance soft state 2189 * 2190 * Allocate DMA memory for the RAID maps and perform setup. 2191 */ 2192 static int 2193 mrsas_setup_raidmap(struct mrsas_softc *sc) 2194 { 2195 int i; 2196 2197 for (i = 0; i < 2; i++) { 2198 sc->ld_drv_map[i] = 2199 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 2200 /* Do Error handling */ 2201 if (!sc->ld_drv_map[i]) { 2202 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 2203 2204 if (i == 1) 2205 free(sc->ld_drv_map[0], M_MRSAS); 2206 /* ABORT driver initialization */ 2207 goto ABORT; 2208 } 2209 } 2210 2211 for (int i = 0; i < 2; i++) { 2212 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2213 4, 0, 2214 BUS_SPACE_MAXADDR_32BIT, 2215 BUS_SPACE_MAXADDR, 2216 NULL, NULL, 2217 sc->max_map_sz, 2218 1, 2219 sc->max_map_sz, 2220 BUS_DMA_ALLOCNOW, 2221 NULL, NULL, 2222 &sc->raidmap_tag[i])) { 2223 device_printf(sc->mrsas_dev, 2224 "Cannot allocate raid map tag.\n"); 2225 return (ENOMEM); 2226 } 2227 if (bus_dmamem_alloc(sc->raidmap_tag[i], 2228 (void **)&sc->raidmap_mem[i], 2229 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 2230 device_printf(sc->mrsas_dev, 2231 "Cannot allocate raidmap memory.\n"); 2232 return (ENOMEM); 2233 } 2234 bzero(sc->raidmap_mem[i], sc->max_map_sz); 2235 2236 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 2237 sc->raidmap_mem[i], sc->max_map_sz, 2238 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 2239 BUS_DMA_NOWAIT)) { 2240 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 2241 return (ENOMEM); 2242 } 2243 if (!sc->raidmap_mem[i]) { 2244 device_printf(sc->mrsas_dev, 2245 "Cannot allocate memory for raid map.\n"); 2246 return (ENOMEM); 2247 } 2248 } 2249 2250 if (!mrsas_get_map_info(sc)) 2251 mrsas_sync_map_info(sc); 2252 2253 return (0); 2254 2255 ABORT: 2256 return (1); 2257 } 2258 2259 /** 2260 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 2261 * @sc: Adapter soft state 2262 * 2263 * Return 0 on success. 2264 */ 2265 void 2266 megasas_setup_jbod_map(struct mrsas_softc *sc) 2267 { 2268 int i; 2269 uint32_t pd_seq_map_sz; 2270 2271 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 2272 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 2273 2274 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 2275 sc->use_seqnum_jbod_fp = 0; 2276 return; 2277 } 2278 if (sc->jbodmap_mem[0]) 2279 goto skip_alloc; 2280 2281 for (i = 0; i < 2; i++) { 2282 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2283 4, 0, 2284 BUS_SPACE_MAXADDR_32BIT, 2285 BUS_SPACE_MAXADDR, 2286 NULL, NULL, 2287 pd_seq_map_sz, 2288 1, 2289 pd_seq_map_sz, 2290 BUS_DMA_ALLOCNOW, 2291 NULL, NULL, 2292 &sc->jbodmap_tag[i])) { 2293 device_printf(sc->mrsas_dev, 2294 "Cannot allocate jbod map tag.\n"); 2295 return; 2296 } 2297 if (bus_dmamem_alloc(sc->jbodmap_tag[i], 2298 (void **)&sc->jbodmap_mem[i], 2299 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) { 2300 device_printf(sc->mrsas_dev, 2301 "Cannot allocate jbod map memory.\n"); 2302 return; 2303 } 2304 bzero(sc->jbodmap_mem[i], pd_seq_map_sz); 2305 2306 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i], 2307 sc->jbodmap_mem[i], pd_seq_map_sz, 2308 mrsas_addr_cb, &sc->jbodmap_phys_addr[i], 2309 BUS_DMA_NOWAIT)) { 2310 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); 2311 return; 2312 } 2313 if (!sc->jbodmap_mem[i]) { 2314 device_printf(sc->mrsas_dev, 2315 "Cannot allocate memory for jbod map.\n"); 2316 sc->use_seqnum_jbod_fp = 0; 2317 return; 2318 } 2319 } 2320 2321 skip_alloc: 2322 if (!megasas_sync_pd_seq_num(sc, false) && 2323 !megasas_sync_pd_seq_num(sc, true)) 2324 sc->use_seqnum_jbod_fp = 1; 2325 else 2326 sc->use_seqnum_jbod_fp = 0; 2327 2328 device_printf(sc->mrsas_dev, "Jbod map is supported\n"); 2329 } 2330 2331 /* 2332 * mrsas_init_fw: Initialize Firmware 2333 * input: Adapter soft state 2334 * 2335 * Calls transition_to_ready() to make sure Firmware is in operational state and 2336 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It 2337 * issues internal commands to get the controller info after the IOC_INIT 2338 * command response is received by Firmware. Note: code relating to 2339 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2340 * is left here as placeholder. 2341 */ 2342 static int 2343 mrsas_init_fw(struct mrsas_softc *sc) 2344 { 2345 2346 int ret, loop, ocr = 0; 2347 u_int32_t max_sectors_1; 2348 u_int32_t max_sectors_2; 2349 u_int32_t tmp_sectors; 2350 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4; 2351 int msix_enable = 0; 2352 int fw_msix_count = 0; 2353 int i, j; 2354 2355 /* Make sure Firmware is ready */ 2356 ret = mrsas_transition_to_ready(sc, ocr); 2357 if (ret != SUCCESS) { 2358 return (ret); 2359 } 2360 if (sc->is_ventura || sc->is_aero) { 2361 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3)); 2362 #if VD_EXT_DEBUG 2363 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3); 2364 #endif 2365 sc->maxRaidMapSize = ((scratch_pad_3 >> 2366 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 2367 MR_MAX_RAID_MAP_SIZE_MASK); 2368 } 2369 /* MSI-x index 0- reply post host index register */ 2370 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2371 /* Check if MSI-X is supported while in ready state */ 2372 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2373 2374 if (msix_enable) { 2375 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2376 outbound_scratch_pad_2)); 2377 2378 /* Check max MSI-X vectors */ 2379 if (sc->device_id == MRSAS_TBOLT) { 2380 sc->msix_vectors = (scratch_pad_2 2381 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2382 fw_msix_count = sc->msix_vectors; 2383 } else { 2384 /* Invader/Fury supports 96 MSI-X vectors */ 2385 sc->msix_vectors = ((scratch_pad_2 2386 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2387 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2388 fw_msix_count = sc->msix_vectors; 2389 2390 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) || 2391 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16))) 2392 sc->msix_combined = true; 2393 /* 2394 * Save 1-15 reply post index 2395 * address to local memory Index 0 2396 * is already saved from reg offset 2397 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 2398 */ 2399 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2400 loop++) { 2401 sc->msix_reg_offset[loop] = 2402 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2403 (loop * 0x10); 2404 } 2405 } 2406 2407 /* Don't bother allocating more MSI-X vectors than cpus */ 2408 sc->msix_vectors = min(sc->msix_vectors, 2409 mp_ncpus); 2410 2411 /* Allocate MSI-x vectors */ 2412 if (mrsas_allocate_msix(sc) == SUCCESS) 2413 sc->msix_enable = 1; 2414 else 2415 sc->msix_enable = 0; 2416 2417 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2418 "Online CPU %d Current MSIX <%d>\n", 2419 fw_msix_count, mp_ncpus, sc->msix_vectors); 2420 } 2421 /* 2422 * MSI-X host index 0 is common for all adapter. 2423 * It is used for all MPT based Adapters. 2424 */ 2425 if (sc->msix_combined) { 2426 sc->msix_reg_offset[0] = 2427 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET; 2428 } 2429 if (mrsas_init_adapter(sc) != SUCCESS) { 2430 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2431 return (1); 2432 } 2433 2434 if (sc->is_ventura || sc->is_aero) { 2435 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2436 outbound_scratch_pad_4)); 2437 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT) 2438 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK); 2439 2440 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size); 2441 } 2442 2443 /* Allocate internal commands for pass-thru */ 2444 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2445 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2446 return (1); 2447 } 2448 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2449 if (!sc->ctrl_info) { 2450 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2451 return (1); 2452 } 2453 /* 2454 * Get the controller info from FW, so that the MAX VD support 2455 * availability can be decided. 2456 */ 2457 if (mrsas_get_ctrl_info(sc)) { 2458 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2459 return (1); 2460 } 2461 sc->secure_jbod_support = 2462 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2463 2464 if (sc->secure_jbod_support) 2465 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2466 2467 if (sc->use_seqnum_jbod_fp) 2468 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); 2469 2470 if (sc->support_morethan256jbod) 2471 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n"); 2472 2473 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2474 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! " 2475 "There seems to be some problem in the controller\n" 2476 "Please contact to the SUPPORT TEAM if the problem persists\n"); 2477 } 2478 megasas_setup_jbod_map(sc); 2479 2480 memset(sc->target_list, 0, 2481 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target)); 2482 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) 2483 sc->target_list[i].target_id = 0xffff; 2484 2485 /* For pass-thru, get PD/LD list and controller info */ 2486 memset(sc->pd_list, 0, 2487 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2488 if (mrsas_get_pd_list(sc) != SUCCESS) { 2489 device_printf(sc->mrsas_dev, "Get PD list failed.\n"); 2490 return (1); 2491 } 2492 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2493 if (mrsas_get_ld_list(sc) != SUCCESS) { 2494 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); 2495 return (1); 2496 } 2497 2498 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) { 2499 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) * 2500 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT); 2501 if (!sc->streamDetectByLD) { 2502 device_printf(sc->mrsas_dev, 2503 "unable to allocate stream detection for pool of LDs\n"); 2504 return (1); 2505 } 2506 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 2507 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT); 2508 if (!sc->streamDetectByLD[i]) { 2509 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n"); 2510 for (j = 0; j < i; ++j) 2511 free(sc->streamDetectByLD[j], M_MRSAS); 2512 free(sc->streamDetectByLD, M_MRSAS); 2513 sc->streamDetectByLD = NULL; 2514 return (1); 2515 } 2516 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 2517 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 2518 } 2519 } 2520 2521 /* 2522 * Compute the max allowed sectors per IO: The controller info has 2523 * two limits on max sectors. Driver should use the minimum of these 2524 * two. 2525 * 2526 * 1 << stripe_sz_ops.min = max sectors per strip 2527 * 2528 * Note that older firmwares ( < FW ver 30) didn't report information to 2529 * calculate max_sectors_1. So the number ended up as zero always. 2530 */ 2531 tmp_sectors = 0; 2532 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2533 sc->ctrl_info->max_strips_per_io; 2534 max_sectors_2 = sc->ctrl_info->max_request_size; 2535 tmp_sectors = min(max_sectors_1, max_sectors_2); 2536 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2537 2538 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2539 sc->max_sectors_per_req = tmp_sectors; 2540 2541 sc->disableOnlineCtrlReset = 2542 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2543 sc->UnevenSpanSupport = 2544 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2545 if (sc->UnevenSpanSupport) { 2546 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2547 sc->UnevenSpanSupport); 2548 2549 if (MR_ValidateMapInfo(sc)) 2550 sc->fast_path_io = 1; 2551 else 2552 sc->fast_path_io = 0; 2553 } 2554 2555 device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n", 2556 sc->max_fw_cmds, sc->max_scsi_cmds); 2557 return (0); 2558 } 2559 2560 /* 2561 * mrsas_init_adapter: Initializes the adapter/controller 2562 * input: Adapter soft state 2563 * 2564 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2565 * ROC/controller. The FW register is read to determined the number of 2566 * commands that is supported. All memory allocations for IO is based on 2567 * max_cmd. Appropriate calculations are performed in this function. 2568 */ 2569 int 2570 mrsas_init_adapter(struct mrsas_softc *sc) 2571 { 2572 uint32_t status; 2573 u_int32_t scratch_pad_2; 2574 int ret; 2575 int i = 0; 2576 2577 /* Read FW status register */ 2578 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2579 2580 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; 2581 2582 /* Decrement the max supported by 1, to correlate with FW */ 2583 sc->max_fw_cmds = sc->max_fw_cmds - 1; 2584 sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS; 2585 2586 /* Determine allocation size of command frames */ 2587 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2; 2588 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds; 2589 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); 2590 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 2591 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1)); 2592 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2593 outbound_scratch_pad_2)); 2594 2595 mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x," 2596 "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x," 2597 "sc->io_frames_alloc_sz 0x%x\n", __func__, 2598 sc->reply_q_depth, sc->request_alloc_sz, 2599 sc->reply_alloc_sz, sc->io_frames_alloc_sz); 2600 2601 /* 2602 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 2603 * Firmware support extended IO chain frame which is 4 time more 2604 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) = 2605 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K 2606 */ 2607 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) 2608 sc->max_chain_frame_sz = 2609 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2610 * MEGASAS_1MB_IO; 2611 else 2612 sc->max_chain_frame_sz = 2613 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2614 * MEGASAS_256K_IO; 2615 2616 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds; 2617 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2618 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; 2619 2620 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); 2621 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; 2622 2623 mrsas_dprint(sc, MRSAS_INFO, 2624 "max sge: 0x%x, max chain frame size: 0x%x, " 2625 "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n", 2626 sc->max_num_sge, 2627 sc->max_chain_frame_sz, sc->max_fw_cmds, 2628 sc->chain_frames_alloc_sz); 2629 2630 /* Used for pass thru MFI frame (DCMD) */ 2631 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; 2632 2633 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2634 sizeof(MPI2_SGE_IO_UNION)) / 16; 2635 2636 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2637 2638 for (i = 0; i < count; i++) 2639 sc->last_reply_idx[i] = 0; 2640 2641 ret = mrsas_alloc_mem(sc); 2642 if (ret != SUCCESS) 2643 return (ret); 2644 2645 ret = mrsas_alloc_mpt_cmds(sc); 2646 if (ret != SUCCESS) 2647 return (ret); 2648 2649 ret = mrsas_ioc_init(sc); 2650 if (ret != SUCCESS) 2651 return (ret); 2652 2653 return (0); 2654 } 2655 2656 /* 2657 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command 2658 * input: Adapter soft state 2659 * 2660 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. 2661 */ 2662 int 2663 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) 2664 { 2665 int ioc_init_size; 2666 2667 /* Allocate IOC INIT command */ 2668 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); 2669 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2670 1, 0, 2671 BUS_SPACE_MAXADDR_32BIT, 2672 BUS_SPACE_MAXADDR, 2673 NULL, NULL, 2674 ioc_init_size, 2675 1, 2676 ioc_init_size, 2677 BUS_DMA_ALLOCNOW, 2678 NULL, NULL, 2679 &sc->ioc_init_tag)) { 2680 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); 2681 return (ENOMEM); 2682 } 2683 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, 2684 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { 2685 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); 2686 return (ENOMEM); 2687 } 2688 bzero(sc->ioc_init_mem, ioc_init_size); 2689 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, 2690 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, 2691 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { 2692 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); 2693 return (ENOMEM); 2694 } 2695 return (0); 2696 } 2697 2698 /* 2699 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command 2700 * input: Adapter soft state 2701 * 2702 * Deallocates memory of the IOC Init cmd. 2703 */ 2704 void 2705 mrsas_free_ioc_cmd(struct mrsas_softc *sc) 2706 { 2707 if (sc->ioc_init_phys_mem) 2708 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); 2709 if (sc->ioc_init_mem != NULL) 2710 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); 2711 if (sc->ioc_init_tag != NULL) 2712 bus_dma_tag_destroy(sc->ioc_init_tag); 2713 } 2714 2715 /* 2716 * mrsas_ioc_init: Sends IOC Init command to FW 2717 * input: Adapter soft state 2718 * 2719 * Issues the IOC Init cmd to FW to initialize the ROC/controller. 2720 */ 2721 int 2722 mrsas_ioc_init(struct mrsas_softc *sc) 2723 { 2724 struct mrsas_init_frame *init_frame; 2725 pMpi2IOCInitRequest_t IOCInitMsg; 2726 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; 2727 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 2728 bus_addr_t phys_addr; 2729 int i, retcode = 0; 2730 u_int32_t scratch_pad_2; 2731 2732 /* Allocate memory for the IOC INIT command */ 2733 if (mrsas_alloc_ioc_cmd(sc)) { 2734 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); 2735 return (1); 2736 } 2737 2738 if (!sc->block_sync_cache) { 2739 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2740 outbound_scratch_pad_2)); 2741 sc->fw_sync_cache_support = (scratch_pad_2 & 2742 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; 2743 } 2744 2745 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); 2746 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; 2747 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 2748 IOCInitMsg->MsgVersion = htole16(MPI2_VERSION); 2749 IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION); 2750 IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 2751 IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth); 2752 IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr); 2753 IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr); 2754 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); 2755 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 2756 2757 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; 2758 init_frame->cmd = MFI_CMD_INIT; 2759 init_frame->cmd_status = 0xFF; 2760 init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 2761 2762 /* driver support Extended MSIX */ 2763 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 2764 init_frame->driver_operations. 2765 mfi_capabilities.support_additional_msix = 1; 2766 } 2767 if (sc->verbuf_mem) { 2768 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", 2769 MRSAS_VERSION); 2770 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; 2771 init_frame->driver_ver_hi = 0; 2772 } 2773 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; 2774 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; 2775 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; 2776 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 2777 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; 2778 2779 init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg); 2780 2781 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; 2782 init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr); 2783 init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t)); 2784 2785 req_desc.addr.u.low = htole32((bus_addr_t)sc->ioc_init_phys_mem & 0xFFFFFFFF); 2786 req_desc.addr.u.high = htole32((bus_addr_t)sc->ioc_init_phys_mem >> 32); 2787 2788 req_desc.MFAIo.RequestFlags = 2789 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2790 2791 mrsas_disable_intr(sc); 2792 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); 2793 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high); 2794 2795 /* 2796 * Poll response timer to wait for Firmware response. While this 2797 * timer with the DELAY call could block CPU, the time interval for 2798 * this is only 1 millisecond. 2799 */ 2800 if (init_frame->cmd_status == 0xFF) { 2801 for (i = 0; i < (max_wait * 1000); i++) { 2802 if (init_frame->cmd_status == 0xFF) 2803 DELAY(1000); 2804 else 2805 break; 2806 } 2807 } 2808 if (init_frame->cmd_status == 0) 2809 mrsas_dprint(sc, MRSAS_OCR, 2810 "IOC INIT response received from FW.\n"); 2811 else { 2812 if (init_frame->cmd_status == 0xFF) 2813 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); 2814 else 2815 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); 2816 retcode = 1; 2817 } 2818 2819 if (sc->is_aero) { 2820 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2821 outbound_scratch_pad_2)); 2822 sc->atomic_desc_support = (scratch_pad_2 & 2823 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; 2824 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n", 2825 sc->atomic_desc_support ? "Yes" : "No"); 2826 } 2827 2828 mrsas_free_ioc_cmd(sc); 2829 return (retcode); 2830 } 2831 2832 /* 2833 * mrsas_alloc_mpt_cmds: Allocates the command packets 2834 * input: Adapter instance soft state 2835 * 2836 * This function allocates the internal commands for IOs. Each command that is 2837 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An 2838 * array is allocated with mrsas_mpt_cmd context. The free commands are 2839 * maintained in a linked list (cmd pool). SMID value range is from 1 to 2840 * max_fw_cmds. 2841 */ 2842 int 2843 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) 2844 { 2845 int i, j; 2846 u_int32_t max_fw_cmds, count; 2847 struct mrsas_mpt_cmd *cmd; 2848 pMpi2ReplyDescriptorsUnion_t reply_desc; 2849 u_int32_t offset, chain_offset, sense_offset; 2850 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; 2851 u_int8_t *io_req_base, *chain_frame_base, *sense_base; 2852 2853 max_fw_cmds = sc->max_fw_cmds; 2854 2855 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); 2856 if (!sc->req_desc) { 2857 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); 2858 return (ENOMEM); 2859 } 2860 memset(sc->req_desc, 0, sc->request_alloc_sz); 2861 2862 /* 2863 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. 2864 * Allocate the dynamic array first and then allocate individual 2865 * commands. 2866 */ 2867 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds, 2868 M_MRSAS, M_NOWAIT); 2869 if (!sc->mpt_cmd_list) { 2870 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); 2871 return (ENOMEM); 2872 } 2873 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds); 2874 for (i = 0; i < max_fw_cmds; i++) { 2875 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), 2876 M_MRSAS, M_NOWAIT); 2877 if (!sc->mpt_cmd_list[i]) { 2878 for (j = 0; j < i; j++) 2879 free(sc->mpt_cmd_list[j], M_MRSAS); 2880 free(sc->mpt_cmd_list, M_MRSAS); 2881 sc->mpt_cmd_list = NULL; 2882 return (ENOMEM); 2883 } 2884 } 2885 2886 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2887 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2888 chain_frame_base = (u_int8_t *)sc->chain_frame_mem; 2889 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; 2890 sense_base = (u_int8_t *)sc->sense_mem; 2891 sense_base_phys = (bus_addr_t)sc->sense_phys_addr; 2892 for (i = 0; i < max_fw_cmds; i++) { 2893 cmd = sc->mpt_cmd_list[i]; 2894 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 2895 chain_offset = sc->max_chain_frame_sz * i; 2896 sense_offset = MRSAS_SENSE_LEN * i; 2897 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); 2898 cmd->index = i + 1; 2899 cmd->ccb_ptr = NULL; 2900 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2901 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); 2902 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 2903 cmd->sc = sc; 2904 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); 2905 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 2906 cmd->io_request_phys_addr = io_req_base_phys + offset; 2907 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); 2908 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; 2909 cmd->sense = sense_base + sense_offset; 2910 cmd->sense_phys_addr = sense_base_phys + sense_offset; 2911 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { 2912 return (FAIL); 2913 } 2914 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 2915 } 2916 2917 /* Initialize reply descriptor array to 0xFFFFFFFF */ 2918 reply_desc = sc->reply_desc_mem; 2919 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2920 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { 2921 reply_desc->Words = MRSAS_ULONG_MAX; 2922 } 2923 return (0); 2924 } 2925 2926 /* 2927 * mrsas_write_64bit_req_dsc: Writes 64 bit request descriptor to FW 2928 * input: Adapter softstate 2929 * request descriptor address low 2930 * request descriptor address high 2931 */ 2932 void 2933 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2934 u_int32_t req_desc_hi) 2935 { 2936 mtx_lock(&sc->pci_lock); 2937 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), 2938 le32toh(req_desc_lo)); 2939 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), 2940 le32toh(req_desc_hi)); 2941 mtx_unlock(&sc->pci_lock); 2942 } 2943 2944 /* 2945 * mrsas_fire_cmd: Sends command to FW 2946 * input: Adapter softstate 2947 * request descriptor address low 2948 * request descriptor address high 2949 * 2950 * This functions fires the command to Firmware by writing to the 2951 * inbound_low_queue_port and inbound_high_queue_port. 2952 */ 2953 void 2954 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2955 u_int32_t req_desc_hi) 2956 { 2957 if (sc->atomic_desc_support) 2958 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port), 2959 le32toh(req_desc_lo)); 2960 else 2961 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi); 2962 } 2963 2964 /* 2965 * mrsas_transition_to_ready: Move FW to Ready state input: 2966 * Adapter instance soft state 2967 * 2968 * During the initialization, FW passes can potentially be in any one of several 2969 * possible states. If the FW in operational, waiting-for-handshake states, 2970 * driver must take steps to bring it to ready state. Otherwise, it has to 2971 * wait for the ready state. 2972 */ 2973 int 2974 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) 2975 { 2976 int i; 2977 u_int8_t max_wait; 2978 u_int32_t val, fw_state; 2979 u_int32_t cur_state; 2980 u_int32_t abs_state, curr_abs_state; 2981 2982 val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2983 fw_state = val & MFI_STATE_MASK; 2984 max_wait = MRSAS_RESET_WAIT_TIME; 2985 2986 if (fw_state != MFI_STATE_READY) 2987 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); 2988 2989 while (fw_state != MFI_STATE_READY) { 2990 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2991 switch (fw_state) { 2992 case MFI_STATE_FAULT: 2993 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); 2994 if (ocr) { 2995 cur_state = MFI_STATE_FAULT; 2996 break; 2997 } else 2998 return -ENODEV; 2999 case MFI_STATE_WAIT_HANDSHAKE: 3000 /* Set the CLR bit in inbound doorbell */ 3001 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3002 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); 3003 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3004 break; 3005 case MFI_STATE_BOOT_MESSAGE_PENDING: 3006 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3007 MFI_INIT_HOTPLUG); 3008 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3009 break; 3010 case MFI_STATE_OPERATIONAL: 3011 /* 3012 * Bring it to READY state; assuming max wait 10 3013 * secs 3014 */ 3015 mrsas_disable_intr(sc); 3016 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); 3017 for (i = 0; i < max_wait * 1000; i++) { 3018 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1) 3019 DELAY(1000); 3020 else 3021 break; 3022 } 3023 cur_state = MFI_STATE_OPERATIONAL; 3024 break; 3025 case MFI_STATE_UNDEFINED: 3026 /* 3027 * This state should not last for more than 2 3028 * seconds 3029 */ 3030 cur_state = MFI_STATE_UNDEFINED; 3031 break; 3032 case MFI_STATE_BB_INIT: 3033 cur_state = MFI_STATE_BB_INIT; 3034 break; 3035 case MFI_STATE_FW_INIT: 3036 cur_state = MFI_STATE_FW_INIT; 3037 break; 3038 case MFI_STATE_FW_INIT_2: 3039 cur_state = MFI_STATE_FW_INIT_2; 3040 break; 3041 case MFI_STATE_DEVICE_SCAN: 3042 cur_state = MFI_STATE_DEVICE_SCAN; 3043 break; 3044 case MFI_STATE_FLUSH_CACHE: 3045 cur_state = MFI_STATE_FLUSH_CACHE; 3046 break; 3047 default: 3048 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); 3049 return -ENODEV; 3050 } 3051 3052 /* 3053 * The cur_state should not last for more than max_wait secs 3054 */ 3055 for (i = 0; i < (max_wait * 1000); i++) { 3056 fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3057 outbound_scratch_pad)) & MFI_STATE_MASK); 3058 curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3059 outbound_scratch_pad)); 3060 if (abs_state == curr_abs_state) 3061 DELAY(1000); 3062 else 3063 break; 3064 } 3065 3066 /* 3067 * Return error if fw_state hasn't changed after max_wait 3068 */ 3069 if (curr_abs_state == abs_state) { 3070 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " 3071 "in %d secs\n", fw_state, max_wait); 3072 return -ENODEV; 3073 } 3074 } 3075 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); 3076 return 0; 3077 } 3078 3079 /* 3080 * mrsas_get_mfi_cmd: Get a cmd from free command pool 3081 * input: Adapter soft state 3082 * 3083 * This function removes an MFI command from the command list. 3084 */ 3085 struct mrsas_mfi_cmd * 3086 mrsas_get_mfi_cmd(struct mrsas_softc *sc) 3087 { 3088 struct mrsas_mfi_cmd *cmd = NULL; 3089 3090 mtx_lock(&sc->mfi_cmd_pool_lock); 3091 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { 3092 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); 3093 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); 3094 } 3095 mtx_unlock(&sc->mfi_cmd_pool_lock); 3096 3097 return cmd; 3098 } 3099 3100 /* 3101 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter. 3102 * input: Adapter Context. 3103 * 3104 * This function will check FW status register and flag do_timeout_reset flag. 3105 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has 3106 * trigger reset. 3107 */ 3108 static void 3109 mrsas_ocr_thread(void *arg) 3110 { 3111 struct mrsas_softc *sc; 3112 u_int32_t fw_status, fw_state; 3113 u_int8_t tm_target_reset_failed = 0; 3114 3115 sc = (struct mrsas_softc *)arg; 3116 3117 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); 3118 sc->ocr_thread_active = 1; 3119 mtx_lock(&sc->sim_lock); 3120 for (;;) { 3121 /* Sleep for 1 second and check the queue status */ 3122 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 3123 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); 3124 if (sc->remove_in_progress || 3125 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3126 mrsas_dprint(sc, MRSAS_OCR, 3127 "Exit due to %s from %s\n", 3128 sc->remove_in_progress ? "Shutdown" : 3129 "Hardware critical error", __func__); 3130 break; 3131 } 3132 fw_status = mrsas_read_reg_with_retries(sc, 3133 offsetof(mrsas_reg_set, outbound_scratch_pad)); 3134 fw_state = fw_status & MFI_STATE_MASK; 3135 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || 3136 mrsas_atomic_read(&sc->target_reset_outstanding)) { 3137 /* First, freeze further IOs to come to the SIM */ 3138 mrsas_xpt_freeze(sc); 3139 3140 /* If this is an IO timeout then go for target reset */ 3141 if (mrsas_atomic_read(&sc->target_reset_outstanding)) { 3142 device_printf(sc->mrsas_dev, "Initiating Target RESET " 3143 "because of SCSI IO timeout!\n"); 3144 3145 /* Let the remaining IOs to complete */ 3146 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 3147 "mrsas_reset_targets", 5 * hz); 3148 3149 /* Try to reset the target device */ 3150 if (mrsas_reset_targets(sc) == FAIL) 3151 tm_target_reset_failed = 1; 3152 } 3153 3154 /* If this is a DCMD timeout or FW fault, 3155 * then go for controller reset 3156 */ 3157 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || 3158 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { 3159 if (tm_target_reset_failed) 3160 device_printf(sc->mrsas_dev, "Initiaiting OCR because of " 3161 "TM FAILURE!\n"); 3162 else 3163 device_printf(sc->mrsas_dev, "Initiaiting OCR " 3164 "because of %s!\n", sc->do_timedout_reset ? 3165 "DCMD IO Timeout" : "FW fault"); 3166 3167 mtx_lock_spin(&sc->ioctl_lock); 3168 sc->reset_in_progress = 1; 3169 mtx_unlock_spin(&sc->ioctl_lock); 3170 sc->reset_count++; 3171 3172 /* 3173 * Wait for the AEN task to be completed if it is running. 3174 */ 3175 mtx_unlock(&sc->sim_lock); 3176 taskqueue_drain(sc->ev_tq, &sc->ev_task); 3177 mtx_lock(&sc->sim_lock); 3178 3179 taskqueue_block(sc->ev_tq); 3180 /* Try to reset the controller */ 3181 mrsas_reset_ctrl(sc, sc->do_timedout_reset); 3182 3183 sc->do_timedout_reset = 0; 3184 sc->reset_in_progress = 0; 3185 tm_target_reset_failed = 0; 3186 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 3187 memset(sc->target_reset_pool, 0, 3188 sizeof(sc->target_reset_pool)); 3189 taskqueue_unblock(sc->ev_tq); 3190 } 3191 3192 /* Now allow IOs to come to the SIM */ 3193 mrsas_xpt_release(sc); 3194 } 3195 } 3196 mtx_unlock(&sc->sim_lock); 3197 sc->ocr_thread_active = 0; 3198 mrsas_kproc_exit(0); 3199 } 3200 3201 /* 3202 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. 3203 * input: Adapter Context. 3204 * 3205 * This function will clear reply descriptor so that post OCR driver and FW will 3206 * lost old history. 3207 */ 3208 void 3209 mrsas_reset_reply_desc(struct mrsas_softc *sc) 3210 { 3211 int i, count; 3212 pMpi2ReplyDescriptorsUnion_t reply_desc; 3213 3214 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3215 for (i = 0; i < count; i++) 3216 sc->last_reply_idx[i] = 0; 3217 3218 reply_desc = sc->reply_desc_mem; 3219 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 3220 reply_desc->Words = MRSAS_ULONG_MAX; 3221 } 3222 } 3223 3224 /* 3225 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 3226 * input: Adapter Context. 3227 * 3228 * This function will run from thread context so that it can sleep. 1. Do not 3229 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 3230 * to complete for 180 seconds. 3. If #2 does not find any outstanding 3231 * command Controller is in working state, so skip OCR. Otherwise, do 3232 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the 3233 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post 3234 * OCR, Re-fire Management command and move Controller to Operation state. 3235 */ 3236 int 3237 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) 3238 { 3239 int retval = SUCCESS, i, j, retry = 0; 3240 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 3241 union ccb *ccb; 3242 struct mrsas_mfi_cmd *mfi_cmd; 3243 struct mrsas_mpt_cmd *mpt_cmd; 3244 union mrsas_evt_class_locale class_locale; 3245 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3246 3247 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3248 device_printf(sc->mrsas_dev, 3249 "mrsas: Hardware critical error, returning FAIL.\n"); 3250 return FAIL; 3251 } 3252 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3253 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 3254 mrsas_disable_intr(sc); 3255 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", 3256 sc->mrsas_fw_fault_check_delay * hz); 3257 3258 /* First try waiting for commands to complete */ 3259 if (mrsas_wait_for_outstanding(sc, reset_reason)) { 3260 mrsas_dprint(sc, MRSAS_OCR, 3261 "resetting adapter from %s.\n", 3262 __func__); 3263 /* Now return commands back to the CAM layer */ 3264 mtx_unlock(&sc->sim_lock); 3265 for (i = 0; i < sc->max_fw_cmds; i++) { 3266 mpt_cmd = sc->mpt_cmd_list[i]; 3267 3268 if (mpt_cmd->peer_cmd) { 3269 mrsas_dprint(sc, MRSAS_OCR, 3270 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n", 3271 i, mpt_cmd, mpt_cmd->peer_cmd); 3272 } 3273 3274 if (mpt_cmd->ccb_ptr) { 3275 if (mpt_cmd->callout_owner) { 3276 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 3277 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3278 mrsas_cmd_done(sc, mpt_cmd); 3279 } else { 3280 mpt_cmd->ccb_ptr = NULL; 3281 mrsas_release_mpt_cmd(mpt_cmd); 3282 } 3283 } 3284 } 3285 3286 mrsas_atomic_set(&sc->fw_outstanding, 0); 3287 3288 mtx_lock(&sc->sim_lock); 3289 3290 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3291 outbound_scratch_pad)); 3292 abs_state = status_reg & MFI_STATE_MASK; 3293 reset_adapter = status_reg & MFI_RESET_ADAPTER; 3294 if (sc->disableOnlineCtrlReset || 3295 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 3296 /* Reset not supported, kill adapter */ 3297 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); 3298 mrsas_kill_hba(sc); 3299 retval = FAIL; 3300 goto out; 3301 } 3302 /* Now try to reset the chip */ 3303 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 3304 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3305 MPI2_WRSEQ_FLUSH_KEY_VALUE); 3306 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3307 MPI2_WRSEQ_1ST_KEY_VALUE); 3308 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3309 MPI2_WRSEQ_2ND_KEY_VALUE); 3310 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3311 MPI2_WRSEQ_3RD_KEY_VALUE); 3312 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3313 MPI2_WRSEQ_4TH_KEY_VALUE); 3314 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3315 MPI2_WRSEQ_5TH_KEY_VALUE); 3316 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3317 MPI2_WRSEQ_6TH_KEY_VALUE); 3318 3319 /* Check that the diag write enable (DRWE) bit is on */ 3320 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3321 fusion_host_diag)); 3322 retry = 0; 3323 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3324 DELAY(100 * 1000); 3325 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3326 fusion_host_diag)); 3327 if (retry++ == 100) { 3328 mrsas_dprint(sc, MRSAS_OCR, 3329 "Host diag unlock failed!\n"); 3330 break; 3331 } 3332 } 3333 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 3334 continue; 3335 3336 /* Send chip reset command */ 3337 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), 3338 host_diag | HOST_DIAG_RESET_ADAPTER); 3339 DELAY(3000 * 1000); 3340 3341 /* Make sure reset adapter bit is cleared */ 3342 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3343 fusion_host_diag)); 3344 retry = 0; 3345 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3346 DELAY(100 * 1000); 3347 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3348 fusion_host_diag)); 3349 if (retry++ == 1000) { 3350 mrsas_dprint(sc, MRSAS_OCR, 3351 "Diag reset adapter never cleared!\n"); 3352 break; 3353 } 3354 } 3355 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3356 continue; 3357 3358 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3359 outbound_scratch_pad)) & MFI_STATE_MASK; 3360 retry = 0; 3361 3362 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3363 DELAY(100 * 1000); 3364 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3365 outbound_scratch_pad)) & MFI_STATE_MASK; 3366 } 3367 if (abs_state <= MFI_STATE_FW_INIT) { 3368 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," 3369 " state = 0x%x\n", abs_state); 3370 continue; 3371 } 3372 /* Wait for FW to become ready */ 3373 if (mrsas_transition_to_ready(sc, 1)) { 3374 mrsas_dprint(sc, MRSAS_OCR, 3375 "mrsas: Failed to transition controller to ready.\n"); 3376 continue; 3377 } 3378 mrsas_reset_reply_desc(sc); 3379 if (mrsas_ioc_init(sc)) { 3380 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); 3381 continue; 3382 } 3383 for (j = 0; j < sc->max_fw_cmds; j++) { 3384 mpt_cmd = sc->mpt_cmd_list[j]; 3385 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3386 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; 3387 /* If not an IOCTL then release the command else re-fire */ 3388 if (!mfi_cmd->sync_cmd) { 3389 mrsas_release_mfi_cmd(mfi_cmd); 3390 } else { 3391 req_desc = mrsas_get_request_desc(sc, 3392 mfi_cmd->cmd_id.context.smid - 1); 3393 mrsas_dprint(sc, MRSAS_OCR, 3394 "Re-fire command DCMD opcode 0x%x index %d\n ", 3395 mfi_cmd->frame->dcmd.opcode, j); 3396 if (!req_desc) 3397 device_printf(sc->mrsas_dev, 3398 "Cannot build MPT cmd.\n"); 3399 else 3400 mrsas_fire_cmd(sc, req_desc->addr.u.low, 3401 req_desc->addr.u.high); 3402 } 3403 } 3404 } 3405 3406 /* Reset load balance info */ 3407 memset(sc->load_balance_info, 0, 3408 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); 3409 3410 if (mrsas_get_ctrl_info(sc)) { 3411 mrsas_kill_hba(sc); 3412 retval = FAIL; 3413 goto out; 3414 } 3415 if (!mrsas_get_map_info(sc)) 3416 mrsas_sync_map_info(sc); 3417 3418 megasas_setup_jbod_map(sc); 3419 3420 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { 3421 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 3422 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 3423 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 3424 } 3425 } 3426 3427 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3428 mrsas_enable_intr(sc); 3429 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3430 3431 /* Register AEN with FW for last sequence number */ 3432 class_locale.members.reserved = 0; 3433 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3434 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3435 3436 mtx_unlock(&sc->sim_lock); 3437 if (mrsas_register_aen(sc, sc->last_seq_num, 3438 class_locale.word)) { 3439 device_printf(sc->mrsas_dev, 3440 "ERROR: AEN registration FAILED from OCR !!! " 3441 "Further events from the controller cannot be notified." 3442 "Either there is some problem in the controller" 3443 "or the controller does not support AEN.\n" 3444 "Please contact to the SUPPORT TEAM if the problem persists\n"); 3445 } 3446 mtx_lock(&sc->sim_lock); 3447 3448 /* Adapter reset completed successfully */ 3449 device_printf(sc->mrsas_dev, "Reset successful\n"); 3450 retval = SUCCESS; 3451 goto out; 3452 } 3453 /* Reset failed, kill the adapter */ 3454 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); 3455 mrsas_kill_hba(sc); 3456 retval = FAIL; 3457 } else { 3458 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3459 mrsas_enable_intr(sc); 3460 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3461 } 3462 out: 3463 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3464 mrsas_dprint(sc, MRSAS_OCR, 3465 "Reset Exit with %d.\n", retval); 3466 return retval; 3467 } 3468 3469 /* 3470 * mrsas_kill_hba: Kill HBA when OCR is not supported 3471 * input: Adapter Context. 3472 * 3473 * This function will kill HBA when OCR is not supported. 3474 */ 3475 void 3476 mrsas_kill_hba(struct mrsas_softc *sc) 3477 { 3478 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; 3479 DELAY(1000 * 1000); 3480 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); 3481 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3482 MFI_STOP_ADP); 3483 /* Flush */ 3484 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); 3485 mrsas_complete_outstanding_ioctls(sc); 3486 } 3487 3488 /** 3489 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba 3490 * input: Controller softc 3491 * 3492 * Returns void 3493 */ 3494 void 3495 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) 3496 { 3497 int i; 3498 struct mrsas_mpt_cmd *cmd_mpt; 3499 struct mrsas_mfi_cmd *cmd_mfi; 3500 u_int32_t count, MSIxIndex; 3501 3502 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3503 for (i = 0; i < sc->max_fw_cmds; i++) { 3504 cmd_mpt = sc->mpt_cmd_list[i]; 3505 3506 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3507 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 3508 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { 3509 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3510 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, 3511 cmd_mpt->io_request->RaidContext.raid_context.status); 3512 } 3513 } 3514 } 3515 } 3516 3517 /* 3518 * mrsas_wait_for_outstanding: Wait for outstanding commands 3519 * input: Adapter Context. 3520 * 3521 * This function will wait for 180 seconds for outstanding commands to be 3522 * completed. 3523 */ 3524 int 3525 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) 3526 { 3527 int i, outstanding, retval = 0; 3528 u_int32_t fw_state, count, MSIxIndex; 3529 3530 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { 3531 if (sc->remove_in_progress) { 3532 mrsas_dprint(sc, MRSAS_OCR, 3533 "Driver remove or shutdown called.\n"); 3534 retval = 1; 3535 goto out; 3536 } 3537 /* Check if firmware is in fault state */ 3538 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3539 outbound_scratch_pad)) & MFI_STATE_MASK; 3540 if (fw_state == MFI_STATE_FAULT) { 3541 mrsas_dprint(sc, MRSAS_OCR, 3542 "Found FW in FAULT state, will reset adapter.\n"); 3543 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3544 mtx_unlock(&sc->sim_lock); 3545 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3546 mrsas_complete_cmd(sc, MSIxIndex); 3547 mtx_lock(&sc->sim_lock); 3548 retval = 1; 3549 goto out; 3550 } 3551 if (check_reason == MFI_DCMD_TIMEOUT_OCR) { 3552 mrsas_dprint(sc, MRSAS_OCR, 3553 "DCMD IO TIMEOUT detected, will reset adapter.\n"); 3554 retval = 1; 3555 goto out; 3556 } 3557 outstanding = mrsas_atomic_read(&sc->fw_outstanding); 3558 if (!outstanding) 3559 goto out; 3560 3561 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 3562 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " 3563 "commands to complete\n", i, outstanding); 3564 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3565 mtx_unlock(&sc->sim_lock); 3566 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3567 mrsas_complete_cmd(sc, MSIxIndex); 3568 mtx_lock(&sc->sim_lock); 3569 } 3570 DELAY(1000 * 1000); 3571 } 3572 3573 if (mrsas_atomic_read(&sc->fw_outstanding)) { 3574 mrsas_dprint(sc, MRSAS_OCR, 3575 " pending commands remain after waiting," 3576 " will reset adapter.\n"); 3577 retval = 1; 3578 } 3579 out: 3580 return retval; 3581 } 3582 3583 /* 3584 * mrsas_release_mfi_cmd: Return a cmd to free command pool 3585 * input: Command packet for return to free cmd pool 3586 * 3587 * This function returns the MFI & MPT command to the command list. 3588 */ 3589 void 3590 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi) 3591 { 3592 struct mrsas_softc *sc = cmd_mfi->sc; 3593 struct mrsas_mpt_cmd *cmd_mpt; 3594 3595 mtx_lock(&sc->mfi_cmd_pool_lock); 3596 /* 3597 * Release the mpt command (if at all it is allocated 3598 * associated with the mfi command 3599 */ 3600 if (cmd_mfi->cmd_id.context.smid) { 3601 mtx_lock(&sc->mpt_cmd_pool_lock); 3602 /* Get the mpt cmd from mfi cmd frame's smid value */ 3603 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1]; 3604 cmd_mpt->flags = 0; 3605 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 3606 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next); 3607 mtx_unlock(&sc->mpt_cmd_pool_lock); 3608 } 3609 /* Release the mfi command */ 3610 cmd_mfi->ccb_ptr = NULL; 3611 cmd_mfi->cmd_id.frame_count = 0; 3612 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next); 3613 mtx_unlock(&sc->mfi_cmd_pool_lock); 3614 3615 return; 3616 } 3617 3618 /* 3619 * mrsas_get_controller_info: Returns FW's controller structure 3620 * input: Adapter soft state 3621 * Controller information structure 3622 * 3623 * Issues an internal command (DCMD) to get the FW's controller structure. This 3624 * information is mainly used to find out the maximum IO transfer per command 3625 * supported by the FW. 3626 */ 3627 static int 3628 mrsas_get_ctrl_info(struct mrsas_softc *sc) 3629 { 3630 int retcode = 0; 3631 u_int8_t do_ocr = 1; 3632 struct mrsas_mfi_cmd *cmd; 3633 struct mrsas_dcmd_frame *dcmd; 3634 3635 cmd = mrsas_get_mfi_cmd(sc); 3636 3637 if (!cmd) { 3638 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 3639 return -ENOMEM; 3640 } 3641 dcmd = &cmd->frame->dcmd; 3642 3643 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 3644 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 3645 mrsas_release_mfi_cmd(cmd); 3646 return -ENOMEM; 3647 } 3648 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3649 3650 dcmd->cmd = MFI_CMD_DCMD; 3651 dcmd->cmd_status = 0xFF; 3652 dcmd->sge_count = 1; 3653 dcmd->flags = MFI_FRAME_DIR_READ; 3654 dcmd->timeout = 0; 3655 dcmd->pad_0 = 0; 3656 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info)); 3657 dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO); 3658 dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF); 3659 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info)); 3660 3661 if (!sc->mask_interrupts) 3662 retcode = mrsas_issue_blocked_cmd(sc, cmd); 3663 else 3664 retcode = mrsas_issue_polled(sc, cmd); 3665 3666 if (retcode == ETIMEDOUT) 3667 goto dcmd_timeout; 3668 else { 3669 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 3670 le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties); 3671 le32_to_cpus(&sc->ctrl_info->adapterOperations2); 3672 le32_to_cpus(&sc->ctrl_info->adapterOperations3); 3673 le16_to_cpus(&sc->ctrl_info->adapterOperations4); 3674 } 3675 3676 do_ocr = 0; 3677 mrsas_update_ext_vd_details(sc); 3678 3679 sc->use_seqnum_jbod_fp = 3680 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; 3681 sc->support_morethan256jbod = 3682 sc->ctrl_info->adapterOperations4.supportPdMapTargetId; 3683 3684 sc->disableOnlineCtrlReset = 3685 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3686 3687 dcmd_timeout: 3688 mrsas_free_ctlr_info_cmd(sc); 3689 3690 if (do_ocr) 3691 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 3692 3693 if (!sc->mask_interrupts) 3694 mrsas_release_mfi_cmd(cmd); 3695 3696 return (retcode); 3697 } 3698 3699 /* 3700 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD 3701 * input: 3702 * sc - Controller's softc 3703 */ 3704 static void 3705 mrsas_update_ext_vd_details(struct mrsas_softc *sc) 3706 { 3707 u_int32_t ventura_map_sz = 0; 3708 sc->max256vdSupport = 3709 sc->ctrl_info->adapterOperations3.supportMaxExtLDs; 3710 3711 /* Below is additional check to address future FW enhancement */ 3712 if (sc->ctrl_info->max_lds > 64) 3713 sc->max256vdSupport = 1; 3714 3715 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS 3716 * MRSAS_MAX_DEV_PER_CHANNEL; 3717 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS 3718 * MRSAS_MAX_DEV_PER_CHANNEL; 3719 if (sc->max256vdSupport) { 3720 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 3721 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3722 } else { 3723 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 3724 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3725 } 3726 3727 if (sc->maxRaidMapSize) { 3728 ventura_map_sz = sc->maxRaidMapSize * 3729 MR_MIN_MAP_SIZE; 3730 sc->current_map_sz = ventura_map_sz; 3731 sc->max_map_sz = ventura_map_sz; 3732 } else { 3733 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + 3734 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); 3735 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); 3736 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); 3737 if (sc->max256vdSupport) 3738 sc->current_map_sz = sc->new_map_sz; 3739 else 3740 sc->current_map_sz = sc->old_map_sz; 3741 } 3742 3743 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL); 3744 #if VD_EXT_DEBUG 3745 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n", 3746 sc->maxRaidMapSize); 3747 device_printf(sc->mrsas_dev, 3748 "new_map_sz = 0x%x, old_map_sz = 0x%x, " 3749 "ventura_map_sz = 0x%x, current_map_sz = 0x%x " 3750 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n", 3751 sc->new_map_sz, sc->old_map_sz, ventura_map_sz, 3752 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL)); 3753 #endif 3754 } 3755 3756 /* 3757 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 3758 * input: Adapter soft state 3759 * 3760 * Allocates DMAable memory for the controller info internal command. 3761 */ 3762 int 3763 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 3764 { 3765 int ctlr_info_size; 3766 3767 /* Allocate get controller info command */ 3768 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 3769 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3770 1, 0, 3771 BUS_SPACE_MAXADDR_32BIT, 3772 BUS_SPACE_MAXADDR, 3773 NULL, NULL, 3774 ctlr_info_size, 3775 1, 3776 ctlr_info_size, 3777 BUS_DMA_ALLOCNOW, 3778 NULL, NULL, 3779 &sc->ctlr_info_tag)) { 3780 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 3781 return (ENOMEM); 3782 } 3783 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 3784 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 3785 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 3786 return (ENOMEM); 3787 } 3788 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 3789 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 3790 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 3791 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 3792 return (ENOMEM); 3793 } 3794 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 3795 return (0); 3796 } 3797 3798 /* 3799 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 3800 * input: Adapter soft state 3801 * 3802 * Deallocates memory of the get controller info cmd. 3803 */ 3804 void 3805 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) 3806 { 3807 if (sc->ctlr_info_phys_addr) 3808 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); 3809 if (sc->ctlr_info_mem != NULL) 3810 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); 3811 if (sc->ctlr_info_tag != NULL) 3812 bus_dma_tag_destroy(sc->ctlr_info_tag); 3813 } 3814 3815 /* 3816 * mrsas_issue_polled: Issues a polling command 3817 * inputs: Adapter soft state 3818 * Command packet to be issued 3819 * 3820 * This function is for posting of internal commands to Firmware. MFI requires 3821 * the cmd_status to be set to 0xFF before posting. The maximun wait time of 3822 * the poll response timer is 180 seconds. 3823 */ 3824 int 3825 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3826 { 3827 struct mrsas_header *frame_hdr = &cmd->frame->hdr; 3828 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3829 int i, retcode = SUCCESS; 3830 3831 frame_hdr->cmd_status = 0xFF; 3832 frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 3833 3834 /* Issue the frame using inbound queue port */ 3835 if (mrsas_issue_dcmd(sc, cmd)) { 3836 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3837 return (1); 3838 } 3839 /* 3840 * Poll response timer to wait for Firmware response. While this 3841 * timer with the DELAY call could block CPU, the time interval for 3842 * this is only 1 millisecond. 3843 */ 3844 if (frame_hdr->cmd_status == 0xFF) { 3845 for (i = 0; i < (max_wait * 1000); i++) { 3846 if (frame_hdr->cmd_status == 0xFF) 3847 DELAY(1000); 3848 else 3849 break; 3850 } 3851 } 3852 if (frame_hdr->cmd_status == 0xFF) { 3853 device_printf(sc->mrsas_dev, "DCMD timed out after %d " 3854 "seconds from %s\n", max_wait, __func__); 3855 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", 3856 cmd->frame->dcmd.opcode); 3857 retcode = ETIMEDOUT; 3858 } 3859 return (retcode); 3860 } 3861 3862 /* 3863 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd 3864 * input: Adapter soft state mfi cmd pointer 3865 * 3866 * This function is called by mrsas_issued_blocked_cmd() and 3867 * mrsas_issued_polled(), to build the MPT command and then fire the command 3868 * to Firmware. 3869 */ 3870 int 3871 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3872 { 3873 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3874 3875 req_desc = mrsas_build_mpt_cmd(sc, cmd); 3876 if (!req_desc) { 3877 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 3878 return (1); 3879 } 3880 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 3881 3882 return (0); 3883 } 3884 3885 /* 3886 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd 3887 * input: Adapter soft state mfi cmd to build 3888 * 3889 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru 3890 * command and prepares the MPT command to send to Firmware. 3891 */ 3892 MRSAS_REQUEST_DESCRIPTOR_UNION * 3893 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3894 { 3895 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3896 u_int16_t index; 3897 3898 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 3899 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 3900 return NULL; 3901 } 3902 index = cmd->cmd_id.context.smid; 3903 3904 req_desc = mrsas_get_request_desc(sc, index - 1); 3905 if (!req_desc) 3906 return NULL; 3907 3908 req_desc->addr.Words = 0; 3909 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3910 3911 req_desc->SCSIIO.SMID = htole16(index); 3912 3913 return (req_desc); 3914 } 3915 3916 /* 3917 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command 3918 * input: Adapter soft state mfi cmd pointer 3919 * 3920 * The MPT command and the io_request are setup as a passthru command. The SGE 3921 * chain address is set to frame_phys_addr of the MFI command. 3922 */ 3923 u_int8_t 3924 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) 3925 { 3926 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 3927 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; 3928 struct mrsas_mpt_cmd *mpt_cmd; 3929 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; 3930 3931 mpt_cmd = mrsas_get_mpt_cmd(sc); 3932 if (!mpt_cmd) 3933 return (1); 3934 3935 /* Save the smid. To be used for returning the cmd */ 3936 mfi_cmd->cmd_id.context.smid = mpt_cmd->index; 3937 3938 mpt_cmd->sync_cmd_idx = mfi_cmd->index; 3939 3940 /* 3941 * For cmds where the flag is set, store the flag and check on 3942 * completion. For cmds with this flag, don't call 3943 * mrsas_complete_cmd. 3944 */ 3945 3946 if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 3947 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3948 3949 io_req = mpt_cmd->io_request; 3950 3951 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 3952 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; 3953 3954 sgl_ptr_end += sc->max_sge_in_main_msg - 1; 3955 sgl_ptr_end->Flags = 0; 3956 } 3957 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; 3958 3959 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3960 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; 3961 io_req->ChainOffset = sc->chain_offset_mfi_pthru; 3962 3963 mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr); 3964 3965 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3966 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3967 3968 mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz); 3969 3970 return (0); 3971 } 3972 3973 /* 3974 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds 3975 * input: Adapter soft state Command to be issued 3976 * 3977 * This function waits on an event for the command to be returned from the ISR. 3978 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing 3979 * internal and ioctl commands. 3980 */ 3981 int 3982 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3983 { 3984 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3985 unsigned long total_time = 0; 3986 int retcode = SUCCESS; 3987 3988 /* Initialize cmd_status */ 3989 cmd->cmd_status = 0xFF; 3990 3991 /* Build MPT-MFI command for issue to FW */ 3992 if (mrsas_issue_dcmd(sc, cmd)) { 3993 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3994 return (1); 3995 } 3996 sc->chan = (void *)&cmd; 3997 3998 while (1) { 3999 if (cmd->cmd_status == 0xFF) { 4000 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 4001 } else 4002 break; 4003 4004 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL 4005 * command */ 4006 total_time++; 4007 if (total_time >= max_wait) { 4008 device_printf(sc->mrsas_dev, 4009 "Internal command timed out after %d seconds.\n", max_wait); 4010 retcode = 1; 4011 break; 4012 } 4013 } 4014 } 4015 4016 if (cmd->cmd_status == 0xFF) { 4017 device_printf(sc->mrsas_dev, "DCMD timed out after %d " 4018 "seconds from %s\n", max_wait, __func__); 4019 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", 4020 cmd->frame->dcmd.opcode); 4021 retcode = ETIMEDOUT; 4022 } 4023 return (retcode); 4024 } 4025 4026 /* 4027 * mrsas_complete_mptmfi_passthru: Completes a command 4028 * input: @sc: Adapter soft state 4029 * @cmd: Command to be completed 4030 * @status: cmd completion status 4031 * 4032 * This function is called from mrsas_complete_cmd() after an interrupt is 4033 * received from Firmware, and io_request->Function is 4034 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. 4035 */ 4036 void 4037 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, 4038 u_int8_t status) 4039 { 4040 struct mrsas_header *hdr = &cmd->frame->hdr; 4041 u_int8_t cmd_status = cmd->frame->hdr.cmd_status; 4042 4043 /* Reset the retry counter for future re-tries */ 4044 cmd->retry_for_fw_reset = 0; 4045 4046 if (cmd->ccb_ptr) 4047 cmd->ccb_ptr = NULL; 4048 4049 switch (hdr->cmd) { 4050 case MFI_CMD_INVALID: 4051 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); 4052 break; 4053 case MFI_CMD_PD_SCSI_IO: 4054 case MFI_CMD_LD_SCSI_IO: 4055 /* 4056 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 4057 * issued either through an IO path or an IOCTL path. If it 4058 * was via IOCTL, we will send it to internal completion. 4059 */ 4060 if (cmd->sync_cmd) { 4061 cmd->sync_cmd = 0; 4062 mrsas_wakeup(sc, cmd); 4063 break; 4064 } 4065 case MFI_CMD_SMP: 4066 case MFI_CMD_STP: 4067 case MFI_CMD_DCMD: 4068 /* Check for LD map update */ 4069 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && 4070 (cmd->frame->dcmd.mbox.b[1] == 1)) { 4071 sc->fast_path_io = 0; 4072 mtx_lock(&sc->raidmap_lock); 4073 sc->map_update_cmd = NULL; 4074 if (cmd_status != 0) { 4075 if (cmd_status != MFI_STAT_NOT_FOUND) 4076 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); 4077 else { 4078 mrsas_release_mfi_cmd(cmd); 4079 mtx_unlock(&sc->raidmap_lock); 4080 break; 4081 } 4082 } else 4083 sc->map_id++; 4084 mrsas_release_mfi_cmd(cmd); 4085 if (MR_ValidateMapInfo(sc)) 4086 sc->fast_path_io = 0; 4087 else 4088 sc->fast_path_io = 1; 4089 mrsas_sync_map_info(sc); 4090 mtx_unlock(&sc->raidmap_lock); 4091 break; 4092 } 4093 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 4094 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 4095 sc->mrsas_aen_triggered = 0; 4096 } 4097 /* FW has an updated PD sequence */ 4098 if ((cmd->frame->dcmd.opcode == 4099 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 4100 (cmd->frame->dcmd.mbox.b[0] == 1)) { 4101 mtx_lock(&sc->raidmap_lock); 4102 sc->jbod_seq_cmd = NULL; 4103 mrsas_release_mfi_cmd(cmd); 4104 4105 if (cmd_status == MFI_STAT_OK) { 4106 sc->pd_seq_map_id++; 4107 /* Re-register a pd sync seq num cmd */ 4108 if (megasas_sync_pd_seq_num(sc, true)) 4109 sc->use_seqnum_jbod_fp = 0; 4110 } else { 4111 sc->use_seqnum_jbod_fp = 0; 4112 device_printf(sc->mrsas_dev, 4113 "Jbod map sync failed, status=%x\n", cmd_status); 4114 } 4115 mtx_unlock(&sc->raidmap_lock); 4116 break; 4117 } 4118 /* See if got an event notification */ 4119 if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT) 4120 mrsas_complete_aen(sc, cmd); 4121 else 4122 mrsas_wakeup(sc, cmd); 4123 break; 4124 case MFI_CMD_ABORT: 4125 /* Command issued to abort another cmd return */ 4126 mrsas_complete_abort(sc, cmd); 4127 break; 4128 default: 4129 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); 4130 break; 4131 } 4132 } 4133 4134 /* 4135 * mrsas_wakeup: Completes an internal command 4136 * input: Adapter soft state 4137 * Command to be completed 4138 * 4139 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait 4140 * timer is started. This function is called from 4141 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up 4142 * from the command wait. 4143 */ 4144 void 4145 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4146 { 4147 cmd->cmd_status = cmd->frame->io.cmd_status; 4148 4149 if (cmd->cmd_status == 0xFF) 4150 cmd->cmd_status = 0; 4151 4152 sc->chan = (void *)&cmd; 4153 wakeup_one((void *)&sc->chan); 4154 return; 4155 } 4156 4157 /* 4158 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: 4159 * Adapter soft state Shutdown/Hibernate 4160 * 4161 * This function issues a DCMD internal command to Firmware to initiate shutdown 4162 * of the controller. 4163 */ 4164 static void 4165 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 4166 { 4167 struct mrsas_mfi_cmd *cmd; 4168 struct mrsas_dcmd_frame *dcmd; 4169 4170 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4171 return; 4172 4173 cmd = mrsas_get_mfi_cmd(sc); 4174 if (!cmd) { 4175 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 4176 return; 4177 } 4178 if (sc->aen_cmd) 4179 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 4180 if (sc->map_update_cmd) 4181 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 4182 if (sc->jbod_seq_cmd) 4183 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); 4184 4185 dcmd = &cmd->frame->dcmd; 4186 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4187 4188 dcmd->cmd = MFI_CMD_DCMD; 4189 dcmd->cmd_status = 0x0; 4190 dcmd->sge_count = 0; 4191 dcmd->flags = MFI_FRAME_DIR_NONE; 4192 dcmd->timeout = 0; 4193 dcmd->pad_0 = 0; 4194 dcmd->data_xfer_len = 0; 4195 dcmd->opcode = opcode; 4196 4197 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 4198 4199 mrsas_issue_blocked_cmd(sc, cmd); 4200 mrsas_release_mfi_cmd(cmd); 4201 4202 return; 4203 } 4204 4205 /* 4206 * mrsas_flush_cache: Requests FW to flush all its caches input: 4207 * Adapter soft state 4208 * 4209 * This function is issues a DCMD internal command to Firmware to initiate 4210 * flushing of all caches. 4211 */ 4212 static void 4213 mrsas_flush_cache(struct mrsas_softc *sc) 4214 { 4215 struct mrsas_mfi_cmd *cmd; 4216 struct mrsas_dcmd_frame *dcmd; 4217 4218 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4219 return; 4220 4221 cmd = mrsas_get_mfi_cmd(sc); 4222 if (!cmd) { 4223 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); 4224 return; 4225 } 4226 dcmd = &cmd->frame->dcmd; 4227 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4228 4229 dcmd->cmd = MFI_CMD_DCMD; 4230 dcmd->cmd_status = 0x0; 4231 dcmd->sge_count = 0; 4232 dcmd->flags = MFI_FRAME_DIR_NONE; 4233 dcmd->timeout = 0; 4234 dcmd->pad_0 = 0; 4235 dcmd->data_xfer_len = 0; 4236 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 4237 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4238 4239 mrsas_issue_blocked_cmd(sc, cmd); 4240 mrsas_release_mfi_cmd(cmd); 4241 4242 return; 4243 } 4244 4245 int 4246 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) 4247 { 4248 int retcode = 0; 4249 u_int8_t do_ocr = 1; 4250 struct mrsas_mfi_cmd *cmd; 4251 struct mrsas_dcmd_frame *dcmd; 4252 uint32_t pd_seq_map_sz; 4253 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 4254 bus_addr_t pd_seq_h; 4255 4256 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 4257 (sizeof(struct MR_PD_CFG_SEQ) * 4258 (MAX_PHYSICAL_DEVICES - 1)); 4259 4260 cmd = mrsas_get_mfi_cmd(sc); 4261 if (!cmd) { 4262 device_printf(sc->mrsas_dev, 4263 "Cannot alloc for ld map info cmd.\n"); 4264 return 1; 4265 } 4266 dcmd = &cmd->frame->dcmd; 4267 4268 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; 4269 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; 4270 if (!pd_sync) { 4271 device_printf(sc->mrsas_dev, 4272 "Failed to alloc mem for jbod map info.\n"); 4273 mrsas_release_mfi_cmd(cmd); 4274 return (ENOMEM); 4275 } 4276 memset(pd_sync, 0, pd_seq_map_sz); 4277 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4278 dcmd->cmd = MFI_CMD_DCMD; 4279 dcmd->cmd_status = 0xFF; 4280 dcmd->sge_count = 1; 4281 dcmd->timeout = 0; 4282 dcmd->pad_0 = 0; 4283 dcmd->data_xfer_len = htole32(pd_seq_map_sz); 4284 dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); 4285 dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF); 4286 dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz); 4287 4288 if (pend) { 4289 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; 4290 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); 4291 sc->jbod_seq_cmd = cmd; 4292 if (mrsas_issue_dcmd(sc, cmd)) { 4293 device_printf(sc->mrsas_dev, 4294 "Fail to send sync map info command.\n"); 4295 return 1; 4296 } else 4297 return 0; 4298 } else 4299 dcmd->flags = htole16(MFI_FRAME_DIR_READ); 4300 4301 retcode = mrsas_issue_polled(sc, cmd); 4302 if (retcode == ETIMEDOUT) 4303 goto dcmd_timeout; 4304 4305 if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) { 4306 device_printf(sc->mrsas_dev, 4307 "driver supports max %d JBOD, but FW reports %d\n", 4308 MAX_PHYSICAL_DEVICES, pd_sync->count); 4309 retcode = -EINVAL; 4310 } 4311 if (!retcode) 4312 sc->pd_seq_map_id++; 4313 do_ocr = 0; 4314 4315 dcmd_timeout: 4316 if (do_ocr) 4317 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4318 4319 return (retcode); 4320 } 4321 4322 /* 4323 * mrsas_get_map_info: Load and validate RAID map input: 4324 * Adapter instance soft state 4325 * 4326 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load 4327 * and validate RAID map. It returns 0 if successful, 1 other- wise. 4328 */ 4329 static int 4330 mrsas_get_map_info(struct mrsas_softc *sc) 4331 { 4332 uint8_t retcode = 0; 4333 4334 sc->fast_path_io = 0; 4335 if (!mrsas_get_ld_map_info(sc)) { 4336 retcode = MR_ValidateMapInfo(sc); 4337 if (retcode == 0) { 4338 sc->fast_path_io = 1; 4339 return 0; 4340 } 4341 } 4342 return 1; 4343 } 4344 4345 /* 4346 * mrsas_get_ld_map_info: Get FW's ld_map structure input: 4347 * Adapter instance soft state 4348 * 4349 * Issues an internal command (DCMD) to get the FW's controller PD list 4350 * structure. 4351 */ 4352 static int 4353 mrsas_get_ld_map_info(struct mrsas_softc *sc) 4354 { 4355 int retcode = 0; 4356 struct mrsas_mfi_cmd *cmd; 4357 struct mrsas_dcmd_frame *dcmd; 4358 void *map; 4359 bus_addr_t map_phys_addr = 0; 4360 4361 cmd = mrsas_get_mfi_cmd(sc); 4362 if (!cmd) { 4363 device_printf(sc->mrsas_dev, 4364 "Cannot alloc for ld map info cmd.\n"); 4365 return 1; 4366 } 4367 dcmd = &cmd->frame->dcmd; 4368 4369 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 4370 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 4371 if (!map) { 4372 device_printf(sc->mrsas_dev, 4373 "Failed to alloc mem for ld map info.\n"); 4374 mrsas_release_mfi_cmd(cmd); 4375 return (ENOMEM); 4376 } 4377 memset(map, 0, sizeof(sc->max_map_sz)); 4378 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4379 4380 dcmd->cmd = MFI_CMD_DCMD; 4381 dcmd->cmd_status = 0xFF; 4382 dcmd->sge_count = 1; 4383 dcmd->flags = htole16(MFI_FRAME_DIR_READ); 4384 dcmd->timeout = 0; 4385 dcmd->pad_0 = 0; 4386 dcmd->data_xfer_len = htole32(sc->current_map_sz); 4387 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); 4388 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); 4389 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); 4390 4391 retcode = mrsas_issue_polled(sc, cmd); 4392 if (retcode == ETIMEDOUT) 4393 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4394 4395 return (retcode); 4396 } 4397 4398 /* 4399 * mrsas_sync_map_info: Get FW's ld_map structure input: 4400 * Adapter instance soft state 4401 * 4402 * Issues an internal command (DCMD) to get the FW's controller PD list 4403 * structure. 4404 */ 4405 static int 4406 mrsas_sync_map_info(struct mrsas_softc *sc) 4407 { 4408 int retcode = 0, i; 4409 struct mrsas_mfi_cmd *cmd; 4410 struct mrsas_dcmd_frame *dcmd; 4411 uint32_t size_sync_info, num_lds; 4412 MR_LD_TARGET_SYNC *target_map = NULL; 4413 MR_DRV_RAID_MAP_ALL *map; 4414 MR_LD_RAID *raid; 4415 MR_LD_TARGET_SYNC *ld_sync; 4416 bus_addr_t map_phys_addr = 0; 4417 4418 cmd = mrsas_get_mfi_cmd(sc); 4419 if (!cmd) { 4420 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); 4421 return ENOMEM; 4422 } 4423 map = sc->ld_drv_map[sc->map_id & 1]; 4424 num_lds = map->raidMap.ldCount; 4425 4426 dcmd = &cmd->frame->dcmd; 4427 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 4428 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4429 4430 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; 4431 memset(target_map, 0, sc->max_map_sz); 4432 4433 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 4434 4435 ld_sync = (MR_LD_TARGET_SYNC *) target_map; 4436 4437 for (i = 0; i < num_lds; i++, ld_sync++) { 4438 raid = MR_LdRaidGet(i, map); 4439 ld_sync->targetId = MR_GetLDTgtId(i, map); 4440 ld_sync->seqNum = raid->seqNum; 4441 } 4442 4443 dcmd->cmd = MFI_CMD_DCMD; 4444 dcmd->cmd_status = 0xFF; 4445 dcmd->sge_count = 1; 4446 dcmd->flags = htole16(MFI_FRAME_DIR_WRITE); 4447 dcmd->timeout = 0; 4448 dcmd->pad_0 = 0; 4449 dcmd->data_xfer_len = htole32(sc->current_map_sz); 4450 dcmd->mbox.b[0] = num_lds; 4451 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 4452 dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO); 4453 dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF); 4454 dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz); 4455 4456 sc->map_update_cmd = cmd; 4457 if (mrsas_issue_dcmd(sc, cmd)) { 4458 device_printf(sc->mrsas_dev, 4459 "Fail to send sync map info command.\n"); 4460 return (1); 4461 } 4462 return (retcode); 4463 } 4464 4465 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO 4466 * dcmd.mbox.s[0] - deviceId for this physical drive 4467 * dcmd.sge IN - ptr to returned MR_PD_INFO structure 4468 * Desc: Firmware return the physical drive info structure 4469 * 4470 */ 4471 static void 4472 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id) 4473 { 4474 int retcode; 4475 u_int8_t do_ocr = 1; 4476 struct mrsas_mfi_cmd *cmd; 4477 struct mrsas_dcmd_frame *dcmd; 4478 4479 cmd = mrsas_get_mfi_cmd(sc); 4480 4481 if (!cmd) { 4482 device_printf(sc->mrsas_dev, 4483 "Cannot alloc for get PD info cmd\n"); 4484 return; 4485 } 4486 dcmd = &cmd->frame->dcmd; 4487 4488 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info)); 4489 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4490 4491 dcmd->mbox.s[0] = htole16(device_id); 4492 dcmd->cmd = MFI_CMD_DCMD; 4493 dcmd->cmd_status = 0xFF; 4494 dcmd->sge_count = 1; 4495 dcmd->flags = MFI_FRAME_DIR_READ; 4496 dcmd->timeout = 0; 4497 dcmd->pad_0 = 0; 4498 dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info)); 4499 dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO); 4500 dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF); 4501 dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info)); 4502 4503 if (!sc->mask_interrupts) 4504 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4505 else 4506 retcode = mrsas_issue_polled(sc, cmd); 4507 4508 if (retcode == ETIMEDOUT) 4509 goto dcmd_timeout; 4510 4511 sc->target_list[device_id].interface_type = 4512 le16toh(sc->pd_info_mem->state.ddf.pdType.intf); 4513 4514 do_ocr = 0; 4515 4516 dcmd_timeout: 4517 4518 if (do_ocr) 4519 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4520 4521 if (!sc->mask_interrupts) 4522 mrsas_release_mfi_cmd(cmd); 4523 } 4524 4525 /* 4526 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure. 4527 * sc: Adapter's soft state 4528 * target_id: Unique target id per controller(managed by driver) 4529 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) 4530 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS 4531 * return: void 4532 * Descripton: This function will be called whenever system PD or VD is created. 4533 */ 4534 static void mrsas_add_target(struct mrsas_softc *sc, 4535 u_int16_t target_id) 4536 { 4537 sc->target_list[target_id].target_id = target_id; 4538 4539 device_printf(sc->mrsas_dev, 4540 "%s created target ID: 0x%x\n", 4541 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4542 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4543 /* 4544 * If interrupts are enabled, then only fire DCMD to get pd_info 4545 * for system PDs 4546 */ 4547 if (!sc->mask_interrupts && sc->pd_info_mem && 4548 (target_id < MRSAS_MAX_PD)) 4549 mrsas_get_pd_info(sc, target_id); 4550 4551 } 4552 4553 /* 4554 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure. 4555 * sc: Adapter's soft state 4556 * target_id: Unique target id per controller(managed by driver) 4557 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) 4558 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS 4559 * return: void 4560 * Descripton: This function will be called whenever system PD or VD is deleted 4561 */ 4562 static void mrsas_remove_target(struct mrsas_softc *sc, 4563 u_int16_t target_id) 4564 { 4565 sc->target_list[target_id].target_id = 0xffff; 4566 device_printf(sc->mrsas_dev, 4567 "%s deleted target ID: 0x%x\n", 4568 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4569 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4570 } 4571 4572 /* 4573 * mrsas_get_pd_list: Returns FW's PD list structure input: 4574 * Adapter soft state 4575 * 4576 * Issues an internal command (DCMD) to get the FW's controller PD list 4577 * structure. This information is mainly used to find out about system 4578 * supported by Firmware. 4579 */ 4580 static int 4581 mrsas_get_pd_list(struct mrsas_softc *sc) 4582 { 4583 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 4584 u_int8_t do_ocr = 1; 4585 struct mrsas_mfi_cmd *cmd; 4586 struct mrsas_dcmd_frame *dcmd; 4587 struct MR_PD_LIST *pd_list_mem; 4588 struct MR_PD_ADDRESS *pd_addr; 4589 bus_addr_t pd_list_phys_addr = 0; 4590 struct mrsas_tmp_dcmd *tcmd; 4591 u_int16_t dev_id; 4592 4593 cmd = mrsas_get_mfi_cmd(sc); 4594 if (!cmd) { 4595 device_printf(sc->mrsas_dev, 4596 "Cannot alloc for get PD list cmd\n"); 4597 return 1; 4598 } 4599 dcmd = &cmd->frame->dcmd; 4600 4601 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4602 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4603 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 4604 device_printf(sc->mrsas_dev, 4605 "Cannot alloc dmamap for get PD list cmd\n"); 4606 mrsas_release_mfi_cmd(cmd); 4607 mrsas_free_tmp_dcmd(tcmd); 4608 free(tcmd, M_MRSAS); 4609 return (ENOMEM); 4610 } else { 4611 pd_list_mem = tcmd->tmp_dcmd_mem; 4612 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4613 } 4614 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4615 4616 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4617 dcmd->mbox.b[1] = 0; 4618 dcmd->cmd = MFI_CMD_DCMD; 4619 dcmd->cmd_status = 0xFF; 4620 dcmd->sge_count = 1; 4621 dcmd->flags = htole16(MFI_FRAME_DIR_READ); 4622 dcmd->timeout = 0; 4623 dcmd->pad_0 = 0; 4624 dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4625 dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY); 4626 dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF); 4627 dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4628 4629 if (!sc->mask_interrupts) 4630 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4631 else 4632 retcode = mrsas_issue_polled(sc, cmd); 4633 4634 if (retcode == ETIMEDOUT) 4635 goto dcmd_timeout; 4636 4637 /* Get the instance PD list */ 4638 pd_count = MRSAS_MAX_PD; 4639 pd_addr = pd_list_mem->addr; 4640 if (le32toh(pd_list_mem->count) < pd_count) { 4641 memset(sc->local_pd_list, 0, 4642 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 4643 for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) { 4644 dev_id = le16toh(pd_addr->deviceId); 4645 sc->local_pd_list[dev_id].tid = dev_id; 4646 sc->local_pd_list[dev_id].driveType = 4647 le16toh(pd_addr->scsiDevType); 4648 sc->local_pd_list[dev_id].driveState = 4649 MR_PD_STATE_SYSTEM; 4650 if (sc->target_list[dev_id].target_id == 0xffff) 4651 mrsas_add_target(sc, dev_id); 4652 pd_addr++; 4653 } 4654 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) { 4655 if ((sc->local_pd_list[pd_index].driveState != 4656 MR_PD_STATE_SYSTEM) && 4657 (sc->target_list[pd_index].target_id != 4658 0xffff)) { 4659 mrsas_remove_target(sc, pd_index); 4660 } 4661 } 4662 /* 4663 * Use mutext/spinlock if pd_list component size increase more than 4664 * 32 bit. 4665 */ 4666 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 4667 do_ocr = 0; 4668 } 4669 dcmd_timeout: 4670 mrsas_free_tmp_dcmd(tcmd); 4671 free(tcmd, M_MRSAS); 4672 4673 if (do_ocr) 4674 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4675 4676 if (!sc->mask_interrupts) 4677 mrsas_release_mfi_cmd(cmd); 4678 4679 return (retcode); 4680 } 4681 4682 /* 4683 * mrsas_get_ld_list: Returns FW's LD list structure input: 4684 * Adapter soft state 4685 * 4686 * Issues an internal command (DCMD) to get the FW's controller PD list 4687 * structure. This information is mainly used to find out about supported by 4688 * the FW. 4689 */ 4690 static int 4691 mrsas_get_ld_list(struct mrsas_softc *sc) 4692 { 4693 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id; 4694 u_int8_t do_ocr = 1; 4695 struct mrsas_mfi_cmd *cmd; 4696 struct mrsas_dcmd_frame *dcmd; 4697 struct MR_LD_LIST *ld_list_mem; 4698 bus_addr_t ld_list_phys_addr = 0; 4699 struct mrsas_tmp_dcmd *tcmd; 4700 4701 cmd = mrsas_get_mfi_cmd(sc); 4702 if (!cmd) { 4703 device_printf(sc->mrsas_dev, 4704 "Cannot alloc for get LD list cmd\n"); 4705 return 1; 4706 } 4707 dcmd = &cmd->frame->dcmd; 4708 4709 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4710 ld_list_size = sizeof(struct MR_LD_LIST); 4711 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 4712 device_printf(sc->mrsas_dev, 4713 "Cannot alloc dmamap for get LD list cmd\n"); 4714 mrsas_release_mfi_cmd(cmd); 4715 mrsas_free_tmp_dcmd(tcmd); 4716 free(tcmd, M_MRSAS); 4717 return (ENOMEM); 4718 } else { 4719 ld_list_mem = tcmd->tmp_dcmd_mem; 4720 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4721 } 4722 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4723 4724 if (sc->max256vdSupport) 4725 dcmd->mbox.b[0] = 1; 4726 4727 dcmd->cmd = MFI_CMD_DCMD; 4728 dcmd->cmd_status = 0xFF; 4729 dcmd->sge_count = 1; 4730 dcmd->flags = MFI_FRAME_DIR_READ; 4731 dcmd->timeout = 0; 4732 dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST)); 4733 dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST); 4734 dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr); 4735 dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST)); 4736 dcmd->pad_0 = 0; 4737 4738 if (!sc->mask_interrupts) 4739 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4740 else 4741 retcode = mrsas_issue_polled(sc, cmd); 4742 4743 if (retcode == ETIMEDOUT) 4744 goto dcmd_timeout; 4745 4746 #if VD_EXT_DEBUG 4747 printf("Number of LDs %d\n", ld_list_mem->ldCount); 4748 #endif 4749 4750 /* Get the instance LD list */ 4751 if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) { 4752 sc->CurLdCount = le32toh(ld_list_mem->ldCount); 4753 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4754 for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) { 4755 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4756 drv_tgt_id = ids + MRSAS_MAX_PD; 4757 if (ld_list_mem->ldList[ld_index].state != 0) { 4758 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4759 if (sc->target_list[drv_tgt_id].target_id == 4760 0xffff) 4761 mrsas_add_target(sc, drv_tgt_id); 4762 } else { 4763 if (sc->target_list[drv_tgt_id].target_id != 4764 0xffff) 4765 mrsas_remove_target(sc, 4766 drv_tgt_id); 4767 } 4768 } 4769 4770 do_ocr = 0; 4771 } 4772 dcmd_timeout: 4773 mrsas_free_tmp_dcmd(tcmd); 4774 free(tcmd, M_MRSAS); 4775 4776 if (do_ocr) 4777 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4778 if (!sc->mask_interrupts) 4779 mrsas_release_mfi_cmd(cmd); 4780 4781 return (retcode); 4782 } 4783 4784 /* 4785 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 4786 * Adapter soft state Temp command Size of alloction 4787 * 4788 * Allocates DMAable memory for a temporary internal command. The allocated 4789 * memory is initialized to all zeros upon successful loading of the dma 4790 * mapped memory. 4791 */ 4792 int 4793 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 4794 struct mrsas_tmp_dcmd *tcmd, int size) 4795 { 4796 if (bus_dma_tag_create(sc->mrsas_parent_tag, 4797 1, 0, 4798 BUS_SPACE_MAXADDR_32BIT, 4799 BUS_SPACE_MAXADDR, 4800 NULL, NULL, 4801 size, 4802 1, 4803 size, 4804 BUS_DMA_ALLOCNOW, 4805 NULL, NULL, 4806 &tcmd->tmp_dcmd_tag)) { 4807 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 4808 return (ENOMEM); 4809 } 4810 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 4811 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 4812 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 4813 return (ENOMEM); 4814 } 4815 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 4816 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 4817 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 4818 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 4819 return (ENOMEM); 4820 } 4821 memset(tcmd->tmp_dcmd_mem, 0, size); 4822 return (0); 4823 } 4824 4825 /* 4826 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 4827 * temporary dcmd pointer 4828 * 4829 * Deallocates memory of the temporary command for use in the construction of 4830 * the internal DCMD. 4831 */ 4832 void 4833 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 4834 { 4835 if (tmp->tmp_dcmd_phys_addr) 4836 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 4837 if (tmp->tmp_dcmd_mem != NULL) 4838 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 4839 if (tmp->tmp_dcmd_tag != NULL) 4840 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 4841 } 4842 4843 /* 4844 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 4845 * Adapter soft state Previously issued cmd to be aborted 4846 * 4847 * This function is used to abort previously issued commands, such as AEN and 4848 * RAID map sync map commands. The abort command is sent as a DCMD internal 4849 * command and subsequently the driver will wait for a return status. The 4850 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 4851 */ 4852 static int 4853 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 4854 struct mrsas_mfi_cmd *cmd_to_abort) 4855 { 4856 struct mrsas_mfi_cmd *cmd; 4857 struct mrsas_abort_frame *abort_fr; 4858 u_int8_t retcode = 0; 4859 unsigned long total_time = 0; 4860 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 4861 4862 cmd = mrsas_get_mfi_cmd(sc); 4863 if (!cmd) { 4864 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 4865 return (1); 4866 } 4867 abort_fr = &cmd->frame->abort; 4868 4869 /* Prepare and issue the abort frame */ 4870 abort_fr->cmd = MFI_CMD_ABORT; 4871 abort_fr->cmd_status = 0xFF; 4872 abort_fr->flags = 0; 4873 abort_fr->abort_context = cmd_to_abort->index; 4874 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 4875 abort_fr->abort_mfi_phys_addr_hi = 0; 4876 4877 cmd->sync_cmd = 1; 4878 cmd->cmd_status = 0xFF; 4879 4880 if (mrsas_issue_dcmd(sc, cmd)) { 4881 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 4882 return (1); 4883 } 4884 /* Wait for this cmd to complete */ 4885 sc->chan = (void *)&cmd; 4886 while (1) { 4887 if (cmd->cmd_status == 0xFF) { 4888 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 4889 } else 4890 break; 4891 total_time++; 4892 if (total_time >= max_wait) { 4893 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 4894 retcode = 1; 4895 break; 4896 } 4897 } 4898 4899 cmd->sync_cmd = 0; 4900 mrsas_release_mfi_cmd(cmd); 4901 return (retcode); 4902 } 4903 4904 /* 4905 * mrsas_complete_abort: Completes aborting a command input: 4906 * Adapter soft state Cmd that was issued to abort another cmd 4907 * 4908 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 4909 * change after sending the command. This function is called from 4910 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 4911 */ 4912 void 4913 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4914 { 4915 if (cmd->sync_cmd) { 4916 cmd->sync_cmd = 0; 4917 cmd->cmd_status = 0; 4918 sc->chan = (void *)&cmd; 4919 wakeup_one((void *)&sc->chan); 4920 } 4921 return; 4922 } 4923 4924 /* 4925 * mrsas_aen_handler: AEN processing callback function from thread context 4926 * input: Adapter soft state 4927 * 4928 * Asynchronous event handler 4929 */ 4930 void 4931 mrsas_aen_handler(struct mrsas_softc *sc) 4932 { 4933 union mrsas_evt_class_locale class_locale; 4934 int doscan = 0; 4935 u_int32_t seq_num; 4936 int error, fail_aen = 0; 4937 4938 if (sc == NULL) { 4939 printf("invalid instance!\n"); 4940 return; 4941 } 4942 if (sc->remove_in_progress || sc->reset_in_progress) { 4943 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n", 4944 __func__, __LINE__); 4945 return; 4946 } 4947 if (sc->evt_detail_mem) { 4948 switch (sc->evt_detail_mem->code) { 4949 case MR_EVT_PD_INSERTED: 4950 fail_aen = mrsas_get_pd_list(sc); 4951 if (!fail_aen) 4952 mrsas_bus_scan_sim(sc, sc->sim_1); 4953 else 4954 goto skip_register_aen; 4955 break; 4956 case MR_EVT_PD_REMOVED: 4957 fail_aen = mrsas_get_pd_list(sc); 4958 if (!fail_aen) 4959 mrsas_bus_scan_sim(sc, sc->sim_1); 4960 else 4961 goto skip_register_aen; 4962 break; 4963 case MR_EVT_LD_OFFLINE: 4964 case MR_EVT_CFG_CLEARED: 4965 case MR_EVT_LD_DELETED: 4966 mrsas_bus_scan_sim(sc, sc->sim_0); 4967 break; 4968 case MR_EVT_LD_CREATED: 4969 fail_aen = mrsas_get_ld_list(sc); 4970 if (!fail_aen) 4971 mrsas_bus_scan_sim(sc, sc->sim_0); 4972 else 4973 goto skip_register_aen; 4974 break; 4975 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4976 case MR_EVT_FOREIGN_CFG_IMPORTED: 4977 case MR_EVT_LD_STATE_CHANGE: 4978 doscan = 1; 4979 break; 4980 case MR_EVT_CTRL_PROP_CHANGED: 4981 fail_aen = mrsas_get_ctrl_info(sc); 4982 if (fail_aen) 4983 goto skip_register_aen; 4984 break; 4985 default: 4986 break; 4987 } 4988 } else { 4989 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4990 return; 4991 } 4992 if (doscan) { 4993 fail_aen = mrsas_get_pd_list(sc); 4994 if (!fail_aen) { 4995 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4996 mrsas_bus_scan_sim(sc, sc->sim_1); 4997 } else 4998 goto skip_register_aen; 4999 5000 fail_aen = mrsas_get_ld_list(sc); 5001 if (!fail_aen) { 5002 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 5003 mrsas_bus_scan_sim(sc, sc->sim_0); 5004 } else 5005 goto skip_register_aen; 5006 } 5007 seq_num = sc->evt_detail_mem->seq_num + 1; 5008 5009 /* Register AEN with FW for latest sequence number plus 1 */ 5010 class_locale.members.reserved = 0; 5011 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5012 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5013 5014 if (sc->aen_cmd != NULL) 5015 return; 5016 5017 mtx_lock(&sc->aen_lock); 5018 error = mrsas_register_aen(sc, seq_num, 5019 class_locale.word); 5020 mtx_unlock(&sc->aen_lock); 5021 5022 if (error) 5023 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 5024 5025 skip_register_aen: 5026 return; 5027 5028 } 5029 5030 /* 5031 * mrsas_complete_aen: Completes AEN command 5032 * input: Adapter soft state 5033 * Cmd that was issued to abort another cmd 5034 * 5035 * This function will be called from ISR and will continue event processing from 5036 * thread context by enqueuing task in ev_tq (callback function 5037 * "mrsas_aen_handler"). 5038 */ 5039 void 5040 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 5041 { 5042 /* 5043 * Don't signal app if it is just an aborted previously registered 5044 * aen 5045 */ 5046 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 5047 sc->mrsas_aen_triggered = 1; 5048 mtx_lock(&sc->aen_lock); 5049 if (sc->mrsas_poll_waiting) { 5050 sc->mrsas_poll_waiting = 0; 5051 selwakeup(&sc->mrsas_select); 5052 } 5053 mtx_unlock(&sc->aen_lock); 5054 } else 5055 cmd->abort_aen = 0; 5056 5057 sc->aen_cmd = NULL; 5058 mrsas_release_mfi_cmd(cmd); 5059 5060 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 5061 5062 return; 5063 } 5064 5065 static device_method_t mrsas_methods[] = { 5066 DEVMETHOD(device_probe, mrsas_probe), 5067 DEVMETHOD(device_attach, mrsas_attach), 5068 DEVMETHOD(device_detach, mrsas_detach), 5069 DEVMETHOD(device_shutdown, mrsas_shutdown), 5070 DEVMETHOD(device_suspend, mrsas_suspend), 5071 DEVMETHOD(device_resume, mrsas_resume), 5072 DEVMETHOD(bus_print_child, bus_generic_print_child), 5073 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 5074 {0, 0} 5075 }; 5076 5077 static driver_t mrsas_driver = { 5078 "mrsas", 5079 mrsas_methods, 5080 sizeof(struct mrsas_softc) 5081 }; 5082 5083 static devclass_t mrsas_devclass; 5084 5085 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 5086 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 5087