1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>

/*
 * Function prototypes
 */
/* cdevsw entry points (definitions appear further below) */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_read_t mrsas_read;
static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;

static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev,
    u_long cmd, caddr_t arg);
/*
 * Non-static helpers shared with the other mrsas_*.c translation units.
 */
u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int	mrsas_init_adapter(struct mrsas_softc *sc);
int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int	mrsas_ioc_init(struct mrsas_softc *sc);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int	mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int	mrsas_reset_targets(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	mrsas_disable_intr(struct mrsas_softc *sc);
void	mrsas_enable_intr(struct mrsas_softc *sc);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_mem(struct mrsas_softc *sc);
void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void	mrsas_isr(void *arg);
void	mrsas_teardown_intr(struct mrsas_softc *sc);
void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void	mrsas_kill_hba(struct mrsas_softc *sc);
void	mrsas_aen_handler(struct mrsas_softc *sc);
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);

MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
        (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);

/* Routines provided by mrsas_cam.c / mrsas_fp.c */
extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);

void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
    union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
    u_int32_t data_length, u_int8_t *sense);
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);

/* Root of the hw.mrsas sysctl tree; per-unit nodes hang off dev.mrsas.N */
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;	/* 0xffff == wildcard, matches any subvendor */
	uint16_t subdevice;	/* 0xffff == wildcard, matches any subdevice */
	const char *desc;
}	MRSAS_CTLR_ID;

/*
 * Supported controller list, scanned by mrsas_find_ident().  The all-zero
 * entry is the terminating sentinel.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member of
 * struct cdev. We set this variable to point to our softc in our attach
 * routine when we create the /dev entry.
 */
/*
 * The open/close/read/write cdev handlers are placeholders: the management
 * interface operates entirely through mrsas_ioctl(), so these only look up
 * the softc (currently unused) and succeed unconditionally.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * mrsas_read_reg_with_retries: register read with workaround for Aero.
 *
 * On Aero controllers a register read can transiently return 0, so retry
 * up to 3 times while the value is 0; other controllers read once.
 */
u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
{
	u_int32_t i = 0, ret_val;

	if (sc->is_aero) {
		do {
			ret_val = mrsas_read_reg(sc, offset);
			i++;
		} while(ret_val == 0 && i < 3);
	} else
		ret_val = mrsas_read_reg(sc, offset);

	return ret_val;
}

/*
 * Register Read/Write Functions
 *
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}


/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	/* Mask every interrupt source, then read back to post the write. */
	u_int32_t mask = 0xFFFFFFFF;
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	/* Clear any latched status before unmasking. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask only the reply interrupt. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * mrsas_clear_intr: returns 1 if the pending interrupt belongs to us
 * (reply interrupt), 0 otherwise.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status;

	/* Read received interrupt */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
/*
 * mrsas_find_ident: match a PCI device against device_table[].
 * Returns the matching entry or NULL.  Subvendor/subdevice 0xffff in the
 * table act as wildcards.
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

/*
 * mrsas_probe: PCI probe entry point.  Prints the driver banner once for
 * the first controller found.
 */
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);

		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl: setup sysctl values for
mrsas
 * input: Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the newbus-provided per-device sysctl context/tree... */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* ...otherwise create our own node under hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/*
	 * NOTE(review): "stream detection" and "SGE holes" below contain
	 * spaces in the OID name, which is awkward to address from
	 * sysctl(8); kept as-is for compatibility with existing tooling.
	 */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
	    &sc->drv_stream_detection, 0,
	    "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}

/*
 * mrsas_get_tunables: get tunable parameters.
 * input: Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
	    (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	/* per-unit tunable overrides the global debug_level fetched above */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
 * Used to get sequence number at driver load time.
 * input: Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 * Returns 0 on success, ENOMEM on any busdma failure.  On a partial
 * failure the caller is expected to tear down via
 * mrsas_free_evt_log_info_cmd() (each step below checks for NULL).
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	/* Step 1: DMA tag — single segment, 32-bit addressable. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,
	    1,
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	/* Step 2: backing memory. */
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	/* Step 3: load the map; mrsas_addr_cb stores the bus address. */
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_info_cmd: Free memory for Event log info command
 * input: Adapter soft state
 *
 * Deallocates memory for the event log info internal command, unwinding
 * in reverse order of allocation (unload, free, destroy tag).
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}

/*
 * mrsas_get_seq_num: Get latest event sequence number
 * @sc: Adapter soft state
 * @eli: Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build MR_DCMD_CTRL_EVENT_GET_INFO reading into el_info_mem. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout the MFI command is deliberately NOT released — the
	 * firmware may still DMA into it; instead an OCR (online controller
	 * reset) is requested and reclaims it.  NOTE(review): the evt-log
	 * DMA buffer is also left allocated on this path — presumably
	 * reclaimed/reused later; verify against the OCR path.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}


/*
 * mrsas_register_aen: Register for asynchronous event notification
 * @sc: Adapter soft state
 * @seq_num: Starting sequence number
 * @class_locale: Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, min of classes. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Another registration raced in while we built ours; drop ours. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen: Subscribes to AEN during driver load time
 * @instance: Adapter soft state
 *
 * Fetches the newest firmware event sequence number and registers for
 * all-locale DEBUG-class events starting just past it.  Returns 0 on
 * success, -1 if the sequence number could not be read.
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;


	/* Get the latest sequence number from FW */

	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);

}

/*
 * mrsas_setup_msix: Allocate MSI-x vectors
 * @sc: adapter soft state
 *
 * For each vector: allocate the IRQ resource (rids are 1-based for
 * MSI-X) and hook mrsas_isr with a per-vector context carrying the
 * MSI-X index.  On any failure all previously set up vectors are torn
 * down via mrsas_teardown_intr().
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
832 mrsas_teardown_intr(sc); 833 return (FAIL); 834 } 835 836 /* 837 * mrsas_allocate_msix: Setup MSI-x vectors 838 * @sc: adapter soft state 839 */ 840 static int 841 mrsas_allocate_msix(struct mrsas_softc *sc) 842 { 843 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { 844 device_printf(sc->mrsas_dev, "Using MSI-X with %d number" 845 " of vectors\n", sc->msix_vectors); 846 } else { 847 device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); 848 goto irq_alloc_failed; 849 } 850 return SUCCESS; 851 852 irq_alloc_failed: 853 mrsas_teardown_intr(sc); 854 return (FAIL); 855 } 856 857 /* 858 * mrsas_attach: PCI entry point 859 * input: pointer to device struct 860 * 861 * Performs setup of PCI and registers, initializes mutexes and linked lists, 862 * registers interrupts and CAM, and initializes the adapter/controller to 863 * its proper state. 864 */ 865 static int 866 mrsas_attach(device_t dev) 867 { 868 struct mrsas_softc *sc = device_get_softc(dev); 869 uint32_t cmd, error; 870 871 memset(sc, 0, sizeof(struct mrsas_softc)); 872 873 /* Look up our softc and initialize its fields. 
*/ 874 sc->mrsas_dev = dev; 875 sc->device_id = pci_get_device(dev); 876 877 switch (sc->device_id) { 878 case MRSAS_INVADER: 879 case MRSAS_FURY: 880 case MRSAS_INTRUDER: 881 case MRSAS_INTRUDER_24: 882 case MRSAS_CUTLASS_52: 883 case MRSAS_CUTLASS_53: 884 sc->mrsas_gen3_ctrl = 1; 885 break; 886 case MRSAS_VENTURA: 887 case MRSAS_CRUSADER: 888 case MRSAS_HARPOON: 889 case MRSAS_TOMCAT: 890 case MRSAS_VENTURA_4PORT: 891 case MRSAS_CRUSADER_4PORT: 892 sc->is_ventura = true; 893 break; 894 case MRSAS_AERO_10E1: 895 case MRSAS_AERO_10E5: 896 device_printf(dev, "Adapter is in configurable secure mode\n"); 897 case MRSAS_AERO_10E2: 898 case MRSAS_AERO_10E6: 899 sc->is_aero = true; 900 break; 901 case MRSAS_AERO_10E0: 902 case MRSAS_AERO_10E3: 903 case MRSAS_AERO_10E4: 904 case MRSAS_AERO_10E7: 905 device_printf(dev, "Adapter is in non-secure mode\n"); 906 return SUCCESS; 907 908 } 909 910 mrsas_get_tunables(sc); 911 912 /* 913 * Set up PCI and registers 914 */ 915 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 916 if ((cmd & PCIM_CMD_PORTEN) == 0) { 917 return (ENXIO); 918 } 919 /* Force the busmaster enable bit on. 
*/ 920 cmd |= PCIM_CMD_BUSMASTEREN; 921 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 922 923 /* For Ventura/Aero system registers are mapped to BAR0 */ 924 if (sc->is_ventura || sc->is_aero) 925 sc->reg_res_id = PCIR_BAR(0); /* BAR0 offset */ 926 else 927 sc->reg_res_id = PCIR_BAR(1); /* BAR1 offset */ 928 929 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 930 &(sc->reg_res_id), RF_ACTIVE)) 931 == NULL) { 932 device_printf(dev, "Cannot allocate PCI registers\n"); 933 goto attach_fail; 934 } 935 sc->bus_tag = rman_get_bustag(sc->reg_res); 936 sc->bus_handle = rman_get_bushandle(sc->reg_res); 937 938 /* Intialize mutexes */ 939 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); 940 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); 941 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); 942 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); 943 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); 944 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); 945 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); 946 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); 947 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF); 948 949 /* Intialize linked list */ 950 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); 951 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); 952 953 mrsas_atomic_set(&sc->fw_outstanding, 0); 954 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 955 mrsas_atomic_set(&sc->prp_count, 0); 956 mrsas_atomic_set(&sc->sge_holes, 0); 957 958 sc->io_cmds_highwater = 0; 959 960 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 961 sc->UnevenSpanSupport = 0; 962 963 sc->msix_enable = 0; 964 965 /* Initialize Firmware */ 966 if (mrsas_init_fw(sc) != SUCCESS) { 967 goto attach_fail_fw; 968 } 969 /* Register mrsas to CAM layer */ 970 if ((mrsas_cam_attach(sc) != SUCCESS)) { 971 goto attach_fail_cam; 972 } 973 /* Register IRQs */ 974 if (mrsas_setup_irq(sc) != 
SUCCESS) {
		goto attach_fail_irq;
	}
	/* Spawn the dedicated kernel thread that drives online controller reset (OCR). */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: labels release resources in reverse order of acquisition. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

/*
 * Interrupt config hook: deferred second half of attach, invoked once
 * interrupts can be delivered.  Creates the management cdev, registers
 * this controller in mrsas_mgmt_info, enables interrupts, queries PD
 * info and starts AEN delivery.
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 additionally exposes the Linux-emulation management node. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
		    sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* The intrhook has done its job; detach it from the boot path. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}

/*
 * mrsas_detach: De-allocates and teardown resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Flag new IOCTLs/OCR work to bail out while we tear down. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait out any in-flight controller reset before shutting down. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Wait for the OCR thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush FW cache and issue a clean shutdown DCMD before detach. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

/*
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.  Each DMA region follows the same unload/free/destroy sequence,
 * guarded so a partially-completed mrsas_alloc_mem() can be unwound safely.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory (double-buffered, hence the pair loop)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/* JBOD map is double-buffered as well. */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames (frames first, the command structs are freed below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (must come after all child tags above)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to teardown and release bus
 * interrupt resource.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		/* Legacy INTx: single handler/resource at slot 0. */
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		/* MSI-X: tear down every vector, then release the MSI allocation. */
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}

}

/*
 * mrsas_suspend: Suspend entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/*
 * mrsas_resume: Resume entry point
 * input: Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled when the driver will have hibernation support */
	return (0);
}

/**
 * mrsas_get_softc_instance: Find softc instance based on cmd type
 *
 * This function will return softc instance based on cmd type.
 * In some case, application fire ioctl on required management instance and
 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
 * case, else get the softc instance from host_no provided by application in
 * user data.
1418 */ 1419 1420 static struct mrsas_softc * 1421 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1422 { 1423 struct mrsas_softc *sc = NULL; 1424 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1425 1426 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1427 sc = dev->si_drv1; 1428 } else { 1429 /* 1430 * get the Host number & the softc from data sent by the 1431 * Application 1432 */ 1433 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1434 if (sc == NULL) 1435 printf("There is no Controller number %d\n", 1436 user_ioc->host_no); 1437 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1438 mrsas_dprint(sc, MRSAS_FAULT, 1439 "Invalid Controller number %d\n", user_ioc->host_no); 1440 } 1441 1442 return sc; 1443 } 1444 1445 /* 1446 * mrsas_ioctl: IOCtl commands entry point. 1447 * 1448 * This function is the entry point for IOCtls from the OS. It calls the 1449 * appropriate function for processing depending on the command received. 1450 */ 1451 static int 1452 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, 1453 struct thread *td) 1454 { 1455 struct mrsas_softc *sc; 1456 int ret = 0, i = 0; 1457 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; 1458 1459 sc = mrsas_get_softc_instance(dev, cmd, arg); 1460 if (!sc) 1461 return ENOENT; 1462 1463 if (sc->remove_in_progress || 1464 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { 1465 mrsas_dprint(sc, MRSAS_INFO, 1466 "Either driver remove or shutdown called or " 1467 "HW is in unrecoverable critical error state.\n"); 1468 return ENOENT; 1469 } 1470 mtx_lock_spin(&sc->ioctl_lock); 1471 if (!sc->reset_in_progress) { 1472 mtx_unlock_spin(&sc->ioctl_lock); 1473 goto do_ioctl; 1474 } 1475 mtx_unlock_spin(&sc->ioctl_lock); 1476 while (sc->reset_in_progress) { 1477 i++; 1478 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1479 mrsas_dprint(sc, MRSAS_INFO, 1480 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1481 } 1482 pause("mr_ioctl", hz); 1483 } 1484 1485 do_ioctl: 1486 switch 
(cmd) { 1487 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: 1488 #ifdef COMPAT_FREEBSD32 1489 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: 1490 #endif 1491 /* 1492 * Decrement the Ioctl counting Semaphore before getting an 1493 * mfi command 1494 */ 1495 sema_wait(&sc->ioctl_count_sema); 1496 1497 ret = mrsas_passthru(sc, (void *)arg, cmd); 1498 1499 /* Increment the Ioctl counting semaphore value */ 1500 sema_post(&sc->ioctl_count_sema); 1501 1502 break; 1503 case MRSAS_IOC_SCAN_BUS: 1504 ret = mrsas_bus_scan(sc); 1505 break; 1506 1507 case MRSAS_IOC_GET_PCI_INFO: 1508 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; 1509 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); 1510 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); 1511 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); 1512 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); 1513 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); 1514 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," 1515 "pci device no: %d, pci function no: %d," 1516 "pci domain ID: %d\n", 1517 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, 1518 pciDrvInfo->functionNumber, pciDrvInfo->domainID); 1519 ret = 0; 1520 break; 1521 1522 default: 1523 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); 1524 ret = ENOENT; 1525 } 1526 1527 return (ret); 1528 } 1529 1530 /* 1531 * mrsas_poll: poll entry point for mrsas driver fd 1532 * 1533 * This function is the entry point for poll from the OS. It waits for some AEN 1534 * events to be triggered from the controller and notifies back. 
1535 */ 1536 static int 1537 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) 1538 { 1539 struct mrsas_softc *sc; 1540 int revents = 0; 1541 1542 sc = dev->si_drv1; 1543 1544 if (poll_events & (POLLIN | POLLRDNORM)) { 1545 if (sc->mrsas_aen_triggered) { 1546 revents |= poll_events & (POLLIN | POLLRDNORM); 1547 } 1548 } 1549 if (revents == 0) { 1550 if (poll_events & (POLLIN | POLLRDNORM)) { 1551 mtx_lock(&sc->aen_lock); 1552 sc->mrsas_poll_waiting = 1; 1553 selrecord(td, &sc->mrsas_select); 1554 mtx_unlock(&sc->aen_lock); 1555 } 1556 } 1557 return revents; 1558 } 1559 1560 /* 1561 * mrsas_setup_irq: Set up interrupt 1562 * input: Adapter instance soft state 1563 * 1564 * This function sets up interrupts as a bus resource, with flags indicating 1565 * resource permitting contemporaneous sharing and for resource to activate 1566 * atomically. 1567 */ 1568 static int 1569 mrsas_setup_irq(struct mrsas_softc *sc) 1570 { 1571 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) 1572 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); 1573 1574 else { 1575 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); 1576 sc->irq_context[0].sc = sc; 1577 sc->irq_context[0].MSIxIndex = 0; 1578 sc->irq_id[0] = 0; 1579 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, 1580 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); 1581 if (sc->mrsas_irq[0] == NULL) { 1582 device_printf(sc->mrsas_dev, "Cannot allocate legcay" 1583 "interrupt\n"); 1584 return (FAIL); 1585 } 1586 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], 1587 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, 1588 &sc->irq_context[0], &sc->intr_handle[0])) { 1589 device_printf(sc->mrsas_dev, "Cannot set up legacy" 1590 "interrupt\n"); 1591 return (FAIL); 1592 } 1593 } 1594 return (0); 1595 } 1596 1597 /* 1598 * mrsas_isr: ISR entry point 1599 * input: argument pointer 1600 * 1601 * This function is the interrupt service routine entry point. 
There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked (e.g. during reset); ignore this one entirely. */
	if (sc->mask_interrupts)
		return;

	if (!sc->msix_vectors) {
		/* Legacy INTx may be shared: bail if the interrupt is not ours. */
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
1643 */ 1644 int 1645 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) 1646 { 1647 Mpi2ReplyDescriptorsUnion_t *desc; 1648 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1649 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; 1650 struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL; 1651 struct mrsas_mfi_cmd *cmd_mfi; 1652 u_int8_t reply_descript_type, *sense; 1653 u_int16_t smid, num_completed; 1654 u_int8_t status, extStatus; 1655 union desc_value desc_val; 1656 PLD_LOAD_BALANCE_INFO lbinfo; 1657 u_int32_t device_id, data_length; 1658 int threshold_reply_count = 0; 1659 #if TM_DEBUG 1660 MR_TASK_MANAGE_REQUEST *mr_tm_req; 1661 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 1662 #endif 1663 1664 /* If we have a hardware error, not need to continue */ 1665 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 1666 return (DONE); 1667 1668 desc = sc->reply_desc_mem; 1669 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) 1670 + sc->last_reply_idx[MSIxIndex]; 1671 1672 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1673 1674 desc_val.word = desc->Words; 1675 num_completed = 0; 1676 1677 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1678 1679 /* Find our reply descriptor for the command and process */ 1680 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { 1681 smid = reply_desc->SMID; 1682 cmd_mpt = sc->mpt_cmd_list[smid - 1]; 1683 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; 1684 1685 status = scsi_io_req->RaidContext.raid_context.status; 1686 extStatus = scsi_io_req->RaidContext.raid_context.exStatus; 1687 sense = cmd_mpt->sense; 1688 data_length = scsi_io_req->DataLength; 1689 1690 switch (scsi_io_req->Function) { 1691 case MPI2_FUNCTION_SCSI_TASK_MGMT: 1692 #if TM_DEBUG 1693 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request; 1694 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *) 1695 &mr_tm_req->TmRequest; 1696 device_printf(sc->mrsas_dev, 
"TM completion type 0x%X, " 1697 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 1698 #endif 1699 wakeup_one((void *)&sc->ocr_chan); 1700 break; 1701 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ 1702 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; 1703 lbinfo = &sc->load_balance_info[device_id]; 1704 /* R1 load balancing for READ */ 1705 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { 1706 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); 1707 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; 1708 } 1709 /* Fall thru and complete IO */ 1710 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: 1711 if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 1712 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1713 extStatus, data_length, sense); 1714 mrsas_cmd_done(sc, cmd_mpt); 1715 } else { 1716 /* 1717 * If the peer Raid 1/10 fast path failed, 1718 * mark IO as failed to the scsi layer. 1719 * Overwrite the current status by the failed status 1720 * and make sure that if any command fails, 1721 * driver returns fail status to CAM. 
1722 */ 1723 cmd_mpt->cmd_completed = 1; 1724 r1_cmd = cmd_mpt->peer_cmd; 1725 if (r1_cmd->cmd_completed) { 1726 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) { 1727 status = r1_cmd->io_request->RaidContext.raid_context.status; 1728 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus; 1729 data_length = r1_cmd->io_request->DataLength; 1730 sense = r1_cmd->sense; 1731 } 1732 r1_cmd->ccb_ptr = NULL; 1733 if (r1_cmd->callout_owner) { 1734 callout_stop(&r1_cmd->cm_callout); 1735 r1_cmd->callout_owner = false; 1736 } 1737 mrsas_release_mpt_cmd(r1_cmd); 1738 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1739 extStatus, data_length, sense); 1740 mrsas_cmd_done(sc, cmd_mpt); 1741 } 1742 } 1743 mrsas_atomic_dec(&sc->fw_outstanding); 1744 break; 1745 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ 1746 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 1747 /* 1748 * Make sure NOT TO release the mfi command from the called 1749 * function's context if it is fired with issue_polled call. 1750 * And also make sure that the issue_polled call should only be 1751 * used if INTERRUPT IS DISABLED. 
1752 */ 1753 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 1754 mrsas_release_mfi_cmd(cmd_mfi); 1755 else 1756 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); 1757 break; 1758 } 1759 1760 sc->last_reply_idx[MSIxIndex]++; 1761 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) 1762 sc->last_reply_idx[MSIxIndex] = 0; 1763 1764 desc->Words = ~((uint64_t)0x00); /* set it back to all 1765 * 0xFFFFFFFFs */ 1766 num_completed++; 1767 threshold_reply_count++; 1768 1769 /* Get the next reply descriptor */ 1770 if (!sc->last_reply_idx[MSIxIndex]) { 1771 desc = sc->reply_desc_mem; 1772 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); 1773 } else 1774 desc++; 1775 1776 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1777 desc_val.word = desc->Words; 1778 1779 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1780 1781 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1782 break; 1783 1784 /* 1785 * Write to reply post index after completing threshold reply 1786 * count and still there are more replies in reply queue 1787 * pending to be completed. 
1788 */ 1789 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 1790 if (sc->msix_enable) { 1791 if (sc->msix_combined) 1792 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1793 ((MSIxIndex & 0x7) << 24) | 1794 sc->last_reply_idx[MSIxIndex]); 1795 else 1796 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1797 sc->last_reply_idx[MSIxIndex]); 1798 } else 1799 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1800 reply_post_host_index), sc->last_reply_idx[0]); 1801 1802 threshold_reply_count = 0; 1803 } 1804 } 1805 1806 /* No match, just return */ 1807 if (num_completed == 0) 1808 return (DONE); 1809 1810 /* Clear response interrupt */ 1811 if (sc->msix_enable) { 1812 if (sc->msix_combined) { 1813 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1814 ((MSIxIndex & 0x7) << 24) | 1815 sc->last_reply_idx[MSIxIndex]); 1816 } else 1817 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1818 sc->last_reply_idx[MSIxIndex]); 1819 } else 1820 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1821 reply_post_host_index), sc->last_reply_idx[0]); 1822 1823 return (0); 1824 } 1825 1826 /* 1827 * mrsas_map_mpt_cmd_status: Allocate DMAable memory. 1828 * input: Adapter instance soft state 1829 * 1830 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. 1831 * It checks the command status and maps the appropriate CAM status for the 1832 * CCB. 
1833 */ 1834 void 1835 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status, 1836 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense) 1837 { 1838 struct mrsas_softc *sc = cmd->sc; 1839 u_int8_t *sense_data; 1840 1841 switch (status) { 1842 case MFI_STAT_OK: 1843 ccb_ptr->ccb_h.status = CAM_REQ_CMP; 1844 break; 1845 case MFI_STAT_SCSI_IO_FAILED: 1846 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1847 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1848 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data; 1849 if (sense_data) { 1850 /* For now just copy 18 bytes back */ 1851 memcpy(sense_data, sense, 18); 1852 ccb_ptr->csio.sense_len = 18; 1853 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; 1854 } 1855 break; 1856 case MFI_STAT_LD_OFFLINE: 1857 case MFI_STAT_DEVICE_NOT_FOUND: 1858 if (ccb_ptr->ccb_h.target_lun) 1859 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; 1860 else 1861 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; 1862 break; 1863 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1864 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; 1865 break; 1866 default: 1867 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); 1868 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; 1869 ccb_ptr->csio.scsi_status = status; 1870 } 1871 return; 1872 } 1873 1874 /* 1875 * mrsas_alloc_mem: Allocate DMAable memory 1876 * input: Adapter instance soft state 1877 * 1878 * This function creates the parent DMA tag and allocates DMAable memory. DMA 1879 * tag describes constraints of DMA mapping. Memory allocated is mapped into 1880 * Kernel virtual address. Callback argument is physical memory address. 
1881 */ 1882 static int 1883 mrsas_alloc_mem(struct mrsas_softc *sc) 1884 { 1885 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, 1886 evt_detail_size, count, pd_info_size; 1887 1888 /* 1889 * Allocate parent DMA tag 1890 */ 1891 if (bus_dma_tag_create(NULL, /* parent */ 1892 1, /* alignment */ 1893 0, /* boundary */ 1894 BUS_SPACE_MAXADDR, /* lowaddr */ 1895 BUS_SPACE_MAXADDR, /* highaddr */ 1896 NULL, NULL, /* filter, filterarg */ 1897 MAXPHYS, /* maxsize */ 1898 sc->max_num_sge, /* nsegments */ 1899 MAXPHYS, /* maxsegsize */ 1900 0, /* flags */ 1901 NULL, NULL, /* lockfunc, lockarg */ 1902 &sc->mrsas_parent_tag /* tag */ 1903 )) { 1904 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); 1905 return (ENOMEM); 1906 } 1907 /* 1908 * Allocate for version buffer 1909 */ 1910 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); 1911 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1912 1, 0, 1913 BUS_SPACE_MAXADDR_32BIT, 1914 BUS_SPACE_MAXADDR, 1915 NULL, NULL, 1916 verbuf_size, 1917 1, 1918 verbuf_size, 1919 BUS_DMA_ALLOCNOW, 1920 NULL, NULL, 1921 &sc->verbuf_tag)) { 1922 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); 1923 return (ENOMEM); 1924 } 1925 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, 1926 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { 1927 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); 1928 return (ENOMEM); 1929 } 1930 bzero(sc->verbuf_mem, verbuf_size); 1931 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, 1932 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, 1933 BUS_DMA_NOWAIT)) { 1934 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); 1935 return (ENOMEM); 1936 } 1937 /* 1938 * Allocate IO Request Frames 1939 */ 1940 io_req_size = sc->io_frames_alloc_sz; 1941 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1942 16, 0, 1943 BUS_SPACE_MAXADDR_32BIT, 1944 BUS_SPACE_MAXADDR, 1945 NULL, NULL, 1946 io_req_size, 1947 1, 1948 
io_req_size, 1949 BUS_DMA_ALLOCNOW, 1950 NULL, NULL, 1951 &sc->io_request_tag)) { 1952 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); 1953 return (ENOMEM); 1954 } 1955 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, 1956 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { 1957 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); 1958 return (ENOMEM); 1959 } 1960 bzero(sc->io_request_mem, io_req_size); 1961 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, 1962 sc->io_request_mem, io_req_size, mrsas_addr_cb, 1963 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { 1964 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); 1965 return (ENOMEM); 1966 } 1967 /* 1968 * Allocate Chain Frames 1969 */ 1970 chain_frame_size = sc->chain_frames_alloc_sz; 1971 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1972 4, 0, 1973 BUS_SPACE_MAXADDR_32BIT, 1974 BUS_SPACE_MAXADDR, 1975 NULL, NULL, 1976 chain_frame_size, 1977 1, 1978 chain_frame_size, 1979 BUS_DMA_ALLOCNOW, 1980 NULL, NULL, 1981 &sc->chain_frame_tag)) { 1982 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); 1983 return (ENOMEM); 1984 } 1985 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, 1986 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { 1987 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); 1988 return (ENOMEM); 1989 } 1990 bzero(sc->chain_frame_mem, chain_frame_size); 1991 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, 1992 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, 1993 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { 1994 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); 1995 return (ENOMEM); 1996 } 1997 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 1998 /* 1999 * Allocate Reply Descriptor Array 2000 */ 2001 reply_desc_size = sc->reply_alloc_sz * count; 2002 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2003 16, 0, 2004 BUS_SPACE_MAXADDR_32BIT, 2005 BUS_SPACE_MAXADDR, 2006 NULL, NULL, 2007 reply_desc_size, 2008 1, 2009 reply_desc_size, 2010 BUS_DMA_ALLOCNOW, 2011 NULL, NULL, 2012 &sc->reply_desc_tag)) { 2013 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); 2014 return (ENOMEM); 2015 } 2016 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, 2017 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { 2018 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); 2019 return (ENOMEM); 2020 } 2021 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, 2022 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, 2023 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { 2024 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); 2025 return (ENOMEM); 2026 } 2027 /* 2028 * Allocate Sense Buffer Array. 
Keep in lower 4GB 2029 */ 2030 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; 2031 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2032 64, 0, 2033 BUS_SPACE_MAXADDR_32BIT, 2034 BUS_SPACE_MAXADDR, 2035 NULL, NULL, 2036 sense_size, 2037 1, 2038 sense_size, 2039 BUS_DMA_ALLOCNOW, 2040 NULL, NULL, 2041 &sc->sense_tag)) { 2042 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); 2043 return (ENOMEM); 2044 } 2045 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, 2046 BUS_DMA_NOWAIT, &sc->sense_dmamap)) { 2047 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); 2048 return (ENOMEM); 2049 } 2050 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, 2051 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, 2052 BUS_DMA_NOWAIT)) { 2053 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); 2054 return (ENOMEM); 2055 } 2056 2057 /* 2058 * Allocate for Event detail structure 2059 */ 2060 evt_detail_size = sizeof(struct mrsas_evt_detail); 2061 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2062 1, 0, 2063 BUS_SPACE_MAXADDR_32BIT, 2064 BUS_SPACE_MAXADDR, 2065 NULL, NULL, 2066 evt_detail_size, 2067 1, 2068 evt_detail_size, 2069 BUS_DMA_ALLOCNOW, 2070 NULL, NULL, 2071 &sc->evt_detail_tag)) { 2072 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); 2073 return (ENOMEM); 2074 } 2075 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, 2076 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { 2077 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); 2078 return (ENOMEM); 2079 } 2080 bzero(sc->evt_detail_mem, evt_detail_size); 2081 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, 2082 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, 2083 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { 2084 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); 2085 return (ENOMEM); 2086 } 2087 2088 /* 2089 * Allocate for PD INFO structure 2090 */ 2091 pd_info_size = 
sizeof(struct mrsas_pd_info); 2092 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2093 1, 0, 2094 BUS_SPACE_MAXADDR_32BIT, 2095 BUS_SPACE_MAXADDR, 2096 NULL, NULL, 2097 pd_info_size, 2098 1, 2099 pd_info_size, 2100 BUS_DMA_ALLOCNOW, 2101 NULL, NULL, 2102 &sc->pd_info_tag)) { 2103 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n"); 2104 return (ENOMEM); 2105 } 2106 if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem, 2107 BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) { 2108 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n"); 2109 return (ENOMEM); 2110 } 2111 bzero(sc->pd_info_mem, pd_info_size); 2112 if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap, 2113 sc->pd_info_mem, pd_info_size, mrsas_addr_cb, 2114 &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) { 2115 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n"); 2116 return (ENOMEM); 2117 } 2118 2119 /* 2120 * Create a dma tag for data buffers; size will be the maximum 2121 * possible I/O size (280kB). 2122 */ 2123 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2124 1, 2125 0, 2126 BUS_SPACE_MAXADDR, 2127 BUS_SPACE_MAXADDR, 2128 NULL, NULL, 2129 MAXPHYS, 2130 sc->max_num_sge, /* nsegments */ 2131 MAXPHYS, 2132 BUS_DMA_ALLOCNOW, 2133 busdma_lock_mutex, 2134 &sc->io_lock, 2135 &sc->data_tag)) { 2136 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); 2137 return (ENOMEM); 2138 } 2139 return (0); 2140 } 2141 2142 /* 2143 * mrsas_addr_cb: Callback function of bus_dmamap_load() 2144 * input: callback argument, machine dependent type 2145 * that describes DMA segments, number of segments, error code 2146 * 2147 * This function is for the driver to receive mapping information resultant of 2148 * the bus_dmamap_load(). The information is actually not being used, but the 2149 * address is saved anyway. 
2150 */ 2151 void 2152 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2153 { 2154 bus_addr_t *addr; 2155 2156 addr = arg; 2157 *addr = segs[0].ds_addr; 2158 } 2159 2160 /* 2161 * mrsas_setup_raidmap: Set up RAID map. 2162 * input: Adapter instance soft state 2163 * 2164 * Allocate DMA memory for the RAID maps and perform setup. 2165 */ 2166 static int 2167 mrsas_setup_raidmap(struct mrsas_softc *sc) 2168 { 2169 int i; 2170 2171 for (i = 0; i < 2; i++) { 2172 sc->ld_drv_map[i] = 2173 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 2174 /* Do Error handling */ 2175 if (!sc->ld_drv_map[i]) { 2176 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 2177 2178 if (i == 1) 2179 free(sc->ld_drv_map[0], M_MRSAS); 2180 /* ABORT driver initialization */ 2181 goto ABORT; 2182 } 2183 } 2184 2185 for (int i = 0; i < 2; i++) { 2186 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2187 4, 0, 2188 BUS_SPACE_MAXADDR_32BIT, 2189 BUS_SPACE_MAXADDR, 2190 NULL, NULL, 2191 sc->max_map_sz, 2192 1, 2193 sc->max_map_sz, 2194 BUS_DMA_ALLOCNOW, 2195 NULL, NULL, 2196 &sc->raidmap_tag[i])) { 2197 device_printf(sc->mrsas_dev, 2198 "Cannot allocate raid map tag.\n"); 2199 return (ENOMEM); 2200 } 2201 if (bus_dmamem_alloc(sc->raidmap_tag[i], 2202 (void **)&sc->raidmap_mem[i], 2203 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 2204 device_printf(sc->mrsas_dev, 2205 "Cannot allocate raidmap memory.\n"); 2206 return (ENOMEM); 2207 } 2208 bzero(sc->raidmap_mem[i], sc->max_map_sz); 2209 2210 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 2211 sc->raidmap_mem[i], sc->max_map_sz, 2212 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 2213 BUS_DMA_NOWAIT)) { 2214 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 2215 return (ENOMEM); 2216 } 2217 if (!sc->raidmap_mem[i]) { 2218 device_printf(sc->mrsas_dev, 2219 "Cannot allocate memory for raid map.\n"); 2220 return (ENOMEM); 2221 } 2222 } 2223 2224 if (!mrsas_get_map_info(sc)) 2225 
mrsas_sync_map_info(sc); 2226 2227 return (0); 2228 2229 ABORT: 2230 return (1); 2231 } 2232 2233 /** 2234 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 2235 * @sc: Adapter soft state 2236 * 2237 * Return 0 on success. 2238 */ 2239 void 2240 megasas_setup_jbod_map(struct mrsas_softc *sc) 2241 { 2242 int i; 2243 uint32_t pd_seq_map_sz; 2244 2245 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 2246 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 2247 2248 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 2249 sc->use_seqnum_jbod_fp = 0; 2250 return; 2251 } 2252 if (sc->jbodmap_mem[0]) 2253 goto skip_alloc; 2254 2255 for (i = 0; i < 2; i++) { 2256 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2257 4, 0, 2258 BUS_SPACE_MAXADDR_32BIT, 2259 BUS_SPACE_MAXADDR, 2260 NULL, NULL, 2261 pd_seq_map_sz, 2262 1, 2263 pd_seq_map_sz, 2264 BUS_DMA_ALLOCNOW, 2265 NULL, NULL, 2266 &sc->jbodmap_tag[i])) { 2267 device_printf(sc->mrsas_dev, 2268 "Cannot allocate jbod map tag.\n"); 2269 return; 2270 } 2271 if (bus_dmamem_alloc(sc->jbodmap_tag[i], 2272 (void **)&sc->jbodmap_mem[i], 2273 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) { 2274 device_printf(sc->mrsas_dev, 2275 "Cannot allocate jbod map memory.\n"); 2276 return; 2277 } 2278 bzero(sc->jbodmap_mem[i], pd_seq_map_sz); 2279 2280 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i], 2281 sc->jbodmap_mem[i], pd_seq_map_sz, 2282 mrsas_addr_cb, &sc->jbodmap_phys_addr[i], 2283 BUS_DMA_NOWAIT)) { 2284 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); 2285 return; 2286 } 2287 if (!sc->jbodmap_mem[i]) { 2288 device_printf(sc->mrsas_dev, 2289 "Cannot allocate memory for jbod map.\n"); 2290 sc->use_seqnum_jbod_fp = 0; 2291 return; 2292 } 2293 } 2294 2295 skip_alloc: 2296 if (!megasas_sync_pd_seq_num(sc, false) && 2297 !megasas_sync_pd_seq_num(sc, true)) 2298 sc->use_seqnum_jbod_fp = 1; 2299 else 2300 sc->use_seqnum_jbod_fp = 0; 2301 2302 device_printf(sc->mrsas_dev, "Jbod map is 
supported\n"); 2303 } 2304 2305 /* 2306 * mrsas_init_fw: Initialize Firmware 2307 * input: Adapter soft state 2308 * 2309 * Calls transition_to_ready() to make sure Firmware is in operational state and 2310 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It 2311 * issues internal commands to get the controller info after the IOC_INIT 2312 * command response is received by Firmware. Note: code relating to 2313 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2314 * is left here as placeholder. 2315 */ 2316 static int 2317 mrsas_init_fw(struct mrsas_softc *sc) 2318 { 2319 2320 int ret, loop, ocr = 0; 2321 u_int32_t max_sectors_1; 2322 u_int32_t max_sectors_2; 2323 u_int32_t tmp_sectors; 2324 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4; 2325 int msix_enable = 0; 2326 int fw_msix_count = 0; 2327 int i, j; 2328 2329 /* Make sure Firmware is ready */ 2330 ret = mrsas_transition_to_ready(sc, ocr); 2331 if (ret != SUCCESS) { 2332 return (ret); 2333 } 2334 if (sc->is_ventura || sc->is_aero) { 2335 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3)); 2336 #if VD_EXT_DEBUG 2337 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3); 2338 #endif 2339 sc->maxRaidMapSize = ((scratch_pad_3 >> 2340 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 2341 MR_MAX_RAID_MAP_SIZE_MASK); 2342 } 2343 /* MSI-x index 0- reply post host index register */ 2344 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2345 /* Check if MSI-X is supported while in ready state */ 2346 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2347 2348 if (msix_enable) { 2349 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2350 outbound_scratch_pad_2)); 2351 2352 /* Check max MSI-X vectors */ 2353 if (sc->device_id == MRSAS_TBOLT) { 2354 sc->msix_vectors = (scratch_pad_2 2355 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2356 
fw_msix_count = sc->msix_vectors; 2357 } else { 2358 /* Invader/Fury supports 96 MSI-X vectors */ 2359 sc->msix_vectors = ((scratch_pad_2 2360 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2361 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2362 fw_msix_count = sc->msix_vectors; 2363 2364 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) || 2365 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16))) 2366 sc->msix_combined = true; 2367 /* 2368 * Save 1-15 reply post index 2369 * address to local memory Index 0 2370 * is already saved from reg offset 2371 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 2372 */ 2373 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2374 loop++) { 2375 sc->msix_reg_offset[loop] = 2376 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2377 (loop * 0x10); 2378 } 2379 } 2380 2381 /* Don't bother allocating more MSI-X vectors than cpus */ 2382 sc->msix_vectors = min(sc->msix_vectors, 2383 mp_ncpus); 2384 2385 /* Allocate MSI-x vectors */ 2386 if (mrsas_allocate_msix(sc) == SUCCESS) 2387 sc->msix_enable = 1; 2388 else 2389 sc->msix_enable = 0; 2390 2391 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2392 "Online CPU %d Current MSIX <%d>\n", 2393 fw_msix_count, mp_ncpus, sc->msix_vectors); 2394 } 2395 /* 2396 * MSI-X host index 0 is common for all adapter. 2397 * It is used for all MPT based Adapters. 
2398 */ 2399 if (sc->msix_combined) { 2400 sc->msix_reg_offset[0] = 2401 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET; 2402 } 2403 if (mrsas_init_adapter(sc) != SUCCESS) { 2404 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2405 return (1); 2406 } 2407 2408 if (sc->is_ventura || sc->is_aero) { 2409 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2410 outbound_scratch_pad_4)); 2411 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT) 2412 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK); 2413 2414 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size); 2415 } 2416 2417 /* Allocate internal commands for pass-thru */ 2418 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2419 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2420 return (1); 2421 } 2422 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2423 if (!sc->ctrl_info) { 2424 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2425 return (1); 2426 } 2427 /* 2428 * Get the controller info from FW, so that the MAX VD support 2429 * availability can be decided. 2430 */ 2431 if (mrsas_get_ctrl_info(sc)) { 2432 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2433 return (1); 2434 } 2435 sc->secure_jbod_support = 2436 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2437 2438 if (sc->secure_jbod_support) 2439 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2440 2441 if (sc->use_seqnum_jbod_fp) 2442 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); 2443 2444 if (sc->support_morethan256jbod) 2445 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n"); 2446 2447 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2448 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! 
" 2449 "There seems to be some problem in the controller\n" 2450 "Please contact to the SUPPORT TEAM if the problem persists\n"); 2451 } 2452 megasas_setup_jbod_map(sc); 2453 2454 2455 memset(sc->target_list, 0, 2456 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target)); 2457 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) 2458 sc->target_list[i].target_id = 0xffff; 2459 2460 /* For pass-thru, get PD/LD list and controller info */ 2461 memset(sc->pd_list, 0, 2462 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2463 if (mrsas_get_pd_list(sc) != SUCCESS) { 2464 device_printf(sc->mrsas_dev, "Get PD list failed.\n"); 2465 return (1); 2466 } 2467 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2468 if (mrsas_get_ld_list(sc) != SUCCESS) { 2469 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); 2470 return (1); 2471 } 2472 2473 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) { 2474 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) * 2475 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT); 2476 if (!sc->streamDetectByLD) { 2477 device_printf(sc->mrsas_dev, 2478 "unable to allocate stream detection for pool of LDs\n"); 2479 return (1); 2480 } 2481 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 2482 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT); 2483 if (!sc->streamDetectByLD[i]) { 2484 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n"); 2485 for (j = 0; j < i; ++j) 2486 free(sc->streamDetectByLD[j], M_MRSAS); 2487 free(sc->streamDetectByLD, M_MRSAS); 2488 sc->streamDetectByLD = NULL; 2489 return (1); 2490 } 2491 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 2492 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 2493 } 2494 } 2495 2496 /* 2497 * Compute the max allowed sectors per IO: The controller info has 2498 * two limits on max sectors. Driver should use the minimum of these 2499 * two. 
2500 * 2501 * 1 << stripe_sz_ops.min = max sectors per strip 2502 * 2503 * Note that older firmwares ( < FW ver 30) didn't report information to 2504 * calculate max_sectors_1. So the number ended up as zero always. 2505 */ 2506 tmp_sectors = 0; 2507 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2508 sc->ctrl_info->max_strips_per_io; 2509 max_sectors_2 = sc->ctrl_info->max_request_size; 2510 tmp_sectors = min(max_sectors_1, max_sectors_2); 2511 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2512 2513 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2514 sc->max_sectors_per_req = tmp_sectors; 2515 2516 sc->disableOnlineCtrlReset = 2517 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2518 sc->UnevenSpanSupport = 2519 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2520 if (sc->UnevenSpanSupport) { 2521 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2522 sc->UnevenSpanSupport); 2523 2524 if (MR_ValidateMapInfo(sc)) 2525 sc->fast_path_io = 1; 2526 else 2527 sc->fast_path_io = 0; 2528 } 2529 return (0); 2530 } 2531 2532 /* 2533 * mrsas_init_adapter: Initializes the adapter/controller 2534 * input: Adapter soft state 2535 * 2536 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2537 * ROC/controller. The FW register is read to determined the number of 2538 * commands that is supported. All memory allocations for IO is based on 2539 * max_cmd. Appropriate calculations are performed in this function. 
2540 */ 2541 int 2542 mrsas_init_adapter(struct mrsas_softc *sc) 2543 { 2544 uint32_t status; 2545 u_int32_t scratch_pad_2; 2546 int ret; 2547 int i = 0; 2548 2549 /* Read FW status register */ 2550 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2551 2552 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; 2553 2554 /* Decrement the max supported by 1, to correlate with FW */ 2555 sc->max_fw_cmds = sc->max_fw_cmds - 1; 2556 sc->max_scsi_cmds = sc->max_fw_cmds - 2557 (MRSAS_FUSION_INT_CMDS + MRSAS_MAX_IOCTL_CMDS); 2558 2559 /* Determine allocation size of command frames */ 2560 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2; 2561 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds; 2562 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); 2563 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 2564 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1)); 2565 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2566 outbound_scratch_pad_2)); 2567 /* 2568 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 2569 * Firmware support extended IO chain frame which is 4 time more 2570 * than legacy Firmware. 
Legacy Firmware - Frame size is (8 * 128) = 2571 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K 2572 */ 2573 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) 2574 sc->max_chain_frame_sz = 2575 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2576 * MEGASAS_1MB_IO; 2577 else 2578 sc->max_chain_frame_sz = 2579 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2580 * MEGASAS_256K_IO; 2581 2582 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds; 2583 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2584 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; 2585 2586 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); 2587 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; 2588 2589 mrsas_dprint(sc, MRSAS_INFO, 2590 "max sge: 0x%x, max chain frame size: 0x%x, " 2591 "max fw cmd: 0x%x\n", sc->max_num_sge, 2592 sc->max_chain_frame_sz, sc->max_fw_cmds); 2593 2594 /* Used for pass thru MFI frame (DCMD) */ 2595 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; 2596 2597 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2598 sizeof(MPI2_SGE_IO_UNION)) / 16; 2599 2600 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2601 2602 for (i = 0; i < count; i++) 2603 sc->last_reply_idx[i] = 0; 2604 2605 ret = mrsas_alloc_mem(sc); 2606 if (ret != SUCCESS) 2607 return (ret); 2608 2609 ret = mrsas_alloc_mpt_cmds(sc); 2610 if (ret != SUCCESS) 2611 return (ret); 2612 2613 ret = mrsas_ioc_init(sc); 2614 if (ret != SUCCESS) 2615 return (ret); 2616 2617 return (0); 2618 } 2619 2620 /* 2621 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command 2622 * input: Adapter soft state 2623 * 2624 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. 
2625 */ 2626 int 2627 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) 2628 { 2629 int ioc_init_size; 2630 2631 /* Allocate IOC INIT command */ 2632 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); 2633 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2634 1, 0, 2635 BUS_SPACE_MAXADDR_32BIT, 2636 BUS_SPACE_MAXADDR, 2637 NULL, NULL, 2638 ioc_init_size, 2639 1, 2640 ioc_init_size, 2641 BUS_DMA_ALLOCNOW, 2642 NULL, NULL, 2643 &sc->ioc_init_tag)) { 2644 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); 2645 return (ENOMEM); 2646 } 2647 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, 2648 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { 2649 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); 2650 return (ENOMEM); 2651 } 2652 bzero(sc->ioc_init_mem, ioc_init_size); 2653 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, 2654 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, 2655 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { 2656 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); 2657 return (ENOMEM); 2658 } 2659 return (0); 2660 } 2661 2662 /* 2663 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command 2664 * input: Adapter soft state 2665 * 2666 * Deallocates memory of the IOC Init cmd. 2667 */ 2668 void 2669 mrsas_free_ioc_cmd(struct mrsas_softc *sc) 2670 { 2671 if (sc->ioc_init_phys_mem) 2672 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); 2673 if (sc->ioc_init_mem != NULL) 2674 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); 2675 if (sc->ioc_init_tag != NULL) 2676 bus_dma_tag_destroy(sc->ioc_init_tag); 2677 } 2678 2679 /* 2680 * mrsas_ioc_init: Sends IOC Init command to FW 2681 * input: Adapter soft state 2682 * 2683 * Issues the IOC Init cmd to FW to initialize the ROC/controller. 
2684 */ 2685 int 2686 mrsas_ioc_init(struct mrsas_softc *sc) 2687 { 2688 struct mrsas_init_frame *init_frame; 2689 pMpi2IOCInitRequest_t IOCInitMsg; 2690 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; 2691 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 2692 bus_addr_t phys_addr; 2693 int i, retcode = 0; 2694 u_int32_t scratch_pad_2; 2695 2696 /* Allocate memory for the IOC INIT command */ 2697 if (mrsas_alloc_ioc_cmd(sc)) { 2698 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); 2699 return (1); 2700 } 2701 2702 if (!sc->block_sync_cache) { 2703 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2704 outbound_scratch_pad_2)); 2705 sc->fw_sync_cache_support = (scratch_pad_2 & 2706 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; 2707 } 2708 2709 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); 2710 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; 2711 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 2712 IOCInitMsg->MsgVersion = MPI2_VERSION; 2713 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION; 2714 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; 2715 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth; 2716 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr; 2717 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr; 2718 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); 2719 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 2720 2721 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; 2722 init_frame->cmd = MFI_CMD_INIT; 2723 init_frame->cmd_status = 0xFF; 2724 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2725 2726 /* driver support Extended MSIX */ 2727 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 2728 init_frame->driver_operations. 
2729 mfi_capabilities.support_additional_msix = 1; 2730 } 2731 if (sc->verbuf_mem) { 2732 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", 2733 MRSAS_VERSION); 2734 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; 2735 init_frame->driver_ver_hi = 0; 2736 } 2737 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; 2738 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; 2739 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; 2740 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 2741 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; 2742 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; 2743 init_frame->queue_info_new_phys_addr_lo = phys_addr; 2744 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); 2745 2746 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; 2747 req_desc.MFAIo.RequestFlags = 2748 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2749 2750 mrsas_disable_intr(sc); 2751 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); 2752 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high); 2753 2754 /* 2755 * Poll response timer to wait for Firmware response. While this 2756 * timer with the DELAY call could block CPU, the time interval for 2757 * this is only 1 millisecond. 
2758 */ 2759 if (init_frame->cmd_status == 0xFF) { 2760 for (i = 0; i < (max_wait * 1000); i++) { 2761 if (init_frame->cmd_status == 0xFF) 2762 DELAY(1000); 2763 else 2764 break; 2765 } 2766 } 2767 if (init_frame->cmd_status == 0) 2768 mrsas_dprint(sc, MRSAS_OCR, 2769 "IOC INIT response received from FW.\n"); 2770 else { 2771 if (init_frame->cmd_status == 0xFF) 2772 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); 2773 else 2774 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); 2775 retcode = 1; 2776 } 2777 2778 if (sc->is_aero) { 2779 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2780 outbound_scratch_pad_2)); 2781 sc->atomic_desc_support = (scratch_pad_2 & 2782 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; 2783 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n", 2784 sc->atomic_desc_support ? "Yes" : "No"); 2785 } 2786 2787 mrsas_free_ioc_cmd(sc); 2788 return (retcode); 2789 } 2790 2791 /* 2792 * mrsas_alloc_mpt_cmds: Allocates the command packets 2793 * input: Adapter instance soft state 2794 * 2795 * This function allocates the internal commands for IOs. Each command that is 2796 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An 2797 * array is allocated with mrsas_mpt_cmd context. The free commands are 2798 * maintained in a linked list (cmd pool). SMID value range is from 1 to 2799 * max_fw_cmds. 
2800 */ 2801 int 2802 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) 2803 { 2804 int i, j; 2805 u_int32_t max_fw_cmds, count; 2806 struct mrsas_mpt_cmd *cmd; 2807 pMpi2ReplyDescriptorsUnion_t reply_desc; 2808 u_int32_t offset, chain_offset, sense_offset; 2809 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; 2810 u_int8_t *io_req_base, *chain_frame_base, *sense_base; 2811 2812 max_fw_cmds = sc->max_fw_cmds; 2813 2814 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); 2815 if (!sc->req_desc) { 2816 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); 2817 return (ENOMEM); 2818 } 2819 memset(sc->req_desc, 0, sc->request_alloc_sz); 2820 2821 /* 2822 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. 2823 * Allocate the dynamic array first and then allocate individual 2824 * commands. 2825 */ 2826 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds, 2827 M_MRSAS, M_NOWAIT); 2828 if (!sc->mpt_cmd_list) { 2829 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); 2830 return (ENOMEM); 2831 } 2832 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds); 2833 for (i = 0; i < max_fw_cmds; i++) { 2834 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), 2835 M_MRSAS, M_NOWAIT); 2836 if (!sc->mpt_cmd_list[i]) { 2837 for (j = 0; j < i; j++) 2838 free(sc->mpt_cmd_list[j], M_MRSAS); 2839 free(sc->mpt_cmd_list, M_MRSAS); 2840 sc->mpt_cmd_list = NULL; 2841 return (ENOMEM); 2842 } 2843 } 2844 2845 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2846 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2847 chain_frame_base = (u_int8_t *)sc->chain_frame_mem; 2848 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; 2849 sense_base = (u_int8_t *)sc->sense_mem; 2850 sense_base_phys = (bus_addr_t)sc->sense_phys_addr; 2851 for (i = 0; i < max_fw_cmds; i++) { 2852 cmd = 
sc->mpt_cmd_list[i]; 2853 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 2854 chain_offset = sc->max_chain_frame_sz * i; 2855 sense_offset = MRSAS_SENSE_LEN * i; 2856 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); 2857 cmd->index = i + 1; 2858 cmd->ccb_ptr = NULL; 2859 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2860 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); 2861 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 2862 cmd->sc = sc; 2863 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); 2864 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 2865 cmd->io_request_phys_addr = io_req_base_phys + offset; 2866 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); 2867 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; 2868 cmd->sense = sense_base + sense_offset; 2869 cmd->sense_phys_addr = sense_base_phys + sense_offset; 2870 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { 2871 return (FAIL); 2872 } 2873 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 2874 } 2875 2876 /* Initialize reply descriptor array to 0xFFFFFFFF */ 2877 reply_desc = sc->reply_desc_mem; 2878 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 2879 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { 2880 reply_desc->Words = MRSAS_ULONG_MAX; 2881 } 2882 return (0); 2883 } 2884 2885 /* 2886 * mrsas_write_64bit_req_dsc: Writes 64 bit request descriptor to FW 2887 * input: Adapter softstate 2888 * request descriptor address low 2889 * request descriptor address high 2890 */ 2891 void 2892 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2893 u_int32_t req_desc_hi) 2894 { 2895 mtx_lock(&sc->pci_lock); 2896 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), 2897 req_desc_lo); 2898 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), 2899 req_desc_hi); 2900 mtx_unlock(&sc->pci_lock); 2901 } 2902 2903 /* 2904 * mrsas_fire_cmd: Sends command to FW 2905 * input: Adapter softstate 2906 * request descriptor address low 2907 * request descriptor address high 2908 * 2909 * This functions fires the command to Firmware by writing to the 2910 * inbound_low_queue_port and inbound_high_queue_port. 2911 */ 2912 void 2913 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2914 u_int32_t req_desc_hi) 2915 { 2916 if (sc->atomic_desc_support) 2917 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port), 2918 req_desc_lo); 2919 else 2920 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi); 2921 } 2922 2923 /* 2924 * mrsas_transition_to_ready: Move FW to Ready state input: 2925 * Adapter instance soft state 2926 * 2927 * During the initialization, FW passes can potentially be in any one of several 2928 * possible states. If the FW in operational, waiting-for-handshake states, 2929 * driver must take steps to bring it to ready state. Otherwise, it has to 2930 * wait for the ready state. 
 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* Current FW state lives in the low bits of the scratch pad register. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full (unmasked) scratch pad value; used to detect any change below. */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				/* During OCR a fault is expected; wait it out. */
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll the doorbell until the reset bit clears (1 ms granularity). */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs.
		 * Poll for any change of the full scratch pad value.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd:	Get a cmd from free command pool
 * input:	Adapter soft state
 *
 * This function removes an MFI command from the command list (returns
 * NULL if the pool is exhausted).  The pool is protected by
 * mfi_cmd_pool_lock.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
 * input:	Adapter Context.
 *
 * This function will check the FW status register and the do_timedout_reset
 * flag.  It will perform OCR (online controller reset) or kill the adapter
 * if the FW is in a fault state or an IO timeout has triggered a reset.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
		    mrsas_atomic_read(&sc->target_reset_outstanding)) {

			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				    "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
					    "because of %s!\n", sc->do_timedout_reset ?
					    "DCMD IO Timeout" : "FW fault");

				/* ioctl_lock (spin) guards reset_in_progress against the ioctl path. */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 * sim_lock must be dropped while draining to avoid deadlock
				 * with the event task.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}

/*
 * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
 * input:	Adapter Context.
 *
 * This function clears the reply descriptors and per-queue reply indices
 * so that, post OCR, the driver and FW discard the old reply history.
 */
void
mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
	int i, count;
	pMpi2ReplyDescriptorsUnion_t reply_desc;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	reply_desc = sc->reply_desc_mem;
	/* All-ones marks a descriptor as unused. */
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
}

/*
 * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3187 * input: Adapter Context. 3188 * 3189 * This function will run from thread context so that it can sleep. 1. Do not 3190 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 3191 * to complete for 180 seconds. 3. If #2 does not find any outstanding 3192 * command Controller is in working state, so skip OCR. Otherwise, do 3193 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the 3194 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post 3195 * OCR, Re-fire Management command and move Controller to Operation state. 3196 */ 3197 int 3198 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) 3199 { 3200 int retval = SUCCESS, i, j, retry = 0; 3201 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 3202 union ccb *ccb; 3203 struct mrsas_mfi_cmd *mfi_cmd; 3204 struct mrsas_mpt_cmd *mpt_cmd; 3205 union mrsas_evt_class_locale class_locale; 3206 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3207 3208 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3209 device_printf(sc->mrsas_dev, 3210 "mrsas: Hardware critical error, returning FAIL.\n"); 3211 return FAIL; 3212 } 3213 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3214 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 3215 mrsas_disable_intr(sc); 3216 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", 3217 sc->mrsas_fw_fault_check_delay * hz); 3218 3219 /* First try waiting for commands to complete */ 3220 if (mrsas_wait_for_outstanding(sc, reset_reason)) { 3221 mrsas_dprint(sc, MRSAS_OCR, 3222 "resetting adapter from %s.\n", 3223 __func__); 3224 /* Now return commands back to the CAM layer */ 3225 mtx_unlock(&sc->sim_lock); 3226 for (i = 0; i < sc->max_fw_cmds; i++) { 3227 mpt_cmd = sc->mpt_cmd_list[i]; 3228 3229 if (mpt_cmd->peer_cmd) { 3230 mrsas_dprint(sc, MRSAS_OCR, 3231 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n", 3232 i, mpt_cmd, mpt_cmd->peer_cmd); 3233 } 3234 3235 if (mpt_cmd->ccb_ptr) { 3236 if 
(mpt_cmd->callout_owner) { 3237 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 3238 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3239 mrsas_cmd_done(sc, mpt_cmd); 3240 } else { 3241 mpt_cmd->ccb_ptr = NULL; 3242 mrsas_release_mpt_cmd(mpt_cmd); 3243 } 3244 } 3245 } 3246 3247 mrsas_atomic_set(&sc->fw_outstanding, 0); 3248 3249 mtx_lock(&sc->sim_lock); 3250 3251 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3252 outbound_scratch_pad)); 3253 abs_state = status_reg & MFI_STATE_MASK; 3254 reset_adapter = status_reg & MFI_RESET_ADAPTER; 3255 if (sc->disableOnlineCtrlReset || 3256 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 3257 /* Reset not supported, kill adapter */ 3258 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); 3259 mrsas_kill_hba(sc); 3260 retval = FAIL; 3261 goto out; 3262 } 3263 /* Now try to reset the chip */ 3264 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 3265 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3266 MPI2_WRSEQ_FLUSH_KEY_VALUE); 3267 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3268 MPI2_WRSEQ_1ST_KEY_VALUE); 3269 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3270 MPI2_WRSEQ_2ND_KEY_VALUE); 3271 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3272 MPI2_WRSEQ_3RD_KEY_VALUE); 3273 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3274 MPI2_WRSEQ_4TH_KEY_VALUE); 3275 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3276 MPI2_WRSEQ_5TH_KEY_VALUE); 3277 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3278 MPI2_WRSEQ_6TH_KEY_VALUE); 3279 3280 /* Check that the diag write enable (DRWE) bit is on */ 3281 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3282 fusion_host_diag)); 3283 retry = 0; 3284 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3285 DELAY(100 * 1000); 3286 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3287 fusion_host_diag)); 3288 if 
(retry++ == 100) { 3289 mrsas_dprint(sc, MRSAS_OCR, 3290 "Host diag unlock failed!\n"); 3291 break; 3292 } 3293 } 3294 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 3295 continue; 3296 3297 /* Send chip reset command */ 3298 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), 3299 host_diag | HOST_DIAG_RESET_ADAPTER); 3300 DELAY(3000 * 1000); 3301 3302 /* Make sure reset adapter bit is cleared */ 3303 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3304 fusion_host_diag)); 3305 retry = 0; 3306 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3307 DELAY(100 * 1000); 3308 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3309 fusion_host_diag)); 3310 if (retry++ == 1000) { 3311 mrsas_dprint(sc, MRSAS_OCR, 3312 "Diag reset adapter never cleared!\n"); 3313 break; 3314 } 3315 } 3316 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3317 continue; 3318 3319 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3320 outbound_scratch_pad)) & MFI_STATE_MASK; 3321 retry = 0; 3322 3323 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3324 DELAY(100 * 1000); 3325 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3326 outbound_scratch_pad)) & MFI_STATE_MASK; 3327 } 3328 if (abs_state <= MFI_STATE_FW_INIT) { 3329 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," 3330 " state = 0x%x\n", abs_state); 3331 continue; 3332 } 3333 /* Wait for FW to become ready */ 3334 if (mrsas_transition_to_ready(sc, 1)) { 3335 mrsas_dprint(sc, MRSAS_OCR, 3336 "mrsas: Failed to transition controller to ready.\n"); 3337 continue; 3338 } 3339 mrsas_reset_reply_desc(sc); 3340 if (mrsas_ioc_init(sc)) { 3341 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); 3342 continue; 3343 } 3344 for (j = 0; j < sc->max_fw_cmds; j++) { 3345 mpt_cmd = sc->mpt_cmd_list[j]; 3346 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3347 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; 3348 /* If not an IOCTL 
then release the command else re-fire */ 3349 if (!mfi_cmd->sync_cmd) { 3350 mrsas_release_mfi_cmd(mfi_cmd); 3351 } else { 3352 req_desc = mrsas_get_request_desc(sc, 3353 mfi_cmd->cmd_id.context.smid - 1); 3354 mrsas_dprint(sc, MRSAS_OCR, 3355 "Re-fire command DCMD opcode 0x%x index %d\n ", 3356 mfi_cmd->frame->dcmd.opcode, j); 3357 if (!req_desc) 3358 device_printf(sc->mrsas_dev, 3359 "Cannot build MPT cmd.\n"); 3360 else 3361 mrsas_fire_cmd(sc, req_desc->addr.u.low, 3362 req_desc->addr.u.high); 3363 } 3364 } 3365 } 3366 3367 /* Reset load balance info */ 3368 memset(sc->load_balance_info, 0, 3369 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); 3370 3371 if (mrsas_get_ctrl_info(sc)) { 3372 mrsas_kill_hba(sc); 3373 retval = FAIL; 3374 goto out; 3375 } 3376 if (!mrsas_get_map_info(sc)) 3377 mrsas_sync_map_info(sc); 3378 3379 megasas_setup_jbod_map(sc); 3380 3381 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { 3382 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 3383 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 3384 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 3385 } 3386 } 3387 3388 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3389 mrsas_enable_intr(sc); 3390 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3391 3392 /* Register AEN with FW for last sequence number */ 3393 class_locale.members.reserved = 0; 3394 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3395 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3396 3397 mtx_unlock(&sc->sim_lock); 3398 if (mrsas_register_aen(sc, sc->last_seq_num, 3399 class_locale.word)) { 3400 device_printf(sc->mrsas_dev, 3401 "ERROR: AEN registration FAILED from OCR !!! " 3402 "Further events from the controller cannot be notified." 
3403 "Either there is some problem in the controller" 3404 "or the controller does not support AEN.\n" 3405 "Please contact to the SUPPORT TEAM if the problem persists\n"); 3406 } 3407 mtx_lock(&sc->sim_lock); 3408 3409 /* Adapter reset completed successfully */ 3410 device_printf(sc->mrsas_dev, "Reset successful\n"); 3411 retval = SUCCESS; 3412 goto out; 3413 } 3414 /* Reset failed, kill the adapter */ 3415 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); 3416 mrsas_kill_hba(sc); 3417 retval = FAIL; 3418 } else { 3419 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3420 mrsas_enable_intr(sc); 3421 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3422 } 3423 out: 3424 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3425 mrsas_dprint(sc, MRSAS_OCR, 3426 "Reset Exit with %d.\n", retval); 3427 return retval; 3428 } 3429 3430 /* 3431 * mrsas_kill_hba: Kill HBA when OCR is not supported 3432 * input: Adapter Context. 3433 * 3434 * This function will kill HBA when OCR is not supported. 3435 */ 3436 void 3437 mrsas_kill_hba(struct mrsas_softc *sc) 3438 { 3439 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; 3440 DELAY(1000 * 1000); 3441 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); 3442 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3443 MFI_STOP_ADP); 3444 /* Flush */ 3445 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); 3446 mrsas_complete_outstanding_ioctls(sc); 3447 } 3448 3449 /** 3450 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba 3451 * input: Controller softc 3452 * 3453 * Returns void 3454 */ 3455 void 3456 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) 3457 { 3458 int i; 3459 struct mrsas_mpt_cmd *cmd_mpt; 3460 struct mrsas_mfi_cmd *cmd_mfi; 3461 u_int32_t count, MSIxIndex; 3462 3463 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 3464 for (i = 0; i < sc->max_fw_cmds; i++) { 3465 cmd_mpt = sc->mpt_cmd_list[i]; 3466 3467 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3468 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 3469 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { 3470 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3471 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, 3472 cmd_mpt->io_request->RaidContext.raid_context.status); 3473 } 3474 } 3475 } 3476 } 3477 3478 /* 3479 * mrsas_wait_for_outstanding: Wait for outstanding commands 3480 * input: Adapter Context. 3481 * 3482 * This function will wait for 180 seconds for outstanding commands to be 3483 * completed. 3484 */ 3485 int 3486 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) 3487 { 3488 int i, outstanding, retval = 0; 3489 u_int32_t fw_state, count, MSIxIndex; 3490 3491 3492 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { 3493 if (sc->remove_in_progress) { 3494 mrsas_dprint(sc, MRSAS_OCR, 3495 "Driver remove or shutdown called.\n"); 3496 retval = 1; 3497 goto out; 3498 } 3499 /* Check if firmware is in fault state */ 3500 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3501 outbound_scratch_pad)) & MFI_STATE_MASK; 3502 if (fw_state == MFI_STATE_FAULT) { 3503 mrsas_dprint(sc, MRSAS_OCR, 3504 "Found FW in FAULT state, will reset adapter.\n"); 3505 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 3506 mtx_unlock(&sc->sim_lock); 3507 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3508 mrsas_complete_cmd(sc, MSIxIndex); 3509 mtx_lock(&sc->sim_lock); 3510 retval = 1; 3511 goto out; 3512 } 3513 if (check_reason == MFI_DCMD_TIMEOUT_OCR) { 3514 mrsas_dprint(sc, MRSAS_OCR, 3515 "DCMD IO TIMEOUT detected, will reset adapter.\n"); 3516 retval = 1; 3517 goto out; 3518 } 3519 outstanding = mrsas_atomic_read(&sc->fw_outstanding); 3520 if (!outstanding) 3521 goto out; 3522 3523 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 3524 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " 3525 "commands to complete\n", i, outstanding); 3526 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3527 mtx_unlock(&sc->sim_lock); 3528 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3529 mrsas_complete_cmd(sc, MSIxIndex); 3530 mtx_lock(&sc->sim_lock); 3531 } 3532 DELAY(1000 * 1000); 3533 } 3534 3535 if (mrsas_atomic_read(&sc->fw_outstanding)) { 3536 mrsas_dprint(sc, MRSAS_OCR, 3537 " pending commands remain after waiting," 3538 " will reset adapter.\n"); 3539 retval = 1; 3540 } 3541 out: 3542 return retval; 3543 } 3544 3545 /* 3546 * mrsas_release_mfi_cmd: Return a cmd to free command pool 3547 * input: Command packet for return to free cmd pool 3548 * 3549 * This function returns the MFI & MPT command to the command list. 
3550 */ 3551 void 3552 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi) 3553 { 3554 struct mrsas_softc *sc = cmd_mfi->sc; 3555 struct mrsas_mpt_cmd *cmd_mpt; 3556 3557 3558 mtx_lock(&sc->mfi_cmd_pool_lock); 3559 /* 3560 * Release the mpt command (if at all it is allocated 3561 * associated with the mfi command 3562 */ 3563 if (cmd_mfi->cmd_id.context.smid) { 3564 mtx_lock(&sc->mpt_cmd_pool_lock); 3565 /* Get the mpt cmd from mfi cmd frame's smid value */ 3566 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1]; 3567 cmd_mpt->flags = 0; 3568 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 3569 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next); 3570 mtx_unlock(&sc->mpt_cmd_pool_lock); 3571 } 3572 /* Release the mfi command */ 3573 cmd_mfi->ccb_ptr = NULL; 3574 cmd_mfi->cmd_id.frame_count = 0; 3575 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next); 3576 mtx_unlock(&sc->mfi_cmd_pool_lock); 3577 3578 return; 3579 } 3580 3581 /* 3582 * mrsas_get_controller_info: Returns FW's controller structure 3583 * input: Adapter soft state 3584 * Controller information structure 3585 * 3586 * Issues an internal command (DCMD) to get the FW's controller structure. This 3587 * information is mainly used to find out the maximum IO transfer per command 3588 * supported by the FW. 
3589 */ 3590 static int 3591 mrsas_get_ctrl_info(struct mrsas_softc *sc) 3592 { 3593 int retcode = 0; 3594 u_int8_t do_ocr = 1; 3595 struct mrsas_mfi_cmd *cmd; 3596 struct mrsas_dcmd_frame *dcmd; 3597 3598 cmd = mrsas_get_mfi_cmd(sc); 3599 3600 if (!cmd) { 3601 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 3602 return -ENOMEM; 3603 } 3604 dcmd = &cmd->frame->dcmd; 3605 3606 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 3607 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 3608 mrsas_release_mfi_cmd(cmd); 3609 return -ENOMEM; 3610 } 3611 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3612 3613 dcmd->cmd = MFI_CMD_DCMD; 3614 dcmd->cmd_status = 0xFF; 3615 dcmd->sge_count = 1; 3616 dcmd->flags = MFI_FRAME_DIR_READ; 3617 dcmd->timeout = 0; 3618 dcmd->pad_0 = 0; 3619 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); 3620 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 3621 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; 3622 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); 3623 3624 if (!sc->mask_interrupts) 3625 retcode = mrsas_issue_blocked_cmd(sc, cmd); 3626 else 3627 retcode = mrsas_issue_polled(sc, cmd); 3628 3629 if (retcode == ETIMEDOUT) 3630 goto dcmd_timeout; 3631 else 3632 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 3633 3634 do_ocr = 0; 3635 mrsas_update_ext_vd_details(sc); 3636 3637 sc->use_seqnum_jbod_fp = 3638 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; 3639 sc->support_morethan256jbod = 3640 sc->ctrl_info->adapterOperations4.supportPdMapTargetId; 3641 3642 sc->disableOnlineCtrlReset = 3643 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3644 3645 dcmd_timeout: 3646 mrsas_free_ctlr_info_cmd(sc); 3647 3648 if (do_ocr) 3649 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 3650 3651 if (!sc->mask_interrupts) 3652 mrsas_release_mfi_cmd(cmd); 3653 3654 return (retcode); 3655 } 3656 3657 /* 3658 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD 3659 * 
input: 3660 * sc - Controller's softc 3661 */ 3662 static void 3663 mrsas_update_ext_vd_details(struct mrsas_softc *sc) 3664 { 3665 u_int32_t ventura_map_sz = 0; 3666 sc->max256vdSupport = 3667 sc->ctrl_info->adapterOperations3.supportMaxExtLDs; 3668 3669 /* Below is additional check to address future FW enhancement */ 3670 if (sc->ctrl_info->max_lds > 64) 3671 sc->max256vdSupport = 1; 3672 3673 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS 3674 * MRSAS_MAX_DEV_PER_CHANNEL; 3675 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS 3676 * MRSAS_MAX_DEV_PER_CHANNEL; 3677 if (sc->max256vdSupport) { 3678 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 3679 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3680 } else { 3681 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 3682 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3683 } 3684 3685 if (sc->maxRaidMapSize) { 3686 ventura_map_sz = sc->maxRaidMapSize * 3687 MR_MIN_MAP_SIZE; 3688 sc->current_map_sz = ventura_map_sz; 3689 sc->max_map_sz = ventura_map_sz; 3690 } else { 3691 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + 3692 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); 3693 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); 3694 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); 3695 if (sc->max256vdSupport) 3696 sc->current_map_sz = sc->new_map_sz; 3697 else 3698 sc->current_map_sz = sc->old_map_sz; 3699 } 3700 3701 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL); 3702 #if VD_EXT_DEBUG 3703 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n", 3704 sc->maxRaidMapSize); 3705 device_printf(sc->mrsas_dev, 3706 "new_map_sz = 0x%x, old_map_sz = 0x%x, " 3707 "ventura_map_sz = 0x%x, current_map_sz = 0x%x " 3708 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n", 3709 sc->new_map_sz, sc->old_map_sz, ventura_map_sz, 3710 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL)); 3711 #endif 3712 } 3713 3714 /* 3715 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 
3716 * input: Adapter soft state 3717 * 3718 * Allocates DMAable memory for the controller info internal command. 3719 */ 3720 int 3721 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 3722 { 3723 int ctlr_info_size; 3724 3725 /* Allocate get controller info command */ 3726 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 3727 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3728 1, 0, 3729 BUS_SPACE_MAXADDR_32BIT, 3730 BUS_SPACE_MAXADDR, 3731 NULL, NULL, 3732 ctlr_info_size, 3733 1, 3734 ctlr_info_size, 3735 BUS_DMA_ALLOCNOW, 3736 NULL, NULL, 3737 &sc->ctlr_info_tag)) { 3738 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 3739 return (ENOMEM); 3740 } 3741 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 3742 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 3743 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 3744 return (ENOMEM); 3745 } 3746 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 3747 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 3748 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 3749 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 3750 return (ENOMEM); 3751 } 3752 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 3753 return (0); 3754 } 3755 3756 /* 3757 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 3758 * input: Adapter soft state 3759 * 3760 * Deallocates memory of the get controller info cmd. 
 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Teardown mirrors the allocation order: unload, free, destroy tag. */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}

/*
 * mrsas_issue_polled:	Issues a polling command
 * inputs:	Adapter soft state
 *		Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware.  MFI
 * requires the cmd_status to be set to 0xFF before posting.  The maximum
 * wait time of the poll response timer is
 * MRSAS_INTERNAL_CMD_WAIT_TIME (180) seconds.
 */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = SUCCESS;

	/* 0xFF marks "no status yet"; the FW overwrites it on completion. */
	frame_hdr->cmd_status = 0xFF;
	/* Tell the completion path not to post this one to the reply queue. */
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (frame_hdr->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
 * input:	Adapter soft state
 *		mfi cmd pointer
 *
 * This function is called by mrsas_issue_blocked_cmd() and
 * mrsas_issue_polled() to build the MPT command and then fire the command
 * to Firmware.  Returns 0 on success, 1 when the MPT command cannot be
 * built.
 */
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = mrsas_build_mpt_cmd(sc, cmd);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
		return (1);
	}
	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);

	return (0);
}

/*
 * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
 * input:	Adapter soft state
 *		mfi cmd to build
 *
 * This function is called by mrsas_issue_dcmd() to build the MPT-MFI
 * passthru command and prepares the MPT command to send to Firmware.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int16_t index;

	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
		return NULL;
	}
	/* SMID was assigned by mrsas_build_mptmfi_passthru() (1-based). */
	index = cmd->cmd_id.context.smid;

	req_desc = mrsas_get_request_desc(sc, index - 1);
	if (!req_desc)
		return NULL;

	req_desc->addr.Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = index;

	return (req_desc);
}

/*
 * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
 * input:	Adapter soft state
 *		mfi cmd pointer
 *
 * The MPT command and the io_request are setup as a passthru command.
 * The SGE chain address is set to frame_phys_addr of the MFI command.
 * Returns 0 on success, 1 when no MPT command is available.
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link the MPT command back to its MFI command. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		/* Gen3+/Ventura/Aero: clear the flags of the last main-frame SGE. */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA address of the MFI frame itself. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}

/*
 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
 * input:	Adapter soft state
 *		Command to be issued
 *
 * This function waits on an event for the command to be returned from the
 * ISR.  Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.  Used for
 * issuing internal and ioctl commands.
 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status; 0xFF means "still owned by firmware". */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the value stored in sc->chan (the address of the
	 * local 'cmd') is never read; the actual sleep/wakeup channel used
	 * here and in mrsas_wakeup() is &sc->chan itself.  Looks like a
	 * historical leftover -- confirm before relying on sc->chan's value.
	 */
	sc->chan = (void *)&cmd;

	while (1) {
		if (cmd->cmd_status == 0xFF) {
			/* Woken by mrsas_wakeup(); otherwise ticks once per second. */
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	/* Status still 0xFF here means the FW never completed the DCMD. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_complete_mptmfi_passthru: Completes a command
 * input: @sc: Adapter soft state
 * @cmd: Command to be completed
 * @status: cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt is
 * received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI IO is completed like a DCMD. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Fast-path I/O is only enabled over a valid RAID map. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the map-update command for the next change. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/*
 * mrsas_wakeup: Completes an internal command
 * input: Adapter soft state
 * Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 * timer is started. This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF is the "in flight" sentinel; map it to success for the waiter. */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	sc->chan = (void *)&cmd;
	/* The waiter in mrsas_issue_blocked_cmd() sleeps on &sc->chan. */
	wakeup_one((void *)&sc->chan);
	return;
}

/*
 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
 * Adapter soft state Shutdown/Hibernate
 *
 * This function issues a DCMD internal command to Firmware to initiate shutdown
 * of the controller.
4122 */ 4123 static void 4124 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 4125 { 4126 struct mrsas_mfi_cmd *cmd; 4127 struct mrsas_dcmd_frame *dcmd; 4128 4129 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4130 return; 4131 4132 cmd = mrsas_get_mfi_cmd(sc); 4133 if (!cmd) { 4134 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 4135 return; 4136 } 4137 if (sc->aen_cmd) 4138 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 4139 if (sc->map_update_cmd) 4140 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 4141 if (sc->jbod_seq_cmd) 4142 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); 4143 4144 dcmd = &cmd->frame->dcmd; 4145 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4146 4147 dcmd->cmd = MFI_CMD_DCMD; 4148 dcmd->cmd_status = 0x0; 4149 dcmd->sge_count = 0; 4150 dcmd->flags = MFI_FRAME_DIR_NONE; 4151 dcmd->timeout = 0; 4152 dcmd->pad_0 = 0; 4153 dcmd->data_xfer_len = 0; 4154 dcmd->opcode = opcode; 4155 4156 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 4157 4158 mrsas_issue_blocked_cmd(sc, cmd); 4159 mrsas_release_mfi_cmd(cmd); 4160 4161 return; 4162 } 4163 4164 /* 4165 * mrsas_flush_cache: Requests FW to flush all its caches input: 4166 * Adapter soft state 4167 * 4168 * This function is issues a DCMD internal command to Firmware to initiate 4169 * flushing of all caches. 
4170 */ 4171 static void 4172 mrsas_flush_cache(struct mrsas_softc *sc) 4173 { 4174 struct mrsas_mfi_cmd *cmd; 4175 struct mrsas_dcmd_frame *dcmd; 4176 4177 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4178 return; 4179 4180 cmd = mrsas_get_mfi_cmd(sc); 4181 if (!cmd) { 4182 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); 4183 return; 4184 } 4185 dcmd = &cmd->frame->dcmd; 4186 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4187 4188 dcmd->cmd = MFI_CMD_DCMD; 4189 dcmd->cmd_status = 0x0; 4190 dcmd->sge_count = 0; 4191 dcmd->flags = MFI_FRAME_DIR_NONE; 4192 dcmd->timeout = 0; 4193 dcmd->pad_0 = 0; 4194 dcmd->data_xfer_len = 0; 4195 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 4196 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4197 4198 mrsas_issue_blocked_cmd(sc, cmd); 4199 mrsas_release_mfi_cmd(cmd); 4200 4201 return; 4202 } 4203 4204 int 4205 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) 4206 { 4207 int retcode = 0; 4208 u_int8_t do_ocr = 1; 4209 struct mrsas_mfi_cmd *cmd; 4210 struct mrsas_dcmd_frame *dcmd; 4211 uint32_t pd_seq_map_sz; 4212 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 4213 bus_addr_t pd_seq_h; 4214 4215 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 4216 (sizeof(struct MR_PD_CFG_SEQ) * 4217 (MAX_PHYSICAL_DEVICES - 1)); 4218 4219 cmd = mrsas_get_mfi_cmd(sc); 4220 if (!cmd) { 4221 device_printf(sc->mrsas_dev, 4222 "Cannot alloc for ld map info cmd.\n"); 4223 return 1; 4224 } 4225 dcmd = &cmd->frame->dcmd; 4226 4227 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; 4228 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; 4229 if (!pd_sync) { 4230 device_printf(sc->mrsas_dev, 4231 "Failed to alloc mem for jbod map info.\n"); 4232 mrsas_release_mfi_cmd(cmd); 4233 return (ENOMEM); 4234 } 4235 memset(pd_sync, 0, pd_seq_map_sz); 4236 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4237 dcmd->cmd = MFI_CMD_DCMD; 4238 dcmd->cmd_status = 0xFF; 4239 dcmd->sge_count = 1; 4240 dcmd->timeout = 0; 4241 
dcmd->pad_0 = 0; 4242 dcmd->data_xfer_len = (pd_seq_map_sz); 4243 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO); 4244 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h); 4245 dcmd->sgl.sge32[0].length = (pd_seq_map_sz); 4246 4247 if (pend) { 4248 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; 4249 dcmd->flags = (MFI_FRAME_DIR_WRITE); 4250 sc->jbod_seq_cmd = cmd; 4251 if (mrsas_issue_dcmd(sc, cmd)) { 4252 device_printf(sc->mrsas_dev, 4253 "Fail to send sync map info command.\n"); 4254 return 1; 4255 } else 4256 return 0; 4257 } else 4258 dcmd->flags = MFI_FRAME_DIR_READ; 4259 4260 retcode = mrsas_issue_polled(sc, cmd); 4261 if (retcode == ETIMEDOUT) 4262 goto dcmd_timeout; 4263 4264 if (pd_sync->count > MAX_PHYSICAL_DEVICES) { 4265 device_printf(sc->mrsas_dev, 4266 "driver supports max %d JBOD, but FW reports %d\n", 4267 MAX_PHYSICAL_DEVICES, pd_sync->count); 4268 retcode = -EINVAL; 4269 } 4270 if (!retcode) 4271 sc->pd_seq_map_id++; 4272 do_ocr = 0; 4273 4274 dcmd_timeout: 4275 if (do_ocr) 4276 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4277 4278 return (retcode); 4279 } 4280 4281 /* 4282 * mrsas_get_map_info: Load and validate RAID map input: 4283 * Adapter instance soft state 4284 * 4285 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load 4286 * and validate RAID map. It returns 0 if successful, 1 other- wise. 4287 */ 4288 static int 4289 mrsas_get_map_info(struct mrsas_softc *sc) 4290 { 4291 uint8_t retcode = 0; 4292 4293 sc->fast_path_io = 0; 4294 if (!mrsas_get_ld_map_info(sc)) { 4295 retcode = MR_ValidateMapInfo(sc); 4296 if (retcode == 0) { 4297 sc->fast_path_io = 1; 4298 return 0; 4299 } 4300 } 4301 return 1; 4302 } 4303 4304 /* 4305 * mrsas_get_ld_map_info: Get FW's ld_map structure input: 4306 * Adapter instance soft state 4307 * 4308 * Issues an internal command (DCMD) to get the FW's controller PD list 4309 * structure. 
4310 */ 4311 static int 4312 mrsas_get_ld_map_info(struct mrsas_softc *sc) 4313 { 4314 int retcode = 0; 4315 struct mrsas_mfi_cmd *cmd; 4316 struct mrsas_dcmd_frame *dcmd; 4317 void *map; 4318 bus_addr_t map_phys_addr = 0; 4319 4320 cmd = mrsas_get_mfi_cmd(sc); 4321 if (!cmd) { 4322 device_printf(sc->mrsas_dev, 4323 "Cannot alloc for ld map info cmd.\n"); 4324 return 1; 4325 } 4326 dcmd = &cmd->frame->dcmd; 4327 4328 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 4329 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 4330 if (!map) { 4331 device_printf(sc->mrsas_dev, 4332 "Failed to alloc mem for ld map info.\n"); 4333 mrsas_release_mfi_cmd(cmd); 4334 return (ENOMEM); 4335 } 4336 memset(map, 0, sizeof(sc->max_map_sz)); 4337 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4338 4339 dcmd->cmd = MFI_CMD_DCMD; 4340 dcmd->cmd_status = 0xFF; 4341 dcmd->sge_count = 1; 4342 dcmd->flags = MFI_FRAME_DIR_READ; 4343 dcmd->timeout = 0; 4344 dcmd->pad_0 = 0; 4345 dcmd->data_xfer_len = sc->current_map_sz; 4346 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 4347 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 4348 dcmd->sgl.sge32[0].length = sc->current_map_sz; 4349 4350 retcode = mrsas_issue_polled(sc, cmd); 4351 if (retcode == ETIMEDOUT) 4352 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4353 4354 return (retcode); 4355 } 4356 4357 /* 4358 * mrsas_sync_map_info: Get FW's ld_map structure input: 4359 * Adapter instance soft state 4360 * 4361 * Issues an internal command (DCMD) to get the FW's controller PD list 4362 * structure. 
 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Current driver-side map supplies the LD count and seq numbers. */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* The *other* raidmap buffer is reused to stage the sync payload. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One (targetId, seqNum) pair per logical drive. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->mbox.b[0] = num_lds;
	/* Pending flag: FW completes this cmd when the LD map changes. */
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		/*
		 * NOTE(review): on this failure path the MFI command is not
		 * released and map_update_cmd still points at it -- looks
		 * like a leak; confirm against the OCR/reset paths.
		 */
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}

/* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO
 * dcmd.mbox.s[0] - deviceId for this physical drive
 * dcmd.sge IN - ptr to returned MR_PD_INFO structure
 * Desc: Firmware return the physical drive info structure
 *
 */
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
{
	int retcode;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD info cmd\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;

	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* mbox.s[0] selects the physical drive to query. */
	dcmd->mbox.s[0] = device_id;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_pd_info);
	dcmd->opcode = MR_DCMD_PD_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info);

	/* Blocked issue needs interrupts; otherwise fall back to polling. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Cache the drive's interface type (SAS/SATA/...) for this target. */
	sc->target_list[device_id].interface_type =
	    sc->pd_info_mem->state.ddf.pdType.intf;

	do_ocr = 0;

dcmd_timeout:

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/* In polled mode the reset path owns/reclaims the timed-out cmd. */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);
}

/*
 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure.
 * sc: Adapter's soft state
 * target_id: Unique target id per controller(managed by driver)
 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 * return: void
 * Description: This function will be called whenever system PD or VD is created.
4492 */ 4493 static void mrsas_add_target(struct mrsas_softc *sc, 4494 u_int16_t target_id) 4495 { 4496 sc->target_list[target_id].target_id = target_id; 4497 4498 device_printf(sc->mrsas_dev, 4499 "%s created target ID: 0x%x\n", 4500 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4501 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4502 /* 4503 * If interrupts are enabled, then only fire DCMD to get pd_info 4504 * for system PDs 4505 */ 4506 if (!sc->mask_interrupts && sc->pd_info_mem && 4507 (target_id < MRSAS_MAX_PD)) 4508 mrsas_get_pd_info(sc, target_id); 4509 4510 } 4511 4512 /* 4513 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure. 4514 * sc: Adapter's soft state 4515 * target_id: Unique target id per controller(managed by driver) 4516 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) 4517 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS 4518 * return: void 4519 * Descripton: This function will be called whenever system PD or VD is deleted 4520 */ 4521 static void mrsas_remove_target(struct mrsas_softc *sc, 4522 u_int16_t target_id) 4523 { 4524 sc->target_list[target_id].target_id = 0xffff; 4525 device_printf(sc->mrsas_dev, 4526 "%s deleted target ID: 0x%x\n", 4527 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4528 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4529 } 4530 4531 /* 4532 * mrsas_get_pd_list: Returns FW's PD list structure input: 4533 * Adapter soft state 4534 * 4535 * Issues an internal command (DCMD) to get the FW's controller PD list 4536 * structure. This information is mainly used to find out about system 4537 * supported by Firmware. 
4538 */ 4539 static int 4540 mrsas_get_pd_list(struct mrsas_softc *sc) 4541 { 4542 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 4543 u_int8_t do_ocr = 1; 4544 struct mrsas_mfi_cmd *cmd; 4545 struct mrsas_dcmd_frame *dcmd; 4546 struct MR_PD_LIST *pd_list_mem; 4547 struct MR_PD_ADDRESS *pd_addr; 4548 bus_addr_t pd_list_phys_addr = 0; 4549 struct mrsas_tmp_dcmd *tcmd; 4550 4551 cmd = mrsas_get_mfi_cmd(sc); 4552 if (!cmd) { 4553 device_printf(sc->mrsas_dev, 4554 "Cannot alloc for get PD list cmd\n"); 4555 return 1; 4556 } 4557 dcmd = &cmd->frame->dcmd; 4558 4559 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4560 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4561 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 4562 device_printf(sc->mrsas_dev, 4563 "Cannot alloc dmamap for get PD list cmd\n"); 4564 mrsas_release_mfi_cmd(cmd); 4565 mrsas_free_tmp_dcmd(tcmd); 4566 free(tcmd, M_MRSAS); 4567 return (ENOMEM); 4568 } else { 4569 pd_list_mem = tcmd->tmp_dcmd_mem; 4570 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4571 } 4572 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4573 4574 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4575 dcmd->mbox.b[1] = 0; 4576 dcmd->cmd = MFI_CMD_DCMD; 4577 dcmd->cmd_status = 0xFF; 4578 dcmd->sge_count = 1; 4579 dcmd->flags = MFI_FRAME_DIR_READ; 4580 dcmd->timeout = 0; 4581 dcmd->pad_0 = 0; 4582 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4583 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 4584 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 4585 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4586 4587 if (!sc->mask_interrupts) 4588 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4589 else 4590 retcode = mrsas_issue_polled(sc, cmd); 4591 4592 if (retcode == ETIMEDOUT) 4593 goto dcmd_timeout; 4594 4595 /* Get the instance PD list */ 4596 pd_count = MRSAS_MAX_PD; 4597 pd_addr = pd_list_mem->addr; 4598 if (pd_list_mem->count < pd_count) { 4599 
memset(sc->local_pd_list, 0, 4600 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 4601 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 4602 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 4603 sc->local_pd_list[pd_addr->deviceId].driveType = 4604 pd_addr->scsiDevType; 4605 sc->local_pd_list[pd_addr->deviceId].driveState = 4606 MR_PD_STATE_SYSTEM; 4607 if (sc->target_list[pd_addr->deviceId].target_id == 0xffff) 4608 mrsas_add_target(sc, pd_addr->deviceId); 4609 pd_addr++; 4610 } 4611 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) { 4612 if ((sc->local_pd_list[pd_index].driveState != 4613 MR_PD_STATE_SYSTEM) && 4614 (sc->target_list[pd_index].target_id != 4615 0xffff)) { 4616 mrsas_remove_target(sc, pd_index); 4617 } 4618 } 4619 /* 4620 * Use mutext/spinlock if pd_list component size increase more than 4621 * 32 bit. 4622 */ 4623 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 4624 do_ocr = 0; 4625 } 4626 dcmd_timeout: 4627 mrsas_free_tmp_dcmd(tcmd); 4628 free(tcmd, M_MRSAS); 4629 4630 if (do_ocr) 4631 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4632 4633 if (!sc->mask_interrupts) 4634 mrsas_release_mfi_cmd(cmd); 4635 4636 return (retcode); 4637 } 4638 4639 /* 4640 * mrsas_get_ld_list: Returns FW's LD list structure input: 4641 * Adapter soft state 4642 * 4643 * Issues an internal command (DCMD) to get the FW's controller PD list 4644 * structure. This information is mainly used to find out about supported by 4645 * the FW. 
4646 */ 4647 static int 4648 mrsas_get_ld_list(struct mrsas_softc *sc) 4649 { 4650 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id; 4651 u_int8_t do_ocr = 1; 4652 struct mrsas_mfi_cmd *cmd; 4653 struct mrsas_dcmd_frame *dcmd; 4654 struct MR_LD_LIST *ld_list_mem; 4655 bus_addr_t ld_list_phys_addr = 0; 4656 struct mrsas_tmp_dcmd *tcmd; 4657 4658 cmd = mrsas_get_mfi_cmd(sc); 4659 if (!cmd) { 4660 device_printf(sc->mrsas_dev, 4661 "Cannot alloc for get LD list cmd\n"); 4662 return 1; 4663 } 4664 dcmd = &cmd->frame->dcmd; 4665 4666 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4667 ld_list_size = sizeof(struct MR_LD_LIST); 4668 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 4669 device_printf(sc->mrsas_dev, 4670 "Cannot alloc dmamap for get LD list cmd\n"); 4671 mrsas_release_mfi_cmd(cmd); 4672 mrsas_free_tmp_dcmd(tcmd); 4673 free(tcmd, M_MRSAS); 4674 return (ENOMEM); 4675 } else { 4676 ld_list_mem = tcmd->tmp_dcmd_mem; 4677 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4678 } 4679 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4680 4681 if (sc->max256vdSupport) 4682 dcmd->mbox.b[0] = 1; 4683 4684 dcmd->cmd = MFI_CMD_DCMD; 4685 dcmd->cmd_status = 0xFF; 4686 dcmd->sge_count = 1; 4687 dcmd->flags = MFI_FRAME_DIR_READ; 4688 dcmd->timeout = 0; 4689 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 4690 dcmd->opcode = MR_DCMD_LD_GET_LIST; 4691 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 4692 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 4693 dcmd->pad_0 = 0; 4694 4695 if (!sc->mask_interrupts) 4696 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4697 else 4698 retcode = mrsas_issue_polled(sc, cmd); 4699 4700 if (retcode == ETIMEDOUT) 4701 goto dcmd_timeout; 4702 4703 #if VD_EXT_DEBUG 4704 printf("Number of LDs %d\n", ld_list_mem->ldCount); 4705 #endif 4706 4707 /* Get the instance LD list */ 4708 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { 4709 sc->CurLdCount = ld_list_mem->ldCount; 4710 memset(sc->ld_ids, 
0xff, MAX_LOGICAL_DRIVES_EXT); 4711 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 4712 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4713 drv_tgt_id = ids + MRSAS_MAX_PD; 4714 if (ld_list_mem->ldList[ld_index].state != 0) { 4715 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4716 if (sc->target_list[drv_tgt_id].target_id == 4717 0xffff) 4718 mrsas_add_target(sc, drv_tgt_id); 4719 } else { 4720 if (sc->target_list[drv_tgt_id].target_id != 4721 0xffff) 4722 mrsas_remove_target(sc, 4723 drv_tgt_id); 4724 } 4725 } 4726 4727 do_ocr = 0; 4728 } 4729 dcmd_timeout: 4730 mrsas_free_tmp_dcmd(tcmd); 4731 free(tcmd, M_MRSAS); 4732 4733 if (do_ocr) 4734 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4735 if (!sc->mask_interrupts) 4736 mrsas_release_mfi_cmd(cmd); 4737 4738 return (retcode); 4739 } 4740 4741 /* 4742 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 4743 * Adapter soft state Temp command Size of alloction 4744 * 4745 * Allocates DMAable memory for a temporary internal command. The allocated 4746 * memory is initialized to all zeros upon successful loading of the dma 4747 * mapped memory. 
 */
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
    struct mrsas_tmp_dcmd *tcmd, int size)
{
	/* Single-segment, 32-bit addressable DMA buffer of 'size' bytes. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size,
	    1,
	    size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &tcmd->tmp_dcmd_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
		return (ENOMEM);
	}
	/*
	 * NOTE(review): on the failure paths below, resources created by the
	 * earlier steps are not freed here; callers are expected to invoke
	 * mrsas_free_tmp_dcmd() on failure -- confirm all callers do so.
	 */
	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
		return (ENOMEM);
	}
	memset(tcmd->tmp_dcmd_mem, 0, size);
	return (0);
}

/*
 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
 * temporary dcmd pointer
 *
 * Deallocates memory of the temporary command for use in the construction of
 * the internal DCMD.
 */
void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
{
	/* Unload, free, destroy -- reverse order of mrsas_alloc_tmp_dcmd(). */
	if (tmp->tmp_dcmd_phys_addr)
		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_mem != NULL)
		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_tag != NULL)
		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
}

/*
 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
 * Adapter soft state Previously issued cmd to be aborted
 *
 * This function is used to abort previously issued commands, such as AEN and
 * RAID map sync map commands. The abort command is sent as a DCMD internal
 * command and subsequently the driver will wait for a return status. The
 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
 */
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;
	u_int8_t retcode = 0;
	unsigned long total_time = 0;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
		return (1);
	}
	abort_fr = &cmd->frame->abort;

	/* Prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;	/* 0xFF = "not yet completed" */
	abort_fr->flags = 0;
	/* Identify the victim by index and by its frame's physical address. */
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	cmd->sync_cmd = 1;
	cmd->cmd_status = 0xFF;

	if (mrsas_issue_dcmd(sc, cmd)) {
		/*
		 * NOTE(review): the abort cmd is not released on this failure
		 * path -- looks like a leak; confirm intended ownership.
		 */
		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
		return (1);
	}
	/* Wait for this cmd to complete */
	sc->chan = (void *)&cmd;
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			/* Woken by mrsas_complete_abort(); 1s tick otherwise. */
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
			retcode = 1;
			break;
		}
	}

	cmd->sync_cmd = 0;
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}

/*
 * mrsas_complete_abort: Completes aborting a command input:
 * Adapter soft state Cmd that was issued to abort another cmd
 *
 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
 * change after sending the command. This function is called from
 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
 */
void
mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Only a sync (blocked) abort has a waiter to wake. */
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		sc->chan = (void *)&cmd;
		/* Waiter in mrsas_issue_blocked_abort_cmd() sleeps on &sc->chan. */
		wakeup_one((void *)&sc->chan);
	}
	return;
}

/*
 * mrsas_aen_handler: AEN processing callback function from thread context
 * input: Adapter soft state
 *
 * Asynchronous event handler.  Refreshes the affected driver tables
 * (PD list, LD list, controller info) for the event code, rescans the
 * corresponding CAM SIM, and re-registers an AEN for the next event.
 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Don't touch hardware while a detach or OCR is in flight. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
		    __func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config changes: rescan both PD and LD sides below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; don't register a second one. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}

/*
 * mrsas_complete_aen: Completes AEN command
 * input: Adapter soft state
 * Cmd that was issued to abort another cmd
 *
 * This function will be called from ISR and will continue event processing from
 * thread context by enqueuing task in ev_tq (callback function
 * "mrsas_aen_handler").
 */
void
mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/*
	 * Don't signal app if it is just an aborted previously registered
	 * aen
	 */
	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
		sc->mrsas_aen_triggered = 1;
		mtx_lock(&sc->aen_lock);
		/* Wake a userland poller blocked in mrsas_poll(), if any. */
		if (sc->mrsas_poll_waiting) {
			sc->mrsas_poll_waiting = 0;
			selwakeup(&sc->mrsas_select);
		}
		mtx_unlock(&sc->aen_lock);
	} else
		cmd->abort_aen = 0;

	sc->aen_cmd = NULL;
	mrsas_release_mfi_cmd(cmd);

	/* Defer the heavy event handling to thread context (mrsas_aen_handler). */
	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);

	return;
}

/* newbus glue: device methods, driver declaration, and module registration. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}
};

static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
/* mrsas depends on CAM for SCSI transport. */
MODULE_DEPEND(mrsas, cam, 1, 1, 1);