1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <dev/mrsas/mrsas.h> 44 #include <dev/mrsas/mrsas_ioctl.h> 45 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 49 #include <sys/sysctl.h> 50 #include <sys/types.h> 51 #include <sys/sysent.h> 52 #include <sys/kthread.h> 53 #include <sys/taskqueue.h> 54 #include <sys/smp.h> 55 56 57 /* 58 * Function prototypes 59 */ 60 static d_open_t mrsas_open; 61 static d_close_t mrsas_close; 62 static d_read_t mrsas_read; 63 static d_write_t mrsas_write; 64 static d_ioctl_t mrsas_ioctl; 65 static d_poll_t mrsas_poll; 66 67 static void mrsas_ich_startup(void *arg); 68 static struct mrsas_mgmt_info mrsas_mgmt_info; 69 static struct mrsas_ident *mrsas_find_ident(device_t); 70 static int mrsas_setup_msix(struct mrsas_softc *sc); 71 static int mrsas_allocate_msix(struct mrsas_softc *sc); 72 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 73 static void mrsas_flush_cache(struct mrsas_softc *sc); 74 static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 75 static void mrsas_ocr_thread(void *arg); 76 static int mrsas_get_map_info(struct mrsas_softc *sc); 77 static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 78 static int mrsas_sync_map_info(struct mrsas_softc *sc); 79 static int mrsas_get_pd_list(struct mrsas_softc *sc); 80 static int mrsas_get_ld_list(struct mrsas_softc *sc); 81 static int mrsas_setup_irq(struct mrsas_softc *sc); 82 static int mrsas_alloc_mem(struct mrsas_softc *sc); 83 static int mrsas_init_fw(struct mrsas_softc *sc); 84 static int mrsas_setup_raidmap(struct mrsas_softc *sc); 85 static void megasas_setup_jbod_map(struct mrsas_softc *sc); 86 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); 87 static int mrsas_clear_intr(struct mrsas_softc *sc); 88 static int mrsas_get_ctrl_info(struct mrsas_softc *sc); 89 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); 90 static int 91 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 92 struct mrsas_mfi_cmd *cmd_to_abort); 93 static void 94 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id); 95 static struct mrsas_softc * 96 mrsas_get_softc_instance(struct cdev *dev, 97 u_long cmd, caddr_t arg); 98 u_int32_t 99 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset); 100 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 101 u_int8_t 102 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 103 struct mrsas_mfi_cmd *mfi_cmd); 104 void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); 105 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 106 int mrsas_init_adapter(struct mrsas_softc *sc); 107 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 108 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 109 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 110 int mrsas_ioc_init(struct mrsas_softc *sc); 111 int mrsas_bus_scan(struct mrsas_softc *sc); 112 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 113 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 114 int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); 115 int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); 116 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 117 int mrsas_reset_targets(struct mrsas_softc *sc); 118 int 119 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 120 struct mrsas_mfi_cmd *cmd); 121 int 122 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 123 int size); 124 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 125 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 126 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 127 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 128 void mrsas_disable_intr(struct mrsas_softc *sc); 129 void mrsas_enable_intr(struct mrsas_softc *sc); 130 void mrsas_free_ioc_cmd(struct mrsas_softc *sc); 131 void mrsas_free_mem(struct mrsas_softc *sc); 132 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 133 void mrsas_isr(void *arg); 134 void mrsas_teardown_intr(struct mrsas_softc *sc); 135 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 136 void mrsas_kill_hba(struct mrsas_softc *sc); 137 void mrsas_aen_handler(struct mrsas_softc *sc); 138 void 139 mrsas_write_reg(struct mrsas_softc *sc, int offset, 140 u_int32_t value); 141 void 142 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 143 u_int32_t req_desc_hi); 144 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 145 void 146 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 147 struct mrsas_mfi_cmd *cmd, u_int8_t status); 148 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); 149 150 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd 151 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 152 153 extern int mrsas_cam_attach(struct mrsas_softc *sc); 154 extern void mrsas_cam_detach(struct mrsas_softc *sc); 155 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 156 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 157 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 158 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 159 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); 160 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 161 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 162 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 163 extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 164 extern void mrsas_xpt_release(struct mrsas_softc *sc); 165 extern MRSAS_REQUEST_DESCRIPTOR_UNION * 166 mrsas_get_request_desc(struct mrsas_softc *sc, 167 u_int16_t index); 168 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 169 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); 170 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); 171 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 172 173 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, 174 union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus, 175 u_int32_t data_length, u_int8_t *sense); 176 void 177 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, 178 u_int32_t req_desc_hi); 179 180 181 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); 182 183 /* 184 * PCI device struct and table 185 * 186 */ 187 typedef struct mrsas_ident { 188 uint16_t vendor; 189 uint16_t device; 190 uint16_t subvendor; 191 uint16_t subdevice; 192 const char *desc; 193 } MRSAS_CTLR_ID; 194 195 MRSAS_CTLR_ID device_table[] = { 196 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, 197 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, 198 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, 199 {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"}, 200 {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"}, 201 {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"}, 202 {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"}, 203 {0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"}, 204 {0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"}, 205 {0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"}, 206 {0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"}, 207 {0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"}, 208 {0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"}, 209 {0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"}, 210 {0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"}, 211 {0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"}, 212 {0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"}, 213 {0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"}, 214 {0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"}, 215 {0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"}, 216 {0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"}, 217 {0, 0, 0, 0, NULL} 218 }; 219 220 /* 221 * Character device entry points 222 * 223 */ 224 static struct cdevsw mrsas_cdevsw = { 225 .d_version = D_VERSION, 226 .d_open = mrsas_open, 227 .d_close = mrsas_close, 228 .d_read = mrsas_read, 229 .d_write = mrsas_write, 230 .d_ioctl = mrsas_ioctl, 231 .d_poll = mrsas_poll, 232 .d_name = "mrsas", 233 }; 234 235 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); 236 237 /* 238 * In the cdevsw routines, we find our softc by using the si_drv1 member of 239 * struct cdev. We set this variable to point to our softc in our attach 240 * routine when we create the /dev entry. 241 */ 242 int 243 mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 244 { 245 struct mrsas_softc *sc; 246 247 sc = dev->si_drv1; 248 return (0); 249 } 250 251 int 252 mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 253 { 254 struct mrsas_softc *sc; 255 256 sc = dev->si_drv1; 257 return (0); 258 } 259 260 int 261 mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) 262 { 263 struct mrsas_softc *sc; 264 265 sc = dev->si_drv1; 266 return (0); 267 } 268 int 269 mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) 270 { 271 struct mrsas_softc *sc; 272 273 sc = dev->si_drv1; 274 return (0); 275 } 276 277 u_int32_t 278 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset) 279 { 280 u_int32_t i = 0, ret_val; 281 282 if (sc->is_aero) { 283 do { 284 ret_val = mrsas_read_reg(sc, offset); 285 i++; 286 } while(ret_val == 0 && i < 3); 287 } else 288 ret_val = mrsas_read_reg(sc, offset); 289 290 return ret_val; 291 } 292 293 /* 294 * Register Read/Write Functions 295 * 296 */ 297 void 298 mrsas_write_reg(struct mrsas_softc *sc, int offset, 299 u_int32_t value) 300 { 301 bus_space_tag_t bus_tag = sc->bus_tag; 302 bus_space_handle_t bus_handle = sc->bus_handle; 303 304 bus_space_write_4(bus_tag, bus_handle, offset, value); 305 } 306 307 u_int32_t 308 mrsas_read_reg(struct mrsas_softc *sc, int offset) 309 { 310 bus_space_tag_t bus_tag = sc->bus_tag; 311 bus_space_handle_t bus_handle = sc->bus_handle; 312 313 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); 314 } 315 316 317 /* 318 * Interrupt Disable/Enable/Clear Functions 319 * 320 */ 321 void 322 mrsas_disable_intr(struct mrsas_softc *sc) 323 { 324 u_int32_t mask = 0xFFFFFFFF; 325 u_int32_t status; 326 327 sc->mask_interrupts = 1; 328 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); 329 /* Dummy read to force pci flush */ 330 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 331 } 332 333 void 334 mrsas_enable_intr(struct mrsas_softc *sc) 335 { 336 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; 337 u_int32_t status; 338 339 sc->mask_interrupts = 0; 340 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); 341 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 342 343 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); 344 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 345 } 346 347 static int 348 mrsas_clear_intr(struct mrsas_softc *sc) 349 { 350 u_int32_t status; 351 352 /* Read received interrupt */ 353 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 354 355 /* Not our interrupt, so just return */ 356 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 357 return (0); 358 359 /* We got a reply interrupt */ 360 return (1); 361 } 362 363 /* 364 * PCI Support Functions 365 * 366 */ 367 static struct mrsas_ident * 368 mrsas_find_ident(device_t dev) 369 { 370 struct mrsas_ident *pci_device; 371 372 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { 373 if ((pci_device->vendor == pci_get_vendor(dev)) && 374 (pci_device->device == pci_get_device(dev)) && 375 ((pci_device->subvendor == pci_get_subvendor(dev)) || 376 (pci_device->subvendor == 0xffff)) && 377 ((pci_device->subdevice == pci_get_subdevice(dev)) || 378 (pci_device->subdevice == 0xffff))) 379 return (pci_device); 380 } 381 return (NULL); 382 } 383 384 static int 385 mrsas_probe(device_t dev) 386 { 387 static u_int8_t first_ctrl = 1; 388 struct mrsas_ident *id; 389 390 if ((id = mrsas_find_ident(dev)) != NULL) { 391 if (first_ctrl) { 392 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n", 393 MRSAS_VERSION); 394 first_ctrl = 0; 395 } 396 device_set_desc(dev, id->desc); 397 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ 398 return (-30); 399 } 400 return (ENXIO); 401 } 402 403 /* 404 * mrsas_setup_sysctl: setup sysctl values for mrsas 405 * input: Adapter instance soft state 406 * 407 * Setup sysctl entries for mrsas driver. 408 */ 409 static void 410 mrsas_setup_sysctl(struct mrsas_softc *sc) 411 { 412 struct sysctl_ctx_list *sysctl_ctx = NULL; 413 struct sysctl_oid *sysctl_tree = NULL; 414 char tmpstr[80], tmpstr2[80]; 415 416 /* 417 * Setup the sysctl variable so the user can change the debug level 418 * on the fly. 419 */ 420 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", 421 device_get_unit(sc->mrsas_dev)); 422 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); 423 424 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); 425 if (sysctl_ctx != NULL) 426 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); 427 428 if (sysctl_tree == NULL) { 429 sysctl_ctx_init(&sc->sysctl_ctx); 430 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 431 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, 432 CTLFLAG_RD, 0, tmpstr); 433 if (sc->sysctl_tree == NULL) 434 return; 435 sysctl_ctx = &sc->sysctl_ctx; 436 sysctl_tree = sc->sysctl_tree; 437 } 438 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 439 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, 440 "Disable the use of OCR"); 441 442 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 443 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, 444 strlen(MRSAS_VERSION), "driver version"); 445 446 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 447 OID_AUTO, "reset_count", CTLFLAG_RD, 448 &sc->reset_count, 0, "number of ocr from start of the day"); 449 450 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 451 OID_AUTO, "fw_outstanding", CTLFLAG_RD, 452 &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); 453 454 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 455 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 456 &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); 457 458 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 459 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, 460 "Driver debug level"); 461 462 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 463 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 464 0, "Driver IO timeout value in mili-second."); 465 466 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 467 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, 468 &sc->mrsas_fw_fault_check_delay, 469 0, "FW fault check thread delay in seconds. <default is 1 sec>"); 470 471 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 472 OID_AUTO, "reset_in_progress", CTLFLAG_RD, 473 &sc->reset_in_progress, 0, "ocr in progress status"); 474 475 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 476 OID_AUTO, "block_sync_cache", CTLFLAG_RW, 477 &sc->block_sync_cache, 0, 478 "Block SYNC CACHE at driver. <default: 0, send it to FW>"); 479 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 480 OID_AUTO, "stream detection", CTLFLAG_RW, 481 &sc->drv_stream_detection, 0, 482 "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>"); 483 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 484 OID_AUTO, "prp_count", CTLFLAG_RD, 485 &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built"); 486 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 487 OID_AUTO, "SGE holes", CTLFLAG_RD, 488 &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs"); 489 } 490 491 /* 492 * mrsas_get_tunables: get tunable parameters. 493 * input: Adapter instance soft state 494 * 495 * Get tunable parameters. This will help to debug driver at boot time. 496 */ 497 static void 498 mrsas_get_tunables(struct mrsas_softc *sc) 499 { 500 char tmpstr[80]; 501 502 /* XXX default to some debugging for now */ 503 sc->mrsas_debug = 504 (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN); 505 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; 506 sc->mrsas_fw_fault_check_delay = 1; 507 sc->reset_count = 0; 508 sc->reset_in_progress = 0; 509 sc->block_sync_cache = 0; 510 sc->drv_stream_detection = 1; 511 512 /* 513 * Grab the global variables. 514 */ 515 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); 516 517 /* 518 * Grab the global variables. 519 */ 520 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); 521 522 /* Grab the unit-instance variables */ 523 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", 524 device_get_unit(sc->mrsas_dev)); 525 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); 526 } 527 528 /* 529 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. 530 * Used to get sequence number at driver load time. 531 * input: Adapter soft state 532 * 533 * Allocates DMAable memory for the event log info internal command. 534 */ 535 int 536 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) 537 { 538 int el_info_size; 539 540 /* Allocate get event log info command */ 541 el_info_size = sizeof(struct mrsas_evt_log_info); 542 if (bus_dma_tag_create(sc->mrsas_parent_tag, 543 1, 0, 544 BUS_SPACE_MAXADDR_32BIT, 545 BUS_SPACE_MAXADDR, 546 NULL, NULL, 547 el_info_size, 548 1, 549 el_info_size, 550 BUS_DMA_ALLOCNOW, 551 NULL, NULL, 552 &sc->el_info_tag)) { 553 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); 554 return (ENOMEM); 555 } 556 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, 557 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { 558 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); 559 return (ENOMEM); 560 } 561 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, 562 sc->el_info_mem, el_info_size, mrsas_addr_cb, 563 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { 564 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); 565 return (ENOMEM); 566 } 567 memset(sc->el_info_mem, 0, el_info_size); 568 return (0); 569 } 570 571 /* 572 * mrsas_free_evt_info_cmd: Free memory for Event log info command 573 * input: Adapter soft state 574 * 575 * Deallocates memory for the event log info internal command. 576 */ 577 void 578 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 579 { 580 if (sc->el_info_phys_addr) 581 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 582 if (sc->el_info_mem != NULL) 583 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 584 if (sc->el_info_tag != NULL) 585 bus_dma_tag_destroy(sc->el_info_tag); 586 } 587 588 /* 589 * mrsas_get_seq_num: Get latest event sequence number 590 * @sc: Adapter soft state 591 * @eli: Firmware event log sequence number information. 592 * 593 * Firmware maintains a log of all events in a non-volatile area. 594 * Driver get the sequence number using DCMD 595 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 596 */ 597 598 static int 599 mrsas_get_seq_num(struct mrsas_softc *sc, 600 struct mrsas_evt_log_info *eli) 601 { 602 struct mrsas_mfi_cmd *cmd; 603 struct mrsas_dcmd_frame *dcmd; 604 u_int8_t do_ocr = 1, retcode = 0; 605 606 cmd = mrsas_get_mfi_cmd(sc); 607 608 if (!cmd) { 609 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 610 return -ENOMEM; 611 } 612 dcmd = &cmd->frame->dcmd; 613 614 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { 615 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); 616 mrsas_release_mfi_cmd(cmd); 617 return -ENOMEM; 618 } 619 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 620 621 dcmd->cmd = MFI_CMD_DCMD; 622 dcmd->cmd_status = 0x0; 623 dcmd->sge_count = 1; 624 dcmd->flags = MFI_FRAME_DIR_READ; 625 dcmd->timeout = 0; 626 dcmd->pad_0 = 0; 627 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); 628 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 629 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; 630 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); 631 632 retcode = mrsas_issue_blocked_cmd(sc, cmd); 633 if (retcode == ETIMEDOUT) 634 goto dcmd_timeout; 635 636 do_ocr = 0; 637 /* 638 * Copy the data back into callers buffer 639 */ 640 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); 641 mrsas_free_evt_log_info_cmd(sc); 642 643 dcmd_timeout: 644 if (do_ocr) 645 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 646 else 647 mrsas_release_mfi_cmd(cmd); 648 649 return retcode; 650 } 651 652 653 /* 654 * mrsas_register_aen: Register for asynchronous event notification 655 * @sc: Adapter soft state 656 * @seq_num: Starting sequence number 657 * @class_locale: Class of the event 658 * 659 * This function subscribes for events beyond the @seq_num 660 * and type @class_locale. 661 * 662 */ 663 static int 664 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, 665 u_int32_t class_locale_word) 666 { 667 int ret_val; 668 struct mrsas_mfi_cmd *cmd; 669 struct mrsas_dcmd_frame *dcmd; 670 union mrsas_evt_class_locale curr_aen; 671 union mrsas_evt_class_locale prev_aen; 672 673 /* 674 * If there an AEN pending already (aen_cmd), check if the 675 * class_locale of that pending AEN is inclusive of the new AEN 676 * request we currently have. If it is, then we don't have to do 677 * anything. In other words, whichever events the current AEN request 678 * is subscribing to, have already been subscribed to. If the old_cmd 679 * is _not_ inclusive, then we have to abort that command, form a 680 * class_locale that is superset of both old and current and re-issue 681 * to the FW 682 */ 683 684 curr_aen.word = class_locale_word; 685 686 if (sc->aen_cmd) { 687 688 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; 689 690 /* 691 * A class whose enum value is smaller is inclusive of all 692 * higher values. If a PROGRESS (= -1) was previously 693 * registered, then a new registration requests for higher 694 * classes need not be sent to FW. They are automatically 695 * included. Locale numbers don't have such hierarchy. They 696 * are bitmap values 697 */ 698 if ((prev_aen.members.class <= curr_aen.members.class) && 699 !((prev_aen.members.locale & curr_aen.members.locale) ^ 700 curr_aen.members.locale)) { 701 /* 702 * Previously issued event registration includes 703 * current request. Nothing to do. 704 */ 705 return 0; 706 } else { 707 curr_aen.members.locale |= prev_aen.members.locale; 708 709 if (prev_aen.members.class < curr_aen.members.class) 710 curr_aen.members.class = prev_aen.members.class; 711 712 sc->aen_cmd->abort_aen = 1; 713 ret_val = mrsas_issue_blocked_abort_cmd(sc, 714 sc->aen_cmd); 715 716 if (ret_val) { 717 printf("mrsas: Failed to abort previous AEN command\n"); 718 return ret_val; 719 } else 720 sc->aen_cmd = NULL; 721 } 722 } 723 cmd = mrsas_get_mfi_cmd(sc); 724 if (!cmd) 725 return ENOMEM; 726 727 dcmd = &cmd->frame->dcmd; 728 729 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); 730 731 /* 732 * Prepare DCMD for aen registration 733 */ 734 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 735 736 dcmd->cmd = MFI_CMD_DCMD; 737 dcmd->cmd_status = 0x0; 738 dcmd->sge_count = 1; 739 dcmd->flags = MFI_FRAME_DIR_READ; 740 dcmd->timeout = 0; 741 dcmd->pad_0 = 0; 742 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); 743 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 744 dcmd->mbox.w[0] = seq_num; 745 sc->last_seq_num = seq_num; 746 dcmd->mbox.w[1] = curr_aen.word; 747 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; 748 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); 749 750 if (sc->aen_cmd != NULL) { 751 mrsas_release_mfi_cmd(cmd); 752 return 0; 753 } 754 /* 755 * Store reference to the cmd used to register for AEN. When an 756 * application wants us to register for AEN, we have to abort this 757 * cmd and re-register with a new EVENT LOCALE supplied by that app 758 */ 759 sc->aen_cmd = cmd; 760 761 /* 762 * Issue the aen registration frame 763 */ 764 if (mrsas_issue_dcmd(sc, cmd)) { 765 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); 766 return (1); 767 } 768 return 0; 769 } 770 771 /* 772 * mrsas_start_aen: Subscribes to AEN during driver load time 773 * @instance: Adapter soft state 774 */ 775 static int 776 mrsas_start_aen(struct mrsas_softc *sc) 777 { 778 struct mrsas_evt_log_info eli; 779 union mrsas_evt_class_locale class_locale; 780 781 782 /* Get the latest sequence number from FW */ 783 784 memset(&eli, 0, sizeof(eli)); 785 786 if (mrsas_get_seq_num(sc, &eli)) 787 return -1; 788 789 /* Register AEN with FW for latest sequence number plus 1 */ 790 class_locale.members.reserved = 0; 791 class_locale.members.locale = MR_EVT_LOCALE_ALL; 792 class_locale.members.class = MR_EVT_CLASS_DEBUG; 793 794 return mrsas_register_aen(sc, eli.newest_seq_num + 1, 795 class_locale.word); 796 797 } 798 799 /* 800 * mrsas_setup_msix: Allocate MSI-x vectors 801 * @sc: adapter soft state 802 */ 803 static int 804 mrsas_setup_msix(struct mrsas_softc *sc) 805 { 806 int i; 807 808 for (i = 0; i < sc->msix_vectors; i++) { 809 sc->irq_context[i].sc = sc; 810 sc->irq_context[i].MSIxIndex = i; 811 sc->irq_id[i] = i + 1; 812 sc->mrsas_irq[i] = bus_alloc_resource_any 813 (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] 814 ,RF_ACTIVE); 815 if (sc->mrsas_irq[i] == NULL) { 816 device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); 817 goto irq_alloc_failed; 818 } 819 if (bus_setup_intr(sc->mrsas_dev, 820 sc->mrsas_irq[i], 821 INTR_MPSAFE | INTR_TYPE_CAM, 822 NULL, mrsas_isr, &sc->irq_context[i], 823 &sc->intr_handle[i])) { 824 device_printf(sc->mrsas_dev, 825 "Cannot set up MSI-x interrupt handler\n"); 826 goto irq_alloc_failed; 827 } 828 } 829 return SUCCESS; 830 831 irq_alloc_failed: 832 mrsas_teardown_intr(sc); 833 return (FAIL); 834 } 835 836 /* 837 * mrsas_allocate_msix: Setup MSI-x vectors 838 * @sc: adapter soft state 839 */ 840 static int 841 mrsas_allocate_msix(struct mrsas_softc *sc) 842 { 843 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { 844 device_printf(sc->mrsas_dev, "Using MSI-X with %d number" 845 " of vectors\n", sc->msix_vectors); 846 } else { 847 device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); 848 goto irq_alloc_failed; 849 } 850 return SUCCESS; 851 852 irq_alloc_failed: 853 mrsas_teardown_intr(sc); 854 return (FAIL); 855 } 856 857 /* 858 * mrsas_attach: PCI entry point 859 * input: pointer to device struct 860 * 861 * Performs setup of PCI and registers, initializes mutexes and linked lists, 862 * registers interrupts and CAM, and initializes the adapter/controller to 863 * its proper state. 864 */ 865 static int 866 mrsas_attach(device_t dev) 867 { 868 struct mrsas_softc *sc = device_get_softc(dev); 869 uint32_t cmd, error; 870 871 memset(sc, 0, sizeof(struct mrsas_softc)); 872 873 /* Look up our softc and initialize its fields. */ 874 sc->mrsas_dev = dev; 875 sc->device_id = pci_get_device(dev); 876 877 switch (sc->device_id) { 878 case MRSAS_INVADER: 879 case MRSAS_FURY: 880 case MRSAS_INTRUDER: 881 case MRSAS_INTRUDER_24: 882 case MRSAS_CUTLASS_52: 883 case MRSAS_CUTLASS_53: 884 sc->mrsas_gen3_ctrl = 1; 885 break; 886 case MRSAS_VENTURA: 887 case MRSAS_CRUSADER: 888 case MRSAS_HARPOON: 889 case MRSAS_TOMCAT: 890 case MRSAS_VENTURA_4PORT: 891 case MRSAS_CRUSADER_4PORT: 892 sc->is_ventura = true; 893 break; 894 case MRSAS_AERO_10E1: 895 case MRSAS_AERO_10E5: 896 device_printf(dev, "Adapter is in configurable secure mode\n"); 897 case MRSAS_AERO_10E2: 898 case MRSAS_AERO_10E6: 899 sc->is_aero = true; 900 break; 901 case MRSAS_AERO_10E0: 902 case MRSAS_AERO_10E3: 903 case MRSAS_AERO_10E4: 904 case MRSAS_AERO_10E7: 905 device_printf(dev, "Adapter is in non-secure mode\n"); 906 return SUCCESS; 907 908 } 909 910 mrsas_get_tunables(sc); 911 912 /* 913 * Set up PCI and registers 914 */ 915 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 916 if ((cmd & PCIM_CMD_PORTEN) == 0) { 917 return (ENXIO); 918 } 919 /* Force the busmaster enable bit on. */ 920 cmd |= PCIM_CMD_BUSMASTEREN; 921 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 922 923 /* For Ventura/Aero system registers are mapped to BAR0 */ 924 if (sc->is_ventura || sc->is_aero) 925 sc->reg_res_id = PCIR_BAR(0); /* BAR0 offset */ 926 else 927 sc->reg_res_id = PCIR_BAR(1); /* BAR1 offset */ 928 929 if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 930 &(sc->reg_res_id), RF_ACTIVE)) 931 == NULL) { 932 device_printf(dev, "Cannot allocate PCI registers\n"); 933 goto attach_fail; 934 } 935 sc->bus_tag = rman_get_bustag(sc->reg_res); 936 sc->bus_handle = rman_get_bushandle(sc->reg_res); 937 938 /* Intialize mutexes */ 939 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); 940 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); 941 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); 942 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); 943 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); 944 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); 945 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); 946 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); 947 mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF); 948 949 /* Intialize linked list */ 950 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); 951 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); 952 953 mrsas_atomic_set(&sc->fw_outstanding, 0); 954 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 955 mrsas_atomic_set(&sc->prp_count, 0); 956 mrsas_atomic_set(&sc->sge_holes, 0); 957 958 sc->io_cmds_highwater = 0; 959 960 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 961 sc->UnevenSpanSupport = 0; 962 963 sc->msix_enable = 0; 964 965 /* Initialize Firmware */ 966 if (mrsas_init_fw(sc) != SUCCESS) { 967 goto attach_fail_fw; 968 } 969 /* Register mrsas to CAM layer */ 970 if ((mrsas_cam_attach(sc) != SUCCESS)) { 971 goto attach_fail_cam; 972 } 973 /* Register IRQs */ 974 if (mrsas_setup_irq(sc) != SUCCESS) { 975 goto attach_fail_irq; 976 } 977 error = mrsas_kproc_create(mrsas_ocr_thread, sc, 978 &sc->ocr_thread, 0, 0, "mrsas_ocr%d", 979 device_get_unit(sc->mrsas_dev)); 980 if (error) { 981 device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error); 982 goto attach_fail_ocr_thread; 983 } 984 /* 985 * After FW initialization and OCR thread creation 986 * we will defer the cdev creation, AEN setup on ICH callback 987 */ 988 sc->mrsas_ich.ich_func = mrsas_ich_startup; 989 sc->mrsas_ich.ich_arg = sc; 990 if (config_intrhook_establish(&sc->mrsas_ich) != 0) { 991 device_printf(sc->mrsas_dev, "Config hook is already established\n"); 992 } 993 mrsas_setup_sysctl(sc); 994 return SUCCESS; 995 996 attach_fail_ocr_thread: 997 if (sc->ocr_thread_active) 998 wakeup(&sc->ocr_chan); 999 attach_fail_irq: 1000 mrsas_teardown_intr(sc); 1001 attach_fail_cam: 1002 mrsas_cam_detach(sc); 1003 attach_fail_fw: 1004 /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ 1005 if (sc->msix_enable == 1) 1006 pci_release_msi(sc->mrsas_dev); 1007 mrsas_free_mem(sc); 1008 mtx_destroy(&sc->sim_lock); 1009 mtx_destroy(&sc->aen_lock); 1010 mtx_destroy(&sc->pci_lock); 1011 mtx_destroy(&sc->io_lock); 1012 mtx_destroy(&sc->ioctl_lock); 1013 mtx_destroy(&sc->mpt_cmd_pool_lock); 1014 mtx_destroy(&sc->mfi_cmd_pool_lock); 1015 mtx_destroy(&sc->raidmap_lock); 1016 mtx_destroy(&sc->stream_lock); 1017 attach_fail: 1018 if (sc->reg_res) { 1019 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, 1020 sc->reg_res_id, sc->reg_res); 1021 } 1022 return (ENXIO); 1023 } 1024 1025 /* 1026 * Interrupt config hook 1027 */ 1028 static void 1029 mrsas_ich_startup(void *arg) 1030 { 1031 int i = 0; 1032 struct mrsas_softc *sc = (struct mrsas_softc *)arg; 1033 1034 /* 1035 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs 1036 */ 1037 sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS, 1038 IOCTL_SEMA_DESCRIPTION); 1039 1040 /* Create a /dev entry for mrsas controller. */ 1041 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT, 1042 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", 1043 device_get_unit(sc->mrsas_dev)); 1044 1045 if (device_get_unit(sc->mrsas_dev) == 0) { 1046 make_dev_alias_p(MAKEDEV_CHECKNAME, 1047 &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev, 1048 "megaraid_sas_ioctl_node"); 1049 } 1050 if (sc->mrsas_cdev) 1051 sc->mrsas_cdev->si_drv1 = sc; 1052 1053 /* 1054 * Add this controller to mrsas_mgmt_info structure so that it can be 1055 * exported to management applications 1056 */ 1057 if (device_get_unit(sc->mrsas_dev) == 0) 1058 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); 1059 1060 mrsas_mgmt_info.count++; 1061 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; 1062 mrsas_mgmt_info.max_index++; 1063 1064 /* Enable Interrupts */ 1065 mrsas_enable_intr(sc); 1066 1067 /* Call DCMD get_pd_info for all system PDs */ 1068 for (i = 0; i < MRSAS_MAX_PD; i++) { 1069 if ((sc->target_list[i].target_id != 0xffff) && 1070 sc->pd_info_mem) 1071 mrsas_get_pd_info(sc, sc->target_list[i].target_id); 1072 } 1073 1074 /* Initiate AEN (Asynchronous Event Notification) */ 1075 if (mrsas_start_aen(sc)) { 1076 device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! " 1077 "Further events from the controller will not be communicated.\n" 1078 "Either there is some problem in the controller" 1079 "or the controller does not support AEN.\n" 1080 "Please contact to the SUPPORT TEAM if the problem persists\n"); 1081 } 1082 if (sc->mrsas_ich.ich_arg != NULL) { 1083 device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n"); 1084 config_intrhook_disestablish(&sc->mrsas_ich); 1085 sc->mrsas_ich.ich_arg = NULL; 1086 } 1087 } 1088 1089 /* 1090 * mrsas_detach: De-allocates and teardown resources 1091 * input: pointer to device struct 1092 * 1093 * This function is the entry point for device disconnect and detach. 1094 * It performs memory de-allocations, shutdown of the controller and various 1095 * teardown and destroy resource functions. 1096 */ 1097 static int 1098 mrsas_detach(device_t dev) 1099 { 1100 struct mrsas_softc *sc; 1101 int i = 0; 1102 1103 sc = device_get_softc(dev); 1104 sc->remove_in_progress = 1; 1105 1106 /* Destroy the character device so no other IOCTL will be handled */ 1107 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev) 1108 destroy_dev(sc->mrsas_linux_emulator_cdev); 1109 destroy_dev(sc->mrsas_cdev); 1110 1111 /* 1112 * Take the instance off the instance array. Note that we will not 1113 * decrement the max_index. We let this array be sparse array 1114 */ 1115 for (i = 0; i < mrsas_mgmt_info.max_index; i++) { 1116 if (mrsas_mgmt_info.sc_ptr[i] == sc) { 1117 mrsas_mgmt_info.count--; 1118 mrsas_mgmt_info.sc_ptr[i] = NULL; 1119 break; 1120 } 1121 } 1122 1123 if (sc->ocr_thread_active) 1124 wakeup(&sc->ocr_chan); 1125 while (sc->reset_in_progress) { 1126 i++; 1127 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1128 mrsas_dprint(sc, MRSAS_INFO, 1129 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1130 } 1131 pause("mr_shutdown", hz); 1132 } 1133 i = 0; 1134 while (sc->ocr_thread_active) { 1135 i++; 1136 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1137 mrsas_dprint(sc, MRSAS_INFO, 1138 "[%2d]waiting for " 1139 "mrsas_ocr thread to quit ocr %d\n", i, 1140 sc->ocr_thread_active); 1141 } 1142 pause("mr_shutdown", hz); 1143 } 1144 mrsas_flush_cache(sc); 1145 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1146 mrsas_disable_intr(sc); 1147 1148 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { 1149 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 1150 free(sc->streamDetectByLD[i], M_MRSAS); 1151 free(sc->streamDetectByLD, M_MRSAS); 1152 sc->streamDetectByLD = NULL; 1153 } 1154 1155 mrsas_cam_detach(sc); 1156 mrsas_teardown_intr(sc); 1157 mrsas_free_mem(sc); 1158 mtx_destroy(&sc->sim_lock); 1159 mtx_destroy(&sc->aen_lock); 1160 mtx_destroy(&sc->pci_lock); 1161 mtx_destroy(&sc->io_lock); 1162 mtx_destroy(&sc->ioctl_lock); 1163 mtx_destroy(&sc->mpt_cmd_pool_lock); 1164 mtx_destroy(&sc->mfi_cmd_pool_lock); 1165 mtx_destroy(&sc->raidmap_lock); 1166 mtx_destroy(&sc->stream_lock); 1167 1168 /* Wait for all the semaphores to be released */ 1169 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS) 1170 pause("mr_shutdown", hz); 1171 1172 /* Destroy the counting semaphore created for Ioctl */ 1173 sema_destroy(&sc->ioctl_count_sema); 1174 1175 if (sc->reg_res) { 1176 bus_release_resource(sc->mrsas_dev, 1177 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); 1178 } 1179 if (sc->sysctl_tree != NULL) 1180 sysctl_ctx_free(&sc->sysctl_ctx); 1181 1182 return (0); 1183 } 1184 1185 static int 1186 mrsas_shutdown(device_t dev) 1187 { 1188 struct mrsas_softc *sc; 1189 int i; 1190 1191 sc = device_get_softc(dev); 1192 sc->remove_in_progress = 1; 1193 if (panicstr == NULL) { 1194 if (sc->ocr_thread_active) 1195 wakeup(&sc->ocr_chan); 1196 i = 0; 1197 while (sc->reset_in_progress && i < 15) { 1198 i++; 1199 if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) { 1200 mrsas_dprint(sc, MRSAS_INFO, 1201 "[%2d]waiting for OCR to be finished " 1202 "from %s\n", i, __func__); 1203 } 1204 pause("mr_shutdown", hz); 1205 } 1206 if (sc->reset_in_progress) { 1207 mrsas_dprint(sc, MRSAS_INFO, 1208 "gave up waiting for OCR to be finished\n"); 1209 } 1210 } 1211 1212 mrsas_flush_cache(sc); 1213 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1214 mrsas_disable_intr(sc); 1215 return (0); 1216 } 1217 1218 /* 1219 * mrsas_free_mem: Frees allocated memory 1220 * input: Adapter instance soft state 1221 * 1222 * This function is called from mrsas_detach() to free previously allocated 1223 * memory. 1224 */ 1225 void 1226 mrsas_free_mem(struct mrsas_softc *sc) 1227 { 1228 int i; 1229 u_int32_t max_fw_cmds; 1230 struct mrsas_mfi_cmd *mfi_cmd; 1231 struct mrsas_mpt_cmd *mpt_cmd; 1232 1233 /* 1234 * Free RAID map memory 1235 */ 1236 for (i = 0; i < 2; i++) { 1237 if (sc->raidmap_phys_addr[i]) 1238 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]); 1239 if (sc->raidmap_mem[i] != NULL) 1240 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]); 1241 if (sc->raidmap_tag[i] != NULL) 1242 bus_dma_tag_destroy(sc->raidmap_tag[i]); 1243 1244 if (sc->ld_drv_map[i] != NULL) 1245 free(sc->ld_drv_map[i], M_MRSAS); 1246 } 1247 for (i = 0; i < 2; i++) { 1248 if (sc->jbodmap_phys_addr[i]) 1249 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]); 1250 if (sc->jbodmap_mem[i] != NULL) 1251 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]); 1252 if (sc->jbodmap_tag[i] != NULL) 1253 bus_dma_tag_destroy(sc->jbodmap_tag[i]); 1254 } 1255 /* 1256 * Free version buffer memory 1257 */ 1258 if (sc->verbuf_phys_addr) 1259 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap); 1260 if (sc->verbuf_mem != NULL) 1261 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap); 1262 if (sc->verbuf_tag != NULL) 1263 bus_dma_tag_destroy(sc->verbuf_tag); 1264 1265 1266 /* 1267 * Free sense buffer memory 1268 */ 1269 if (sc->sense_phys_addr) 1270 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap); 1271 if (sc->sense_mem != NULL) 1272 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap); 1273 if (sc->sense_tag != NULL) 1274 bus_dma_tag_destroy(sc->sense_tag); 1275 1276 /* 1277 * Free chain frame memory 1278 */ 1279 if (sc->chain_frame_phys_addr) 1280 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap); 1281 if (sc->chain_frame_mem != NULL) 1282 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap); 1283 if (sc->chain_frame_tag != NULL) 1284 bus_dma_tag_destroy(sc->chain_frame_tag); 1285 1286 /* 1287 * Free IO Request memory 1288 */ 1289 if (sc->io_request_phys_addr) 1290 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap); 1291 if (sc->io_request_mem != NULL) 1292 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap); 1293 if (sc->io_request_tag != NULL) 1294 bus_dma_tag_destroy(sc->io_request_tag); 1295 1296 /* 1297 * Free Reply Descriptor memory 1298 */ 1299 if (sc->reply_desc_phys_addr) 1300 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap); 1301 if (sc->reply_desc_mem != NULL) 1302 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap); 1303 if (sc->reply_desc_tag != NULL) 1304 bus_dma_tag_destroy(sc->reply_desc_tag); 1305 1306 /* 1307 * Free event detail memory 1308 */ 1309 if (sc->evt_detail_phys_addr) 1310 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap); 1311 if (sc->evt_detail_mem != NULL) 1312 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap); 1313 if (sc->evt_detail_tag != NULL) 1314 bus_dma_tag_destroy(sc->evt_detail_tag); 1315 1316 /* 1317 * Free PD info memory 1318 */ 1319 if (sc->pd_info_phys_addr) 1320 bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap); 1321 if (sc->pd_info_mem != NULL) 1322 bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap); 1323 if (sc->pd_info_tag != NULL) 1324 bus_dma_tag_destroy(sc->pd_info_tag); 1325 1326 /* 1327 * Free MFI frames 1328 */ 1329 if (sc->mfi_cmd_list) { 1330 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1331 mfi_cmd = sc->mfi_cmd_list[i]; 1332 mrsas_free_frame(sc, mfi_cmd); 1333 } 1334 } 1335 if (sc->mficmd_frame_tag != NULL) 1336 bus_dma_tag_destroy(sc->mficmd_frame_tag); 1337 1338 /* 1339 * Free MPT internal command list 1340 */ 1341 max_fw_cmds = sc->max_fw_cmds; 1342 if (sc->mpt_cmd_list) { 1343 for (i = 0; i < max_fw_cmds; i++) { 1344 mpt_cmd = sc->mpt_cmd_list[i]; 1345 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap); 1346 free(sc->mpt_cmd_list[i], M_MRSAS); 1347 } 1348 free(sc->mpt_cmd_list, M_MRSAS); 1349 sc->mpt_cmd_list = NULL; 1350 } 1351 /* 1352 * Free MFI internal command list 1353 */ 1354 1355 if (sc->mfi_cmd_list) { 1356 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1357 free(sc->mfi_cmd_list[i], M_MRSAS); 1358 } 1359 free(sc->mfi_cmd_list, M_MRSAS); 1360 sc->mfi_cmd_list = NULL; 1361 } 1362 /* 1363 * Free request descriptor memory 1364 */ 1365 free(sc->req_desc, M_MRSAS); 1366 sc->req_desc = NULL; 1367 1368 /* 1369 * Destroy parent tag 1370 */ 1371 if (sc->mrsas_parent_tag != NULL) 1372 bus_dma_tag_destroy(sc->mrsas_parent_tag); 1373 1374 /* 1375 * Free ctrl_info memory 1376 */ 1377 if (sc->ctrl_info != NULL) 1378 free(sc->ctrl_info, M_MRSAS); 1379 } 1380 1381 /* 1382 * mrsas_teardown_intr: Teardown interrupt 1383 * input: Adapter instance soft state 1384 * 1385 * This function is called from mrsas_detach() to teardown and release bus 1386 * interrupt resourse. 1387 */ 1388 void 1389 mrsas_teardown_intr(struct mrsas_softc *sc) 1390 { 1391 int i; 1392 1393 if (!sc->msix_enable) { 1394 if (sc->intr_handle[0]) 1395 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); 1396 if (sc->mrsas_irq[0] != NULL) 1397 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1398 sc->irq_id[0], sc->mrsas_irq[0]); 1399 sc->intr_handle[0] = NULL; 1400 } else { 1401 for (i = 0; i < sc->msix_vectors; i++) { 1402 if (sc->intr_handle[i]) 1403 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], 1404 sc->intr_handle[i]); 1405 1406 if (sc->mrsas_irq[i] != NULL) 1407 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1408 sc->irq_id[i], sc->mrsas_irq[i]); 1409 1410 sc->intr_handle[i] = NULL; 1411 } 1412 pci_release_msi(sc->mrsas_dev); 1413 } 1414 1415 } 1416 1417 /* 1418 * mrsas_suspend: Suspend entry point 1419 * input: Device struct pointer 1420 * 1421 * This function is the entry point for system suspend from the OS. 1422 */ 1423 static int 1424 mrsas_suspend(device_t dev) 1425 { 1426 /* This will be filled when the driver will have hibernation support */ 1427 return (0); 1428 } 1429 1430 /* 1431 * mrsas_resume: Resume entry point 1432 * input: Device struct pointer 1433 * 1434 * This function is the entry point for system resume from the OS. 1435 */ 1436 static int 1437 mrsas_resume(device_t dev) 1438 { 1439 /* This will be filled when the driver will have hibernation support */ 1440 return (0); 1441 } 1442 1443 /** 1444 * mrsas_get_softc_instance: Find softc instance based on cmd type 1445 * 1446 * This function will return softc instance based on cmd type. 1447 * In some case, application fire ioctl on required management instance and 1448 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those 1449 * case, else get the softc instance from host_no provided by application in 1450 * user data. 1451 */ 1452 1453 static struct mrsas_softc * 1454 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1455 { 1456 struct mrsas_softc *sc = NULL; 1457 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1458 1459 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1460 sc = dev->si_drv1; 1461 } else { 1462 /* 1463 * get the Host number & the softc from data sent by the 1464 * Application 1465 */ 1466 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1467 if (sc == NULL) 1468 printf("There is no Controller number %d\n", 1469 user_ioc->host_no); 1470 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1471 mrsas_dprint(sc, MRSAS_FAULT, 1472 "Invalid Controller number %d\n", user_ioc->host_no); 1473 } 1474 1475 return sc; 1476 } 1477 1478 /* 1479 * mrsas_ioctl: IOCtl commands entry point. 1480 * 1481 * This function is the entry point for IOCtls from the OS. It calls the 1482 * appropriate function for processing depending on the command received. 1483 */ 1484 static int 1485 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, 1486 struct thread *td) 1487 { 1488 struct mrsas_softc *sc; 1489 int ret = 0, i = 0; 1490 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; 1491 1492 sc = mrsas_get_softc_instance(dev, cmd, arg); 1493 if (!sc) 1494 return ENOENT; 1495 1496 if (sc->remove_in_progress || 1497 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { 1498 mrsas_dprint(sc, MRSAS_INFO, 1499 "Either driver remove or shutdown called or " 1500 "HW is in unrecoverable critical error state.\n"); 1501 return ENOENT; 1502 } 1503 mtx_lock_spin(&sc->ioctl_lock); 1504 if (!sc->reset_in_progress) { 1505 mtx_unlock_spin(&sc->ioctl_lock); 1506 goto do_ioctl; 1507 } 1508 mtx_unlock_spin(&sc->ioctl_lock); 1509 while (sc->reset_in_progress) { 1510 i++; 1511 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1512 mrsas_dprint(sc, MRSAS_INFO, 1513 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1514 } 1515 pause("mr_ioctl", hz); 1516 } 1517 1518 do_ioctl: 1519 switch (cmd) { 1520 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: 1521 #ifdef COMPAT_FREEBSD32 1522 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: 1523 #endif 1524 /* 1525 * Decrement the Ioctl counting Semaphore before getting an 1526 * mfi command 1527 */ 1528 sema_wait(&sc->ioctl_count_sema); 1529 1530 ret = mrsas_passthru(sc, (void *)arg, cmd); 1531 1532 /* Increment the Ioctl counting semaphore value */ 1533 sema_post(&sc->ioctl_count_sema); 1534 1535 break; 1536 case MRSAS_IOC_SCAN_BUS: 1537 ret = mrsas_bus_scan(sc); 1538 break; 1539 1540 case MRSAS_IOC_GET_PCI_INFO: 1541 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; 1542 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); 1543 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); 1544 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); 1545 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); 1546 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); 1547 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," 1548 "pci device no: %d, pci function no: %d," 1549 "pci domain ID: %d\n", 1550 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, 1551 pciDrvInfo->functionNumber, pciDrvInfo->domainID); 1552 ret = 0; 1553 break; 1554 1555 default: 1556 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); 1557 ret = ENOENT; 1558 } 1559 1560 return (ret); 1561 } 1562 1563 /* 1564 * mrsas_poll: poll entry point for mrsas driver fd 1565 * 1566 * This function is the entry point for poll from the OS. It waits for some AEN 1567 * events to be triggered from the controller and notifies back. 1568 */ 1569 static int 1570 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) 1571 { 1572 struct mrsas_softc *sc; 1573 int revents = 0; 1574 1575 sc = dev->si_drv1; 1576 1577 if (poll_events & (POLLIN | POLLRDNORM)) { 1578 if (sc->mrsas_aen_triggered) { 1579 revents |= poll_events & (POLLIN | POLLRDNORM); 1580 } 1581 } 1582 if (revents == 0) { 1583 if (poll_events & (POLLIN | POLLRDNORM)) { 1584 mtx_lock(&sc->aen_lock); 1585 sc->mrsas_poll_waiting = 1; 1586 selrecord(td, &sc->mrsas_select); 1587 mtx_unlock(&sc->aen_lock); 1588 } 1589 } 1590 return revents; 1591 } 1592 1593 /* 1594 * mrsas_setup_irq: Set up interrupt 1595 * input: Adapter instance soft state 1596 * 1597 * This function sets up interrupts as a bus resource, with flags indicating 1598 * resource permitting contemporaneous sharing and for resource to activate 1599 * atomically. 1600 */ 1601 static int 1602 mrsas_setup_irq(struct mrsas_softc *sc) 1603 { 1604 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) 1605 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); 1606 1607 else { 1608 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); 1609 sc->irq_context[0].sc = sc; 1610 sc->irq_context[0].MSIxIndex = 0; 1611 sc->irq_id[0] = 0; 1612 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, 1613 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); 1614 if (sc->mrsas_irq[0] == NULL) { 1615 device_printf(sc->mrsas_dev, "Cannot allocate legcay" 1616 "interrupt\n"); 1617 return (FAIL); 1618 } 1619 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], 1620 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, 1621 &sc->irq_context[0], &sc->intr_handle[0])) { 1622 device_printf(sc->mrsas_dev, "Cannot set up legacy" 1623 "interrupt\n"); 1624 return (FAIL); 1625 } 1626 } 1627 return (0); 1628 } 1629 1630 /* 1631 * mrsas_isr: ISR entry point 1632 * input: argument pointer 1633 * 1634 * This function is the interrupt service routine entry point. There are two 1635 * types of interrupts, state change interrupt and response interrupt. If an 1636 * interrupt is not ours, we just return. 1637 */ 1638 void 1639 mrsas_isr(void *arg) 1640 { 1641 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg; 1642 struct mrsas_softc *sc = irq_context->sc; 1643 int status = 0; 1644 1645 if (sc->mask_interrupts) 1646 return; 1647 1648 if (!sc->msix_vectors) { 1649 status = mrsas_clear_intr(sc); 1650 if (!status) 1651 return; 1652 } 1653 /* If we are resetting, bail */ 1654 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { 1655 printf(" Entered into ISR when OCR is going active. \n"); 1656 mrsas_clear_intr(sc); 1657 return; 1658 } 1659 /* Process for reply request and clear response interrupt */ 1660 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS) 1661 mrsas_clear_intr(sc); 1662 1663 return; 1664 } 1665 1666 /* 1667 * mrsas_complete_cmd: Process reply request 1668 * input: Adapter instance soft state 1669 * 1670 * This function is called from mrsas_isr() to process reply request and clear 1671 * response interrupt. Processing of the reply request entails walking 1672 * through the reply descriptor array for the command request pended from 1673 * Firmware. We look at the Function field to determine the command type and 1674 * perform the appropriate action. Before we return, we clear the response 1675 * interrupt. 1676 */ 1677 int 1678 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) 1679 { 1680 Mpi2ReplyDescriptorsUnion_t *desc; 1681 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1682 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; 1683 struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL; 1684 struct mrsas_mfi_cmd *cmd_mfi; 1685 u_int8_t reply_descript_type, *sense; 1686 u_int16_t smid, num_completed; 1687 u_int8_t status, extStatus; 1688 union desc_value desc_val; 1689 PLD_LOAD_BALANCE_INFO lbinfo; 1690 u_int32_t device_id, data_length; 1691 int threshold_reply_count = 0; 1692 #if TM_DEBUG 1693 MR_TASK_MANAGE_REQUEST *mr_tm_req; 1694 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 1695 #endif 1696 1697 /* If we have a hardware error, not need to continue */ 1698 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 1699 return (DONE); 1700 1701 desc = sc->reply_desc_mem; 1702 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) 1703 + sc->last_reply_idx[MSIxIndex]; 1704 1705 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1706 1707 desc_val.word = desc->Words; 1708 num_completed = 0; 1709 1710 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1711 1712 /* Find our reply descriptor for the command and process */ 1713 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { 1714 smid = reply_desc->SMID; 1715 cmd_mpt = sc->mpt_cmd_list[smid - 1]; 1716 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; 1717 1718 status = scsi_io_req->RaidContext.raid_context.status; 1719 extStatus = scsi_io_req->RaidContext.raid_context.exStatus; 1720 sense = cmd_mpt->sense; 1721 data_length = scsi_io_req->DataLength; 1722 1723 switch (scsi_io_req->Function) { 1724 case MPI2_FUNCTION_SCSI_TASK_MGMT: 1725 #if TM_DEBUG 1726 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request; 1727 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *) 1728 &mr_tm_req->TmRequest; 1729 device_printf(sc->mrsas_dev, "TM completion type 0x%X, " 1730 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 1731 #endif 1732 wakeup_one((void *)&sc->ocr_chan); 1733 break; 1734 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ 1735 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; 1736 lbinfo = &sc->load_balance_info[device_id]; 1737 /* R1 load balancing for READ */ 1738 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { 1739 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); 1740 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; 1741 } 1742 /* Fall thru and complete IO */ 1743 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: 1744 if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 1745 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1746 extStatus, data_length, sense); 1747 mrsas_cmd_done(sc, cmd_mpt); 1748 mrsas_atomic_dec(&sc->fw_outstanding); 1749 } else { 1750 /* 1751 * If the peer Raid 1/10 fast path failed, 1752 * mark IO as failed to the scsi layer. 1753 * Overwrite the current status by the failed status 1754 * and make sure that if any command fails, 1755 * driver returns fail status to CAM. 1756 */ 1757 cmd_mpt->cmd_completed = 1; 1758 r1_cmd = cmd_mpt->peer_cmd; 1759 if (r1_cmd->cmd_completed) { 1760 if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) { 1761 status = r1_cmd->io_request->RaidContext.raid_context.status; 1762 extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus; 1763 data_length = r1_cmd->io_request->DataLength; 1764 sense = r1_cmd->sense; 1765 } 1766 r1_cmd->ccb_ptr = NULL; 1767 if (r1_cmd->callout_owner) { 1768 callout_stop(&r1_cmd->cm_callout); 1769 r1_cmd->callout_owner = false; 1770 } 1771 mrsas_release_mpt_cmd(r1_cmd); 1772 mrsas_atomic_dec(&sc->fw_outstanding); 1773 mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status, 1774 extStatus, data_length, sense); 1775 mrsas_cmd_done(sc, cmd_mpt); 1776 mrsas_atomic_dec(&sc->fw_outstanding); 1777 } 1778 } 1779 break; 1780 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ 1781 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 1782 /* 1783 * Make sure NOT TO release the mfi command from the called 1784 * function's context if it is fired with issue_polled call. 1785 * And also make sure that the issue_polled call should only be 1786 * used if INTERRUPT IS DISABLED. 1787 */ 1788 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 1789 mrsas_release_mfi_cmd(cmd_mfi); 1790 else 1791 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); 1792 break; 1793 } 1794 1795 sc->last_reply_idx[MSIxIndex]++; 1796 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) 1797 sc->last_reply_idx[MSIxIndex] = 0; 1798 1799 desc->Words = ~((uint64_t)0x00); /* set it back to all 1800 * 0xFFFFFFFFs */ 1801 num_completed++; 1802 threshold_reply_count++; 1803 1804 /* Get the next reply descriptor */ 1805 if (!sc->last_reply_idx[MSIxIndex]) { 1806 desc = sc->reply_desc_mem; 1807 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); 1808 } else 1809 desc++; 1810 1811 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1812 desc_val.word = desc->Words; 1813 1814 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1815 1816 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1817 break; 1818 1819 /* 1820 * Write to reply post index after completing threshold reply 1821 * count and still there are more replies in reply queue 1822 * pending to be completed. 1823 */ 1824 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 1825 if (sc->msix_enable) { 1826 if (sc->msix_combined) 1827 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1828 ((MSIxIndex & 0x7) << 24) | 1829 sc->last_reply_idx[MSIxIndex]); 1830 else 1831 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1832 sc->last_reply_idx[MSIxIndex]); 1833 } else 1834 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1835 reply_post_host_index), sc->last_reply_idx[0]); 1836 1837 threshold_reply_count = 0; 1838 } 1839 } 1840 1841 /* No match, just return */ 1842 if (num_completed == 0) 1843 return (DONE); 1844 1845 /* Clear response interrupt */ 1846 if (sc->msix_enable) { 1847 if (sc->msix_combined) { 1848 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1849 ((MSIxIndex & 0x7) << 24) | 1850 sc->last_reply_idx[MSIxIndex]); 1851 } else 1852 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1853 sc->last_reply_idx[MSIxIndex]); 1854 } else 1855 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1856 reply_post_host_index), sc->last_reply_idx[0]); 1857 1858 return (0); 1859 } 1860 1861 /* 1862 * mrsas_map_mpt_cmd_status: Allocate DMAable memory. 1863 * input: Adapter instance soft state 1864 * 1865 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. 1866 * It checks the command status and maps the appropriate CAM status for the 1867 * CCB. 1868 */ 1869 void 1870 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status, 1871 u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense) 1872 { 1873 struct mrsas_softc *sc = cmd->sc; 1874 u_int8_t *sense_data; 1875 1876 switch (status) { 1877 case MFI_STAT_OK: 1878 ccb_ptr->ccb_h.status = CAM_REQ_CMP; 1879 break; 1880 case MFI_STAT_SCSI_IO_FAILED: 1881 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1882 ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1883 sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data; 1884 if (sense_data) { 1885 /* For now just copy 18 bytes back */ 1886 memcpy(sense_data, sense, 18); 1887 ccb_ptr->csio.sense_len = 18; 1888 ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; 1889 } 1890 break; 1891 case MFI_STAT_LD_OFFLINE: 1892 case MFI_STAT_DEVICE_NOT_FOUND: 1893 if (ccb_ptr->ccb_h.target_lun) 1894 ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; 1895 else 1896 ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; 1897 break; 1898 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1899 ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; 1900 break; 1901 default: 1902 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); 1903 ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; 1904 ccb_ptr->csio.scsi_status = status; 1905 } 1906 return; 1907 } 1908 1909 /* 1910 * mrsas_alloc_mem: Allocate DMAable memory 1911 * input: Adapter instance soft state 1912 * 1913 * This function creates the parent DMA tag and allocates DMAable memory. DMA 1914 * tag describes constraints of DMA mapping. Memory allocated is mapped into 1915 * Kernel virtual address. Callback argument is physical memory address. 1916 */ 1917 static int 1918 mrsas_alloc_mem(struct mrsas_softc *sc) 1919 { 1920 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, 1921 evt_detail_size, count, pd_info_size; 1922 1923 /* 1924 * Allocate parent DMA tag 1925 */ 1926 if (bus_dma_tag_create(NULL, /* parent */ 1927 1, /* alignment */ 1928 0, /* boundary */ 1929 BUS_SPACE_MAXADDR, /* lowaddr */ 1930 BUS_SPACE_MAXADDR, /* highaddr */ 1931 NULL, NULL, /* filter, filterarg */ 1932 MAXPHYS, /* maxsize */ 1933 sc->max_num_sge, /* nsegments */ 1934 MAXPHYS, /* maxsegsize */ 1935 0, /* flags */ 1936 NULL, NULL, /* lockfunc, lockarg */ 1937 &sc->mrsas_parent_tag /* tag */ 1938 )) { 1939 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); 1940 return (ENOMEM); 1941 } 1942 /* 1943 * Allocate for version buffer 1944 */ 1945 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); 1946 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1947 1, 0, 1948 BUS_SPACE_MAXADDR_32BIT, 1949 BUS_SPACE_MAXADDR, 1950 NULL, NULL, 1951 verbuf_size, 1952 1, 1953 verbuf_size, 1954 BUS_DMA_ALLOCNOW, 1955 NULL, NULL, 1956 &sc->verbuf_tag)) { 1957 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); 1958 return (ENOMEM); 1959 } 1960 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, 1961 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { 1962 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); 1963 return (ENOMEM); 1964 } 1965 bzero(sc->verbuf_mem, verbuf_size); 1966 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, 1967 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, 1968 BUS_DMA_NOWAIT)) { 1969 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); 1970 return (ENOMEM); 1971 } 1972 /* 1973 * Allocate IO Request Frames 1974 */ 1975 io_req_size = sc->io_frames_alloc_sz; 1976 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1977 16, 0, 1978 BUS_SPACE_MAXADDR_32BIT, 1979 BUS_SPACE_MAXADDR, 1980 NULL, NULL, 1981 io_req_size, 1982 1, 1983 io_req_size, 1984 BUS_DMA_ALLOCNOW, 1985 NULL, NULL, 1986 &sc->io_request_tag)) { 1987 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); 1988 return (ENOMEM); 1989 } 1990 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, 1991 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { 1992 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); 1993 return (ENOMEM); 1994 } 1995 bzero(sc->io_request_mem, io_req_size); 1996 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, 1997 sc->io_request_mem, io_req_size, mrsas_addr_cb, 1998 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { 1999 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); 2000 return (ENOMEM); 2001 } 2002 /* 2003 * Allocate Chain Frames 2004 */ 2005 chain_frame_size = sc->chain_frames_alloc_sz; 2006 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2007 4, 0, 2008 BUS_SPACE_MAXADDR_32BIT, 2009 BUS_SPACE_MAXADDR, 2010 NULL, NULL, 2011 chain_frame_size, 2012 1, 2013 chain_frame_size, 2014 BUS_DMA_ALLOCNOW, 2015 NULL, NULL, 2016 &sc->chain_frame_tag)) { 2017 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); 2018 return (ENOMEM); 2019 } 2020 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, 2021 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { 2022 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); 2023 return (ENOMEM); 2024 } 2025 bzero(sc->chain_frame_mem, chain_frame_size); 2026 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, 2027 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, 2028 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { 2029 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); 2030 return (ENOMEM); 2031 } 2032 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2033 /* 2034 * Allocate Reply Descriptor Array 2035 */ 2036 reply_desc_size = sc->reply_alloc_sz * count; 2037 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2038 16, 0, 2039 BUS_SPACE_MAXADDR_32BIT, 2040 BUS_SPACE_MAXADDR, 2041 NULL, NULL, 2042 reply_desc_size, 2043 1, 2044 reply_desc_size, 2045 BUS_DMA_ALLOCNOW, 2046 NULL, NULL, 2047 &sc->reply_desc_tag)) { 2048 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); 2049 return (ENOMEM); 2050 } 2051 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, 2052 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { 2053 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); 2054 return (ENOMEM); 2055 } 2056 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, 2057 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, 2058 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { 2059 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); 2060 return (ENOMEM); 2061 } 2062 /* 2063 * Allocate Sense Buffer Array. Keep in lower 4GB 2064 */ 2065 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; 2066 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2067 64, 0, 2068 BUS_SPACE_MAXADDR_32BIT, 2069 BUS_SPACE_MAXADDR, 2070 NULL, NULL, 2071 sense_size, 2072 1, 2073 sense_size, 2074 BUS_DMA_ALLOCNOW, 2075 NULL, NULL, 2076 &sc->sense_tag)) { 2077 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); 2078 return (ENOMEM); 2079 } 2080 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, 2081 BUS_DMA_NOWAIT, &sc->sense_dmamap)) { 2082 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); 2083 return (ENOMEM); 2084 } 2085 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, 2086 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, 2087 BUS_DMA_NOWAIT)) { 2088 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); 2089 return (ENOMEM); 2090 } 2091 2092 /* 2093 * Allocate for Event detail structure 2094 */ 2095 evt_detail_size = sizeof(struct mrsas_evt_detail); 2096 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2097 1, 0, 2098 BUS_SPACE_MAXADDR_32BIT, 2099 BUS_SPACE_MAXADDR, 2100 NULL, NULL, 2101 evt_detail_size, 2102 1, 2103 evt_detail_size, 2104 BUS_DMA_ALLOCNOW, 2105 NULL, NULL, 2106 &sc->evt_detail_tag)) { 2107 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); 2108 return (ENOMEM); 2109 } 2110 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, 2111 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { 2112 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); 2113 return (ENOMEM); 2114 } 2115 bzero(sc->evt_detail_mem, evt_detail_size); 2116 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, 2117 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, 2118 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { 2119 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); 2120 return (ENOMEM); 2121 } 2122 2123 /* 2124 * Allocate for PD INFO structure 2125 */ 2126 pd_info_size = sizeof(struct mrsas_pd_info); 2127 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2128 1, 0, 2129 BUS_SPACE_MAXADDR_32BIT, 2130 BUS_SPACE_MAXADDR, 2131 NULL, NULL, 2132 pd_info_size, 2133 1, 2134 pd_info_size, 2135 BUS_DMA_ALLOCNOW, 2136 NULL, NULL, 2137 &sc->pd_info_tag)) { 2138 device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n"); 2139 return (ENOMEM); 2140 } 2141 if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem, 2142 BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) { 2143 device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n"); 2144 return (ENOMEM); 2145 } 2146 bzero(sc->pd_info_mem, pd_info_size); 2147 if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap, 2148 sc->pd_info_mem, pd_info_size, mrsas_addr_cb, 2149 &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) { 2150 device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n"); 2151 return (ENOMEM); 2152 } 2153 2154 /* 2155 * Create a dma tag for data buffers; size will be the maximum 2156 * possible I/O size (280kB). 2157 */ 2158 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2159 1, 2160 0, 2161 BUS_SPACE_MAXADDR, 2162 BUS_SPACE_MAXADDR, 2163 NULL, NULL, 2164 MAXPHYS, 2165 sc->max_num_sge, /* nsegments */ 2166 MAXPHYS, 2167 BUS_DMA_ALLOCNOW, 2168 busdma_lock_mutex, 2169 &sc->io_lock, 2170 &sc->data_tag)) { 2171 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); 2172 return (ENOMEM); 2173 } 2174 return (0); 2175 } 2176 2177 /* 2178 * mrsas_addr_cb: Callback function of bus_dmamap_load() 2179 * input: callback argument, machine dependent type 2180 * that describes DMA segments, number of segments, error code 2181 * 2182 * This function is for the driver to receive mapping information resultant of 2183 * the bus_dmamap_load(). The information is actually not being used, but the 2184 * address is saved anyway. 2185 */ 2186 void 2187 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2188 { 2189 bus_addr_t *addr; 2190 2191 addr = arg; 2192 *addr = segs[0].ds_addr; 2193 } 2194 2195 /* 2196 * mrsas_setup_raidmap: Set up RAID map. 2197 * input: Adapter instance soft state 2198 * 2199 * Allocate DMA memory for the RAID maps and perform setup. 2200 */ 2201 static int 2202 mrsas_setup_raidmap(struct mrsas_softc *sc) 2203 { 2204 int i; 2205 2206 for (i = 0; i < 2; i++) { 2207 sc->ld_drv_map[i] = 2208 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 2209 /* Do Error handling */ 2210 if (!sc->ld_drv_map[i]) { 2211 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 2212 2213 if (i == 1) 2214 free(sc->ld_drv_map[0], M_MRSAS); 2215 /* ABORT driver initialization */ 2216 goto ABORT; 2217 } 2218 } 2219 2220 for (int i = 0; i < 2; i++) { 2221 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2222 4, 0, 2223 BUS_SPACE_MAXADDR_32BIT, 2224 BUS_SPACE_MAXADDR, 2225 NULL, NULL, 2226 sc->max_map_sz, 2227 1, 2228 sc->max_map_sz, 2229 BUS_DMA_ALLOCNOW, 2230 NULL, NULL, 2231 &sc->raidmap_tag[i])) { 2232 device_printf(sc->mrsas_dev, 2233 "Cannot allocate raid map tag.\n"); 2234 return (ENOMEM); 2235 } 2236 if (bus_dmamem_alloc(sc->raidmap_tag[i], 2237 (void **)&sc->raidmap_mem[i], 2238 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 2239 device_printf(sc->mrsas_dev, 2240 "Cannot allocate raidmap memory.\n"); 2241 return (ENOMEM); 2242 } 2243 bzero(sc->raidmap_mem[i], sc->max_map_sz); 2244 2245 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 2246 sc->raidmap_mem[i], sc->max_map_sz, 2247 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 2248 BUS_DMA_NOWAIT)) { 2249 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 2250 return (ENOMEM); 2251 } 2252 if (!sc->raidmap_mem[i]) { 2253 device_printf(sc->mrsas_dev, 2254 "Cannot allocate memory for raid map.\n"); 2255 return (ENOMEM); 2256 } 2257 } 2258 2259 if (!mrsas_get_map_info(sc)) 2260 mrsas_sync_map_info(sc); 2261 2262 return (0); 2263 2264 ABORT: 2265 return (1); 2266 } 2267 2268 /** 2269 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 2270 * @sc: Adapter soft state 2271 * 2272 * Return 0 on success. 2273 */ 2274 void 2275 megasas_setup_jbod_map(struct mrsas_softc *sc) 2276 { 2277 int i; 2278 uint32_t pd_seq_map_sz; 2279 2280 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 2281 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 2282 2283 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 2284 sc->use_seqnum_jbod_fp = 0; 2285 return; 2286 } 2287 if (sc->jbodmap_mem[0]) 2288 goto skip_alloc; 2289 2290 for (i = 0; i < 2; i++) { 2291 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2292 4, 0, 2293 BUS_SPACE_MAXADDR_32BIT, 2294 BUS_SPACE_MAXADDR, 2295 NULL, NULL, 2296 pd_seq_map_sz, 2297 1, 2298 pd_seq_map_sz, 2299 BUS_DMA_ALLOCNOW, 2300 NULL, NULL, 2301 &sc->jbodmap_tag[i])) { 2302 device_printf(sc->mrsas_dev, 2303 "Cannot allocate jbod map tag.\n"); 2304 return; 2305 } 2306 if (bus_dmamem_alloc(sc->jbodmap_tag[i], 2307 (void **)&sc->jbodmap_mem[i], 2308 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) { 2309 device_printf(sc->mrsas_dev, 2310 "Cannot allocate jbod map memory.\n"); 2311 return; 2312 } 2313 bzero(sc->jbodmap_mem[i], pd_seq_map_sz); 2314 2315 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i], 2316 sc->jbodmap_mem[i], pd_seq_map_sz, 2317 mrsas_addr_cb, &sc->jbodmap_phys_addr[i], 2318 BUS_DMA_NOWAIT)) { 2319 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); 2320 return; 2321 } 2322 if (!sc->jbodmap_mem[i]) { 2323 device_printf(sc->mrsas_dev, 2324 "Cannot allocate memory for jbod map.\n"); 2325 sc->use_seqnum_jbod_fp = 0; 2326 return; 2327 } 2328 } 2329 2330 skip_alloc: 2331 if (!megasas_sync_pd_seq_num(sc, false) && 2332 !megasas_sync_pd_seq_num(sc, true)) 2333 sc->use_seqnum_jbod_fp = 1; 2334 else 2335 sc->use_seqnum_jbod_fp = 0; 2336 2337 device_printf(sc->mrsas_dev, "Jbod map is supported\n"); 2338 } 2339 2340 /* 2341 * mrsas_init_fw: Initialize Firmware 2342 * input: Adapter soft state 2343 * 2344 * Calls transition_to_ready() to make sure Firmware is in operational state and 2345 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It 2346 * issues internal commands to get the controller info after the IOC_INIT 2347 * command response is received by Firmware. Note: code relating to 2348 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2349 * is left here as placeholder. 2350 */ 2351 static int 2352 mrsas_init_fw(struct mrsas_softc *sc) 2353 { 2354 2355 int ret, loop, ocr = 0; 2356 u_int32_t max_sectors_1; 2357 u_int32_t max_sectors_2; 2358 u_int32_t tmp_sectors; 2359 u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4; 2360 int msix_enable = 0; 2361 int fw_msix_count = 0; 2362 int i, j; 2363 2364 /* Make sure Firmware is ready */ 2365 ret = mrsas_transition_to_ready(sc, ocr); 2366 if (ret != SUCCESS) { 2367 return (ret); 2368 } 2369 if (sc->is_ventura || sc->is_aero) { 2370 scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3)); 2371 #if VD_EXT_DEBUG 2372 device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3); 2373 #endif 2374 sc->maxRaidMapSize = ((scratch_pad_3 >> 2375 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 2376 MR_MAX_RAID_MAP_SIZE_MASK); 2377 } 2378 /* MSI-x index 0- reply post host index register */ 2379 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2380 /* Check if MSI-X is supported while in ready state */ 2381 msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2382 2383 if (msix_enable) { 2384 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2385 outbound_scratch_pad_2)); 2386 2387 /* Check max MSI-X vectors */ 2388 if (sc->device_id == MRSAS_TBOLT) { 2389 sc->msix_vectors = (scratch_pad_2 2390 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2391 fw_msix_count = sc->msix_vectors; 2392 } else { 2393 /* Invader/Fury supports 96 MSI-X vectors */ 2394 sc->msix_vectors = ((scratch_pad_2 2395 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2396 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2397 fw_msix_count = sc->msix_vectors; 2398 2399 if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) || 2400 ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16))) 2401 sc->msix_combined = true; 2402 /* 2403 * Save 1-15 reply post index 2404 * address to local memory Index 0 2405 * is already saved from reg offset 2406 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 2407 */ 2408 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2409 loop++) { 2410 sc->msix_reg_offset[loop] = 2411 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2412 (loop * 0x10); 2413 } 2414 } 2415 2416 /* Don't bother allocating more MSI-X vectors than cpus */ 2417 sc->msix_vectors = min(sc->msix_vectors, 2418 mp_ncpus); 2419 2420 /* Allocate MSI-x vectors */ 2421 if (mrsas_allocate_msix(sc) == SUCCESS) 2422 sc->msix_enable = 1; 2423 else 2424 sc->msix_enable = 0; 2425 2426 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2427 "Online CPU %d Current MSIX <%d>\n", 2428 fw_msix_count, mp_ncpus, sc->msix_vectors); 2429 } 2430 /* 2431 * MSI-X host index 0 is common for all adapter. 2432 * It is used for all MPT based Adapters. 2433 */ 2434 if (sc->msix_combined) { 2435 sc->msix_reg_offset[0] = 2436 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET; 2437 } 2438 if (mrsas_init_adapter(sc) != SUCCESS) { 2439 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2440 return (1); 2441 } 2442 2443 if (sc->is_ventura || sc->is_aero) { 2444 scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2445 outbound_scratch_pad_4)); 2446 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT) 2447 sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK); 2448 2449 device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size); 2450 } 2451 2452 /* Allocate internal commands for pass-thru */ 2453 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2454 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2455 return (1); 2456 } 2457 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2458 if (!sc->ctrl_info) { 2459 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2460 return (1); 2461 } 2462 /* 2463 * Get the controller info from FW, so that the MAX VD support 2464 * availability can be decided. 2465 */ 2466 if (mrsas_get_ctrl_info(sc)) { 2467 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2468 return (1); 2469 } 2470 sc->secure_jbod_support = 2471 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2472 2473 if (sc->secure_jbod_support) 2474 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2475 2476 if (sc->use_seqnum_jbod_fp) 2477 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); 2478 2479 if (sc->support_morethan256jbod) 2480 device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n"); 2481 2482 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2483 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! " 2484 "There seems to be some problem in the controller\n" 2485 "Please contact to the SUPPORT TEAM if the problem persists\n"); 2486 } 2487 megasas_setup_jbod_map(sc); 2488 2489 2490 memset(sc->target_list, 0, 2491 MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target)); 2492 for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) 2493 sc->target_list[i].target_id = 0xffff; 2494 2495 /* For pass-thru, get PD/LD list and controller info */ 2496 memset(sc->pd_list, 0, 2497 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2498 if (mrsas_get_pd_list(sc) != SUCCESS) { 2499 device_printf(sc->mrsas_dev, "Get PD list failed.\n"); 2500 return (1); 2501 } 2502 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2503 if (mrsas_get_ld_list(sc) != SUCCESS) { 2504 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); 2505 return (1); 2506 } 2507 2508 if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) { 2509 sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) * 2510 MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT); 2511 if (!sc->streamDetectByLD) { 2512 device_printf(sc->mrsas_dev, 2513 "unable to allocate stream detection for pool of LDs\n"); 2514 return (1); 2515 } 2516 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 2517 sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT); 2518 if (!sc->streamDetectByLD[i]) { 2519 device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n"); 2520 for (j = 0; j < i; ++j) 2521 free(sc->streamDetectByLD[j], M_MRSAS); 2522 free(sc->streamDetectByLD, M_MRSAS); 2523 sc->streamDetectByLD = NULL; 2524 return (1); 2525 } 2526 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 2527 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 2528 } 2529 } 2530 2531 /* 2532 * Compute the max allowed sectors per IO: The controller info has 2533 * two limits on max sectors. Driver should use the minimum of these 2534 * two. 2535 * 2536 * 1 << stripe_sz_ops.min = max sectors per strip 2537 * 2538 * Note that older firmwares ( < FW ver 30) didn't report information to 2539 * calculate max_sectors_1. So the number ended up as zero always. 2540 */ 2541 tmp_sectors = 0; 2542 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2543 sc->ctrl_info->max_strips_per_io; 2544 max_sectors_2 = sc->ctrl_info->max_request_size; 2545 tmp_sectors = min(max_sectors_1, max_sectors_2); 2546 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2547 2548 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2549 sc->max_sectors_per_req = tmp_sectors; 2550 2551 sc->disableOnlineCtrlReset = 2552 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2553 sc->UnevenSpanSupport = 2554 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2555 if (sc->UnevenSpanSupport) { 2556 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2557 sc->UnevenSpanSupport); 2558 2559 if (MR_ValidateMapInfo(sc)) 2560 sc->fast_path_io = 1; 2561 else 2562 sc->fast_path_io = 0; 2563 } 2564 2565 device_printf(sc->mrsas_dev, "max_fw_cmds: %u max_scsi_cmds: %u\n", 2566 sc->max_fw_cmds, sc->max_scsi_cmds); 2567 return (0); 2568 } 2569 2570 /* 2571 * mrsas_init_adapter: Initializes the adapter/controller 2572 * input: Adapter soft state 2573 * 2574 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2575 * ROC/controller. The FW register is read to determined the number of 2576 * commands that is supported. All memory allocations for IO is based on 2577 * max_cmd. Appropriate calculations are performed in this function. 2578 */ 2579 int 2580 mrsas_init_adapter(struct mrsas_softc *sc) 2581 { 2582 uint32_t status; 2583 u_int32_t scratch_pad_2; 2584 int ret; 2585 int i = 0; 2586 2587 /* Read FW status register */ 2588 status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2589 2590 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; 2591 2592 /* Decrement the max supported by 1, to correlate with FW */ 2593 sc->max_fw_cmds = sc->max_fw_cmds - 1; 2594 sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS; 2595 2596 /* Determine allocation size of command frames */ 2597 sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2; 2598 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds; 2599 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); 2600 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 2601 (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1)); 2602 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2603 outbound_scratch_pad_2)); 2604 /* 2605 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 2606 * Firmware support extended IO chain frame which is 4 time more 2607 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) = 2608 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K 2609 */ 2610 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) 2611 sc->max_chain_frame_sz = 2612 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2613 * MEGASAS_1MB_IO; 2614 else 2615 sc->max_chain_frame_sz = 2616 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) 2617 * MEGASAS_256K_IO; 2618 2619 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds; 2620 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2621 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; 2622 2623 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); 2624 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; 2625 2626 mrsas_dprint(sc, MRSAS_INFO, 2627 "max sge: 0x%x, max chain frame size: 0x%x, " 2628 "max fw cmd: 0x%x\n", sc->max_num_sge, 2629 sc->max_chain_frame_sz, sc->max_fw_cmds); 2630 2631 /* Used for pass thru MFI frame (DCMD) */ 2632 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; 2633 2634 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 2635 sizeof(MPI2_SGE_IO_UNION)) / 16; 2636 2637 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2638 2639 for (i = 0; i < count; i++) 2640 sc->last_reply_idx[i] = 0; 2641 2642 ret = mrsas_alloc_mem(sc); 2643 if (ret != SUCCESS) 2644 return (ret); 2645 2646 ret = mrsas_alloc_mpt_cmds(sc); 2647 if (ret != SUCCESS) 2648 return (ret); 2649 2650 ret = mrsas_ioc_init(sc); 2651 if (ret != SUCCESS) 2652 return (ret); 2653 2654 return (0); 2655 } 2656 2657 /* 2658 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command 2659 * input: Adapter soft state 2660 * 2661 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. 2662 */ 2663 int 2664 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) 2665 { 2666 int ioc_init_size; 2667 2668 /* Allocate IOC INIT command */ 2669 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); 2670 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2671 1, 0, 2672 BUS_SPACE_MAXADDR_32BIT, 2673 BUS_SPACE_MAXADDR, 2674 NULL, NULL, 2675 ioc_init_size, 2676 1, 2677 ioc_init_size, 2678 BUS_DMA_ALLOCNOW, 2679 NULL, NULL, 2680 &sc->ioc_init_tag)) { 2681 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); 2682 return (ENOMEM); 2683 } 2684 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, 2685 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { 2686 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); 2687 return (ENOMEM); 2688 } 2689 bzero(sc->ioc_init_mem, ioc_init_size); 2690 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, 2691 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, 2692 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { 2693 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); 2694 return (ENOMEM); 2695 } 2696 return (0); 2697 } 2698 2699 /* 2700 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command 2701 * input: Adapter soft state 2702 * 2703 * Deallocates memory of the IOC Init cmd. 2704 */ 2705 void 2706 mrsas_free_ioc_cmd(struct mrsas_softc *sc) 2707 { 2708 if (sc->ioc_init_phys_mem) 2709 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); 2710 if (sc->ioc_init_mem != NULL) 2711 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); 2712 if (sc->ioc_init_tag != NULL) 2713 bus_dma_tag_destroy(sc->ioc_init_tag); 2714 } 2715 2716 /* 2717 * mrsas_ioc_init: Sends IOC Init command to FW 2718 * input: Adapter soft state 2719 * 2720 * Issues the IOC Init cmd to FW to initialize the ROC/controller. 2721 */ 2722 int 2723 mrsas_ioc_init(struct mrsas_softc *sc) 2724 { 2725 struct mrsas_init_frame *init_frame; 2726 pMpi2IOCInitRequest_t IOCInitMsg; 2727 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; 2728 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 2729 bus_addr_t phys_addr; 2730 int i, retcode = 0; 2731 u_int32_t scratch_pad_2; 2732 2733 /* Allocate memory for the IOC INIT command */ 2734 if (mrsas_alloc_ioc_cmd(sc)) { 2735 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); 2736 return (1); 2737 } 2738 2739 if (!sc->block_sync_cache) { 2740 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2741 outbound_scratch_pad_2)); 2742 sc->fw_sync_cache_support = (scratch_pad_2 & 2743 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; 2744 } 2745 2746 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); 2747 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; 2748 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 2749 IOCInitMsg->MsgVersion = MPI2_VERSION; 2750 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION; 2751 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; 2752 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth; 2753 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr; 2754 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr; 2755 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); 2756 IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 2757 2758 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; 2759 init_frame->cmd = MFI_CMD_INIT; 2760 init_frame->cmd_status = 0xFF; 2761 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2762 2763 /* driver support Extended MSIX */ 2764 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 2765 init_frame->driver_operations. 2766 mfi_capabilities.support_additional_msix = 1; 2767 } 2768 if (sc->verbuf_mem) { 2769 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", 2770 MRSAS_VERSION); 2771 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; 2772 init_frame->driver_ver_hi = 0; 2773 } 2774 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; 2775 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; 2776 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; 2777 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 2778 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; 2779 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; 2780 init_frame->queue_info_new_phys_addr_lo = phys_addr; 2781 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); 2782 2783 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; 2784 req_desc.MFAIo.RequestFlags = 2785 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2786 2787 mrsas_disable_intr(sc); 2788 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); 2789 mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high); 2790 2791 /* 2792 * Poll response timer to wait for Firmware response. While this 2793 * timer with the DELAY call could block CPU, the time interval for 2794 * this is only 1 millisecond. 2795 */ 2796 if (init_frame->cmd_status == 0xFF) { 2797 for (i = 0; i < (max_wait * 1000); i++) { 2798 if (init_frame->cmd_status == 0xFF) 2799 DELAY(1000); 2800 else 2801 break; 2802 } 2803 } 2804 if (init_frame->cmd_status == 0) 2805 mrsas_dprint(sc, MRSAS_OCR, 2806 "IOC INIT response received from FW.\n"); 2807 else { 2808 if (init_frame->cmd_status == 0xFF) 2809 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); 2810 else 2811 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); 2812 retcode = 1; 2813 } 2814 2815 if (sc->is_aero) { 2816 scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 2817 outbound_scratch_pad_2)); 2818 sc->atomic_desc_support = (scratch_pad_2 & 2819 MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; 2820 device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n", 2821 sc->atomic_desc_support ? "Yes" : "No"); 2822 } 2823 2824 mrsas_free_ioc_cmd(sc); 2825 return (retcode); 2826 } 2827 2828 /* 2829 * mrsas_alloc_mpt_cmds: Allocates the command packets 2830 * input: Adapter instance soft state 2831 * 2832 * This function allocates the internal commands for IOs. Each command that is 2833 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An 2834 * array is allocated with mrsas_mpt_cmd context. The free commands are 2835 * maintained in a linked list (cmd pool). SMID value range is from 1 to 2836 * max_fw_cmds. 2837 */ 2838 int 2839 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) 2840 { 2841 int i, j; 2842 u_int32_t max_fw_cmds, count; 2843 struct mrsas_mpt_cmd *cmd; 2844 pMpi2ReplyDescriptorsUnion_t reply_desc; 2845 u_int32_t offset, chain_offset, sense_offset; 2846 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; 2847 u_int8_t *io_req_base, *chain_frame_base, *sense_base; 2848 2849 max_fw_cmds = sc->max_fw_cmds; 2850 2851 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); 2852 if (!sc->req_desc) { 2853 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); 2854 return (ENOMEM); 2855 } 2856 memset(sc->req_desc, 0, sc->request_alloc_sz); 2857 2858 /* 2859 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. 2860 * Allocate the dynamic array first and then allocate individual 2861 * commands. 2862 */ 2863 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds, 2864 M_MRSAS, M_NOWAIT); 2865 if (!sc->mpt_cmd_list) { 2866 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); 2867 return (ENOMEM); 2868 } 2869 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds); 2870 for (i = 0; i < max_fw_cmds; i++) { 2871 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), 2872 M_MRSAS, M_NOWAIT); 2873 if (!sc->mpt_cmd_list[i]) { 2874 for (j = 0; j < i; j++) 2875 free(sc->mpt_cmd_list[j], M_MRSAS); 2876 free(sc->mpt_cmd_list, M_MRSAS); 2877 sc->mpt_cmd_list = NULL; 2878 return (ENOMEM); 2879 } 2880 } 2881 2882 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2883 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 2884 chain_frame_base = (u_int8_t *)sc->chain_frame_mem; 2885 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; 2886 sense_base = (u_int8_t *)sc->sense_mem; 2887 sense_base_phys = (bus_addr_t)sc->sense_phys_addr; 2888 for (i = 0; i < max_fw_cmds; i++) { 2889 cmd = sc->mpt_cmd_list[i]; 2890 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 2891 chain_offset = sc->max_chain_frame_sz * i; 2892 sense_offset = MRSAS_SENSE_LEN * i; 2893 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); 2894 cmd->index = i + 1; 2895 cmd->ccb_ptr = NULL; 2896 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2897 callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); 2898 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 2899 cmd->sc = sc; 2900 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); 2901 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 2902 cmd->io_request_phys_addr = io_req_base_phys + offset; 2903 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); 2904 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; 2905 cmd->sense = sense_base + sense_offset; 2906 cmd->sense_phys_addr = sense_base_phys + sense_offset; 2907 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { 2908 return (FAIL); 2909 } 2910 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 2911 } 2912 2913 /* Initialize reply descriptor array to 0xFFFFFFFF */ 2914 reply_desc = sc->reply_desc_mem; 2915 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2916 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { 2917 reply_desc->Words = MRSAS_ULONG_MAX; 2918 } 2919 return (0); 2920 } 2921 2922 /* 2923 * mrsas_write_64bit_req_dsc: Writes 64 bit request descriptor to FW 2924 * input: Adapter softstate 2925 * request descriptor address low 2926 * request descriptor address high 2927 */ 2928 void 2929 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2930 u_int32_t req_desc_hi) 2931 { 2932 mtx_lock(&sc->pci_lock); 2933 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), 2934 req_desc_lo); 2935 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), 2936 req_desc_hi); 2937 mtx_unlock(&sc->pci_lock); 2938 } 2939 2940 /* 2941 * mrsas_fire_cmd: Sends command to FW 2942 * input: Adapter softstate 2943 * request descriptor address low 2944 * request descriptor address high 2945 * 2946 * This functions fires the command to Firmware by writing to the 2947 * inbound_low_queue_port and inbound_high_queue_port. 2948 */ 2949 void 2950 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 2951 u_int32_t req_desc_hi) 2952 { 2953 if (sc->atomic_desc_support) 2954 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port), 2955 req_desc_lo); 2956 else 2957 mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi); 2958 } 2959 2960 /* 2961 * mrsas_transition_to_ready: Move FW to Ready state input: 2962 * Adapter instance soft state 2963 * 2964 * During the initialization, FW passes can potentially be in any one of several 2965 * possible states. If the FW in operational, waiting-for-handshake states, 2966 * driver must take steps to bring it to ready state. Otherwise, it has to 2967 * wait for the ready state. 2968 */ 2969 int 2970 mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) 2971 { 2972 int i; 2973 u_int8_t max_wait; 2974 u_int32_t val, fw_state; 2975 u_int32_t cur_state; 2976 u_int32_t abs_state, curr_abs_state; 2977 2978 val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2979 fw_state = val & MFI_STATE_MASK; 2980 max_wait = MRSAS_RESET_WAIT_TIME; 2981 2982 if (fw_state != MFI_STATE_READY) 2983 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); 2984 2985 while (fw_state != MFI_STATE_READY) { 2986 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); 2987 switch (fw_state) { 2988 case MFI_STATE_FAULT: 2989 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); 2990 if (ocr) { 2991 cur_state = MFI_STATE_FAULT; 2992 break; 2993 } else 2994 return -ENODEV; 2995 case MFI_STATE_WAIT_HANDSHAKE: 2996 /* Set the CLR bit in inbound doorbell */ 2997 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 2998 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); 2999 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3000 break; 3001 case MFI_STATE_BOOT_MESSAGE_PENDING: 3002 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3003 MFI_INIT_HOTPLUG); 3004 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3005 break; 3006 case MFI_STATE_OPERATIONAL: 3007 /* 3008 * Bring it to READY state; assuming max wait 10 3009 * secs 3010 */ 3011 mrsas_disable_intr(sc); 3012 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); 3013 for (i = 0; i < max_wait * 1000; i++) { 3014 if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1) 3015 DELAY(1000); 3016 else 3017 break; 3018 } 3019 cur_state = MFI_STATE_OPERATIONAL; 3020 break; 3021 case MFI_STATE_UNDEFINED: 3022 /* 3023 * This state should not last for more than 2 3024 * seconds 3025 */ 3026 cur_state = MFI_STATE_UNDEFINED; 3027 break; 3028 case MFI_STATE_BB_INIT: 3029 cur_state = MFI_STATE_BB_INIT; 3030 break; 3031 case MFI_STATE_FW_INIT: 3032 cur_state = MFI_STATE_FW_INIT; 3033 break; 3034 case MFI_STATE_FW_INIT_2: 3035 cur_state = MFI_STATE_FW_INIT_2; 3036 break; 3037 case MFI_STATE_DEVICE_SCAN: 3038 cur_state = MFI_STATE_DEVICE_SCAN; 3039 break; 3040 case MFI_STATE_FLUSH_CACHE: 3041 cur_state = MFI_STATE_FLUSH_CACHE; 3042 break; 3043 default: 3044 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); 3045 return -ENODEV; 3046 } 3047 3048 /* 3049 * The cur_state should not last for more than max_wait secs 3050 */ 3051 for (i = 0; i < (max_wait * 1000); i++) { 3052 fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3053 outbound_scratch_pad)) & MFI_STATE_MASK); 3054 curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3055 outbound_scratch_pad)); 3056 if (abs_state == curr_abs_state) 3057 DELAY(1000); 3058 else 3059 break; 3060 } 3061 3062 /* 3063 * Return error if fw_state hasn't changed after max_wait 3064 */ 3065 if (curr_abs_state == abs_state) { 3066 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " 3067 "in %d secs\n", fw_state, max_wait); 3068 return -ENODEV; 3069 } 3070 } 3071 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); 3072 return 0; 3073 } 3074 3075 /* 3076 * mrsas_get_mfi_cmd: Get a cmd from free command pool 3077 * input: Adapter soft state 3078 * 3079 * This function removes an MFI command from the command list. 3080 */ 3081 struct mrsas_mfi_cmd * 3082 mrsas_get_mfi_cmd(struct mrsas_softc *sc) 3083 { 3084 struct mrsas_mfi_cmd *cmd = NULL; 3085 3086 mtx_lock(&sc->mfi_cmd_pool_lock); 3087 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { 3088 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); 3089 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); 3090 } 3091 mtx_unlock(&sc->mfi_cmd_pool_lock); 3092 3093 return cmd; 3094 } 3095 3096 /* 3097 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter. 3098 * input: Adapter Context. 3099 * 3100 * This function will check FW status register and flag do_timeout_reset flag. 3101 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has 3102 * trigger reset. 3103 */ 3104 static void 3105 mrsas_ocr_thread(void *arg) 3106 { 3107 struct mrsas_softc *sc; 3108 u_int32_t fw_status, fw_state; 3109 u_int8_t tm_target_reset_failed = 0; 3110 3111 sc = (struct mrsas_softc *)arg; 3112 3113 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); 3114 3115 sc->ocr_thread_active = 1; 3116 mtx_lock(&sc->sim_lock); 3117 for (;;) { 3118 /* Sleep for 1 second and check the queue status */ 3119 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 3120 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); 3121 if (sc->remove_in_progress || 3122 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3123 mrsas_dprint(sc, MRSAS_OCR, 3124 "Exit due to %s from %s\n", 3125 sc->remove_in_progress ? "Shutdown" : 3126 "Hardware critical error", __func__); 3127 break; 3128 } 3129 fw_status = mrsas_read_reg_with_retries(sc, 3130 offsetof(mrsas_reg_set, outbound_scratch_pad)); 3131 fw_state = fw_status & MFI_STATE_MASK; 3132 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || 3133 mrsas_atomic_read(&sc->target_reset_outstanding)) { 3134 3135 /* First, freeze further IOs to come to the SIM */ 3136 mrsas_xpt_freeze(sc); 3137 3138 /* If this is an IO timeout then go for target reset */ 3139 if (mrsas_atomic_read(&sc->target_reset_outstanding)) { 3140 device_printf(sc->mrsas_dev, "Initiating Target RESET " 3141 "because of SCSI IO timeout!\n"); 3142 3143 /* Let the remaining IOs to complete */ 3144 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 3145 "mrsas_reset_targets", 5 * hz); 3146 3147 /* Try to reset the target device */ 3148 if (mrsas_reset_targets(sc) == FAIL) 3149 tm_target_reset_failed = 1; 3150 } 3151 3152 /* If this is a DCMD timeout or FW fault, 3153 * then go for controller reset 3154 */ 3155 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || 3156 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { 3157 if (tm_target_reset_failed) 3158 device_printf(sc->mrsas_dev, "Initiaiting OCR because of " 3159 "TM FAILURE!\n"); 3160 else 3161 device_printf(sc->mrsas_dev, "Initiaiting OCR " 3162 "because of %s!\n", sc->do_timedout_reset ? 3163 "DCMD IO Timeout" : "FW fault"); 3164 3165 mtx_lock_spin(&sc->ioctl_lock); 3166 sc->reset_in_progress = 1; 3167 mtx_unlock_spin(&sc->ioctl_lock); 3168 sc->reset_count++; 3169 3170 /* 3171 * Wait for the AEN task to be completed if it is running. 3172 */ 3173 mtx_unlock(&sc->sim_lock); 3174 taskqueue_drain(sc->ev_tq, &sc->ev_task); 3175 mtx_lock(&sc->sim_lock); 3176 3177 taskqueue_block(sc->ev_tq); 3178 /* Try to reset the controller */ 3179 mrsas_reset_ctrl(sc, sc->do_timedout_reset); 3180 3181 sc->do_timedout_reset = 0; 3182 sc->reset_in_progress = 0; 3183 tm_target_reset_failed = 0; 3184 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 3185 memset(sc->target_reset_pool, 0, 3186 sizeof(sc->target_reset_pool)); 3187 taskqueue_unblock(sc->ev_tq); 3188 } 3189 3190 /* Now allow IOs to come to the SIM */ 3191 mrsas_xpt_release(sc); 3192 } 3193 } 3194 mtx_unlock(&sc->sim_lock); 3195 sc->ocr_thread_active = 0; 3196 mrsas_kproc_exit(0); 3197 } 3198 3199 /* 3200 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. 3201 * input: Adapter Context. 3202 * 3203 * This function will clear reply descriptor so that post OCR driver and FW will 3204 * lost old history. 3205 */ 3206 void 3207 mrsas_reset_reply_desc(struct mrsas_softc *sc) 3208 { 3209 int i, count; 3210 pMpi2ReplyDescriptorsUnion_t reply_desc; 3211 3212 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3213 for (i = 0; i < count; i++) 3214 sc->last_reply_idx[i] = 0; 3215 3216 reply_desc = sc->reply_desc_mem; 3217 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 3218 reply_desc->Words = MRSAS_ULONG_MAX; 3219 } 3220 } 3221 3222 /* 3223 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 3224 * input: Adapter Context. 3225 * 3226 * This function will run from thread context so that it can sleep. 1. Do not 3227 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 3228 * to complete for 180 seconds. 3. If #2 does not find any outstanding 3229 * command Controller is in working state, so skip OCR. Otherwise, do 3230 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the 3231 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post 3232 * OCR, Re-fire Management command and move Controller to Operation state. 3233 */ 3234 int 3235 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) 3236 { 3237 int retval = SUCCESS, i, j, retry = 0; 3238 u_int32_t host_diag, abs_state, status_reg, reset_adapter; 3239 union ccb *ccb; 3240 struct mrsas_mfi_cmd *mfi_cmd; 3241 struct mrsas_mpt_cmd *mpt_cmd; 3242 union mrsas_evt_class_locale class_locale; 3243 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3244 3245 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 3246 device_printf(sc->mrsas_dev, 3247 "mrsas: Hardware critical error, returning FAIL.\n"); 3248 return FAIL; 3249 } 3250 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3251 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; 3252 mrsas_disable_intr(sc); 3253 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", 3254 sc->mrsas_fw_fault_check_delay * hz); 3255 3256 /* First try waiting for commands to complete */ 3257 if (mrsas_wait_for_outstanding(sc, reset_reason)) { 3258 mrsas_dprint(sc, MRSAS_OCR, 3259 "resetting adapter from %s.\n", 3260 __func__); 3261 /* Now return commands back to the CAM layer */ 3262 mtx_unlock(&sc->sim_lock); 3263 for (i = 0; i < sc->max_fw_cmds; i++) { 3264 mpt_cmd = sc->mpt_cmd_list[i]; 3265 3266 if (mpt_cmd->peer_cmd) { 3267 mrsas_dprint(sc, MRSAS_OCR, 3268 "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n", 3269 i, mpt_cmd, mpt_cmd->peer_cmd); 3270 } 3271 3272 if (mpt_cmd->ccb_ptr) { 3273 if (mpt_cmd->callout_owner) { 3274 ccb = (union ccb *)(mpt_cmd->ccb_ptr); 3275 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3276 mrsas_cmd_done(sc, mpt_cmd); 3277 } else { 3278 mpt_cmd->ccb_ptr = NULL; 3279 mrsas_release_mpt_cmd(mpt_cmd); 3280 } 3281 } 3282 } 3283 3284 mrsas_atomic_set(&sc->fw_outstanding, 0); 3285 3286 mtx_lock(&sc->sim_lock); 3287 3288 status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3289 outbound_scratch_pad)); 3290 abs_state = status_reg & MFI_STATE_MASK; 3291 reset_adapter = status_reg & MFI_RESET_ADAPTER; 3292 if (sc->disableOnlineCtrlReset || 3293 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 3294 /* Reset not supported, kill adapter */ 3295 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); 3296 mrsas_kill_hba(sc); 3297 retval = FAIL; 3298 goto out; 3299 } 3300 /* Now try to reset the chip */ 3301 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { 3302 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3303 MPI2_WRSEQ_FLUSH_KEY_VALUE); 3304 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3305 MPI2_WRSEQ_1ST_KEY_VALUE); 3306 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3307 MPI2_WRSEQ_2ND_KEY_VALUE); 3308 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3309 MPI2_WRSEQ_3RD_KEY_VALUE); 3310 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3311 MPI2_WRSEQ_4TH_KEY_VALUE); 3312 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3313 MPI2_WRSEQ_5TH_KEY_VALUE); 3314 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), 3315 MPI2_WRSEQ_6TH_KEY_VALUE); 3316 3317 /* Check that the diag write enable (DRWE) bit is on */ 3318 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3319 fusion_host_diag)); 3320 retry = 0; 3321 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3322 DELAY(100 * 1000); 3323 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3324 fusion_host_diag)); 3325 if (retry++ == 100) { 3326 mrsas_dprint(sc, MRSAS_OCR, 3327 "Host diag unlock failed!\n"); 3328 break; 3329 } 3330 } 3331 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 3332 continue; 3333 3334 /* Send chip reset command */ 3335 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), 3336 host_diag | HOST_DIAG_RESET_ADAPTER); 3337 DELAY(3000 * 1000); 3338 3339 /* Make sure reset adapter bit is cleared */ 3340 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3341 fusion_host_diag)); 3342 retry = 0; 3343 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3344 DELAY(100 * 1000); 3345 host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3346 fusion_host_diag)); 3347 if (retry++ == 1000) { 3348 mrsas_dprint(sc, MRSAS_OCR, 3349 "Diag reset adapter never cleared!\n"); 3350 break; 3351 } 3352 } 3353 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3354 continue; 3355 3356 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3357 outbound_scratch_pad)) & MFI_STATE_MASK; 3358 retry = 0; 3359 3360 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3361 DELAY(100 * 1000); 3362 abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3363 outbound_scratch_pad)) & MFI_STATE_MASK; 3364 } 3365 if (abs_state <= MFI_STATE_FW_INIT) { 3366 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," 3367 " state = 0x%x\n", abs_state); 3368 continue; 3369 } 3370 /* Wait for FW to become ready */ 3371 if (mrsas_transition_to_ready(sc, 1)) { 3372 mrsas_dprint(sc, MRSAS_OCR, 3373 "mrsas: Failed to transition controller to ready.\n"); 3374 continue; 3375 } 3376 mrsas_reset_reply_desc(sc); 3377 if (mrsas_ioc_init(sc)) { 3378 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); 3379 continue; 3380 } 3381 for (j = 0; j < sc->max_fw_cmds; j++) { 3382 mpt_cmd = sc->mpt_cmd_list[j]; 3383 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3384 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; 3385 /* If not an IOCTL then release the command else re-fire */ 3386 if (!mfi_cmd->sync_cmd) { 3387 mrsas_release_mfi_cmd(mfi_cmd); 3388 } else { 3389 req_desc = mrsas_get_request_desc(sc, 3390 mfi_cmd->cmd_id.context.smid - 1); 3391 mrsas_dprint(sc, MRSAS_OCR, 3392 "Re-fire command DCMD opcode 0x%x index %d\n ", 3393 mfi_cmd->frame->dcmd.opcode, j); 3394 if (!req_desc) 3395 device_printf(sc->mrsas_dev, 3396 "Cannot build MPT cmd.\n"); 3397 else 3398 mrsas_fire_cmd(sc, req_desc->addr.u.low, 3399 req_desc->addr.u.high); 3400 } 3401 } 3402 } 3403 3404 /* Reset load balance info */ 3405 memset(sc->load_balance_info, 0, 3406 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); 3407 3408 if (mrsas_get_ctrl_info(sc)) { 3409 mrsas_kill_hba(sc); 3410 retval = FAIL; 3411 goto out; 3412 } 3413 if (!mrsas_get_map_info(sc)) 3414 mrsas_sync_map_info(sc); 3415 3416 megasas_setup_jbod_map(sc); 3417 3418 if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) { 3419 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 3420 memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT)); 3421 sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP; 3422 } 3423 } 3424 3425 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3426 mrsas_enable_intr(sc); 3427 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3428 3429 /* Register AEN with FW for last sequence number */ 3430 class_locale.members.reserved = 0; 3431 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3432 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3433 3434 mtx_unlock(&sc->sim_lock); 3435 if (mrsas_register_aen(sc, sc->last_seq_num, 3436 class_locale.word)) { 3437 device_printf(sc->mrsas_dev, 3438 "ERROR: AEN registration FAILED from OCR !!! " 3439 "Further events from the controller cannot be notified." 3440 "Either there is some problem in the controller" 3441 "or the controller does not support AEN.\n" 3442 "Please contact to the SUPPORT TEAM if the problem persists\n"); 3443 } 3444 mtx_lock(&sc->sim_lock); 3445 3446 /* Adapter reset completed successfully */ 3447 device_printf(sc->mrsas_dev, "Reset successful\n"); 3448 retval = SUCCESS; 3449 goto out; 3450 } 3451 /* Reset failed, kill the adapter */ 3452 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); 3453 mrsas_kill_hba(sc); 3454 retval = FAIL; 3455 } else { 3456 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3457 mrsas_enable_intr(sc); 3458 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 3459 } 3460 out: 3461 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); 3462 mrsas_dprint(sc, MRSAS_OCR, 3463 "Reset Exit with %d.\n", retval); 3464 return retval; 3465 } 3466 3467 /* 3468 * mrsas_kill_hba: Kill HBA when OCR is not supported 3469 * input: Adapter Context. 3470 * 3471 * This function will kill HBA when OCR is not supported. 3472 */ 3473 void 3474 mrsas_kill_hba(struct mrsas_softc *sc) 3475 { 3476 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; 3477 DELAY(1000 * 1000); 3478 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); 3479 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), 3480 MFI_STOP_ADP); 3481 /* Flush */ 3482 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); 3483 mrsas_complete_outstanding_ioctls(sc); 3484 } 3485 3486 /** 3487 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba 3488 * input: Controller softc 3489 * 3490 * Returns void 3491 */ 3492 void 3493 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) 3494 { 3495 int i; 3496 struct mrsas_mpt_cmd *cmd_mpt; 3497 struct mrsas_mfi_cmd *cmd_mfi; 3498 u_int32_t count, MSIxIndex; 3499 3500 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3501 for (i = 0; i < sc->max_fw_cmds; i++) { 3502 cmd_mpt = sc->mpt_cmd_list[i]; 3503 3504 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { 3505 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 3506 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { 3507 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3508 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, 3509 cmd_mpt->io_request->RaidContext.raid_context.status); 3510 } 3511 } 3512 } 3513 } 3514 3515 /* 3516 * mrsas_wait_for_outstanding: Wait for outstanding commands 3517 * input: Adapter Context. 3518 * 3519 * This function will wait for 180 seconds for outstanding commands to be 3520 * completed. 3521 */ 3522 int 3523 mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) 3524 { 3525 int i, outstanding, retval = 0; 3526 u_int32_t fw_state, count, MSIxIndex; 3527 3528 3529 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { 3530 if (sc->remove_in_progress) { 3531 mrsas_dprint(sc, MRSAS_OCR, 3532 "Driver remove or shutdown called.\n"); 3533 retval = 1; 3534 goto out; 3535 } 3536 /* Check if firmware is in fault state */ 3537 fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, 3538 outbound_scratch_pad)) & MFI_STATE_MASK; 3539 if (fw_state == MFI_STATE_FAULT) { 3540 mrsas_dprint(sc, MRSAS_OCR, 3541 "Found FW in FAULT state, will reset adapter.\n"); 3542 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3543 mtx_unlock(&sc->sim_lock); 3544 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3545 mrsas_complete_cmd(sc, MSIxIndex); 3546 mtx_lock(&sc->sim_lock); 3547 retval = 1; 3548 goto out; 3549 } 3550 if (check_reason == MFI_DCMD_TIMEOUT_OCR) { 3551 mrsas_dprint(sc, MRSAS_OCR, 3552 "DCMD IO TIMEOUT detected, will reset adapter.\n"); 3553 retval = 1; 3554 goto out; 3555 } 3556 outstanding = mrsas_atomic_read(&sc->fw_outstanding); 3557 if (!outstanding) 3558 goto out; 3559 3560 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 3561 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " 3562 "commands to complete\n", i, outstanding); 3563 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3564 mtx_unlock(&sc->sim_lock); 3565 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) 3566 mrsas_complete_cmd(sc, MSIxIndex); 3567 mtx_lock(&sc->sim_lock); 3568 } 3569 DELAY(1000 * 1000); 3570 } 3571 3572 if (mrsas_atomic_read(&sc->fw_outstanding)) { 3573 mrsas_dprint(sc, MRSAS_OCR, 3574 " pending commands remain after waiting," 3575 " will reset adapter.\n"); 3576 retval = 1; 3577 } 3578 out: 3579 return retval; 3580 } 3581 3582 /* 3583 * mrsas_release_mfi_cmd: Return a cmd to free command pool 3584 * input: Command packet for return to free cmd pool 3585 * 3586 * This function returns the MFI & MPT command to the command list. 3587 */ 3588 void 3589 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi) 3590 { 3591 struct mrsas_softc *sc = cmd_mfi->sc; 3592 struct mrsas_mpt_cmd *cmd_mpt; 3593 3594 3595 mtx_lock(&sc->mfi_cmd_pool_lock); 3596 /* 3597 * Release the mpt command (if at all it is allocated 3598 * associated with the mfi command 3599 */ 3600 if (cmd_mfi->cmd_id.context.smid) { 3601 mtx_lock(&sc->mpt_cmd_pool_lock); 3602 /* Get the mpt cmd from mfi cmd frame's smid value */ 3603 cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1]; 3604 cmd_mpt->flags = 0; 3605 cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 3606 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next); 3607 mtx_unlock(&sc->mpt_cmd_pool_lock); 3608 } 3609 /* Release the mfi command */ 3610 cmd_mfi->ccb_ptr = NULL; 3611 cmd_mfi->cmd_id.frame_count = 0; 3612 TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next); 3613 mtx_unlock(&sc->mfi_cmd_pool_lock); 3614 3615 return; 3616 } 3617 3618 /* 3619 * mrsas_get_controller_info: Returns FW's controller structure 3620 * input: Adapter soft state 3621 * Controller information structure 3622 * 3623 * Issues an internal command (DCMD) to get the FW's controller structure. This 3624 * information is mainly used to find out the maximum IO transfer per command 3625 * supported by the FW. 3626 */ 3627 static int 3628 mrsas_get_ctrl_info(struct mrsas_softc *sc) 3629 { 3630 int retcode = 0; 3631 u_int8_t do_ocr = 1; 3632 struct mrsas_mfi_cmd *cmd; 3633 struct mrsas_dcmd_frame *dcmd; 3634 3635 cmd = mrsas_get_mfi_cmd(sc); 3636 3637 if (!cmd) { 3638 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 3639 return -ENOMEM; 3640 } 3641 dcmd = &cmd->frame->dcmd; 3642 3643 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 3644 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 3645 mrsas_release_mfi_cmd(cmd); 3646 return -ENOMEM; 3647 } 3648 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3649 3650 dcmd->cmd = MFI_CMD_DCMD; 3651 dcmd->cmd_status = 0xFF; 3652 dcmd->sge_count = 1; 3653 dcmd->flags = MFI_FRAME_DIR_READ; 3654 dcmd->timeout = 0; 3655 dcmd->pad_0 = 0; 3656 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); 3657 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 3658 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; 3659 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); 3660 3661 if (!sc->mask_interrupts) 3662 retcode = mrsas_issue_blocked_cmd(sc, cmd); 3663 else 3664 retcode = mrsas_issue_polled(sc, cmd); 3665 3666 if (retcode == ETIMEDOUT) 3667 goto dcmd_timeout; 3668 else 3669 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 3670 3671 do_ocr = 0; 3672 mrsas_update_ext_vd_details(sc); 3673 3674 sc->use_seqnum_jbod_fp = 3675 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; 3676 sc->support_morethan256jbod = 3677 sc->ctrl_info->adapterOperations4.supportPdMapTargetId; 3678 3679 sc->disableOnlineCtrlReset = 3680 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3681 3682 dcmd_timeout: 3683 mrsas_free_ctlr_info_cmd(sc); 3684 3685 if (do_ocr) 3686 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 3687 3688 if (!sc->mask_interrupts) 3689 mrsas_release_mfi_cmd(cmd); 3690 3691 return (retcode); 3692 } 3693 3694 /* 3695 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD 3696 * input: 3697 * sc - Controller's softc 3698 */ 3699 static void 3700 mrsas_update_ext_vd_details(struct mrsas_softc *sc) 3701 { 3702 u_int32_t ventura_map_sz = 0; 3703 sc->max256vdSupport = 3704 sc->ctrl_info->adapterOperations3.supportMaxExtLDs; 3705 3706 /* Below is additional check to address future FW enhancement */ 3707 if (sc->ctrl_info->max_lds > 64) 3708 sc->max256vdSupport = 1; 3709 3710 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS 3711 * MRSAS_MAX_DEV_PER_CHANNEL; 3712 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS 3713 * MRSAS_MAX_DEV_PER_CHANNEL; 3714 if (sc->max256vdSupport) { 3715 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 3716 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3717 } else { 3718 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 3719 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 3720 } 3721 3722 if (sc->maxRaidMapSize) { 3723 ventura_map_sz = sc->maxRaidMapSize * 3724 MR_MIN_MAP_SIZE; 3725 sc->current_map_sz = ventura_map_sz; 3726 sc->max_map_sz = ventura_map_sz; 3727 } else { 3728 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + 3729 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); 3730 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); 3731 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); 3732 if (sc->max256vdSupport) 3733 sc->current_map_sz = sc->new_map_sz; 3734 else 3735 sc->current_map_sz = sc->old_map_sz; 3736 } 3737 3738 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL); 3739 #if VD_EXT_DEBUG 3740 device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n", 3741 sc->maxRaidMapSize); 3742 device_printf(sc->mrsas_dev, 3743 "new_map_sz = 0x%x, old_map_sz = 0x%x, " 3744 "ventura_map_sz = 0x%x, current_map_sz = 0x%x " 3745 "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n", 3746 sc->new_map_sz, sc->old_map_sz, ventura_map_sz, 3747 sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL)); 3748 #endif 3749 } 3750 3751 /* 3752 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 3753 * input: Adapter soft state 3754 * 3755 * Allocates DMAable memory for the controller info internal command. 3756 */ 3757 int 3758 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 3759 { 3760 int ctlr_info_size; 3761 3762 /* Allocate get controller info command */ 3763 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 3764 if (bus_dma_tag_create(sc->mrsas_parent_tag, 3765 1, 0, 3766 BUS_SPACE_MAXADDR_32BIT, 3767 BUS_SPACE_MAXADDR, 3768 NULL, NULL, 3769 ctlr_info_size, 3770 1, 3771 ctlr_info_size, 3772 BUS_DMA_ALLOCNOW, 3773 NULL, NULL, 3774 &sc->ctlr_info_tag)) { 3775 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 3776 return (ENOMEM); 3777 } 3778 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 3779 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 3780 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 3781 return (ENOMEM); 3782 } 3783 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 3784 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 3785 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 3786 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 3787 return (ENOMEM); 3788 } 3789 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 3790 return (0); 3791 } 3792 3793 /* 3794 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 3795 * input: Adapter soft state 3796 * 3797 * Deallocates memory of the get controller info cmd. 3798 */ 3799 void 3800 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) 3801 { 3802 if (sc->ctlr_info_phys_addr) 3803 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); 3804 if (sc->ctlr_info_mem != NULL) 3805 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); 3806 if (sc->ctlr_info_tag != NULL) 3807 bus_dma_tag_destroy(sc->ctlr_info_tag); 3808 } 3809 3810 /* 3811 * mrsas_issue_polled: Issues a polling command 3812 * inputs: Adapter soft state 3813 * Command packet to be issued 3814 * 3815 * This function is for posting of internal commands to Firmware. MFI requires 3816 * the cmd_status to be set to 0xFF before posting. The maximun wait time of 3817 * the poll response timer is 180 seconds. 3818 */ 3819 int 3820 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3821 { 3822 struct mrsas_header *frame_hdr = &cmd->frame->hdr; 3823 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3824 int i, retcode = SUCCESS; 3825 3826 frame_hdr->cmd_status = 0xFF; 3827 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3828 3829 /* Issue the frame using inbound queue port */ 3830 if (mrsas_issue_dcmd(sc, cmd)) { 3831 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3832 return (1); 3833 } 3834 /* 3835 * Poll response timer to wait for Firmware response. While this 3836 * timer with the DELAY call could block CPU, the time interval for 3837 * this is only 1 millisecond. 3838 */ 3839 if (frame_hdr->cmd_status == 0xFF) { 3840 for (i = 0; i < (max_wait * 1000); i++) { 3841 if (frame_hdr->cmd_status == 0xFF) 3842 DELAY(1000); 3843 else 3844 break; 3845 } 3846 } 3847 if (frame_hdr->cmd_status == 0xFF) { 3848 device_printf(sc->mrsas_dev, "DCMD timed out after %d " 3849 "seconds from %s\n", max_wait, __func__); 3850 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", 3851 cmd->frame->dcmd.opcode); 3852 retcode = ETIMEDOUT; 3853 } 3854 return (retcode); 3855 } 3856 3857 /* 3858 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd 3859 * input: Adapter soft state mfi cmd pointer 3860 * 3861 * This function is called by mrsas_issued_blocked_cmd() and 3862 * mrsas_issued_polled(), to build the MPT command and then fire the command 3863 * to Firmware. 3864 */ 3865 int 3866 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3867 { 3868 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3869 3870 req_desc = mrsas_build_mpt_cmd(sc, cmd); 3871 if (!req_desc) { 3872 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); 3873 return (1); 3874 } 3875 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); 3876 3877 return (0); 3878 } 3879 3880 /* 3881 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd 3882 * input: Adapter soft state mfi cmd to build 3883 * 3884 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru 3885 * command and prepares the MPT command to send to Firmware. 3886 */ 3887 MRSAS_REQUEST_DESCRIPTOR_UNION * 3888 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3889 { 3890 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3891 u_int16_t index; 3892 3893 if (mrsas_build_mptmfi_passthru(sc, cmd)) { 3894 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); 3895 return NULL; 3896 } 3897 index = cmd->cmd_id.context.smid; 3898 3899 req_desc = mrsas_get_request_desc(sc, index - 1); 3900 if (!req_desc) 3901 return NULL; 3902 3903 req_desc->addr.Words = 0; 3904 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3905 3906 req_desc->SCSIIO.SMID = index; 3907 3908 return (req_desc); 3909 } 3910 3911 /* 3912 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command 3913 * input: Adapter soft state mfi cmd pointer 3914 * 3915 * The MPT command and the io_request are setup as a passthru command. The SGE 3916 * chain address is set to frame_phys_addr of the MFI command. 3917 */ 3918 u_int8_t 3919 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) 3920 { 3921 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 3922 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; 3923 struct mrsas_mpt_cmd *mpt_cmd; 3924 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; 3925 3926 mpt_cmd = mrsas_get_mpt_cmd(sc); 3927 if (!mpt_cmd) 3928 return (1); 3929 3930 /* Save the smid. To be used for returning the cmd */ 3931 mfi_cmd->cmd_id.context.smid = mpt_cmd->index; 3932 3933 mpt_cmd->sync_cmd_idx = mfi_cmd->index; 3934 3935 /* 3936 * For cmds where the flag is set, store the flag and check on 3937 * completion. For cmds with this flag, don't call 3938 * mrsas_complete_cmd. 3939 */ 3940 3941 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 3942 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 3943 3944 io_req = mpt_cmd->io_request; 3945 3946 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 3947 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; 3948 3949 sgl_ptr_end += sc->max_sge_in_main_msg - 1; 3950 sgl_ptr_end->Flags = 0; 3951 } 3952 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; 3953 3954 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3955 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; 3956 io_req->ChainOffset = sc->chain_offset_mfi_pthru; 3957 3958 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; 3959 3960 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3961 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3962 3963 mpi25_ieee_chain->Length = sc->max_chain_frame_sz; 3964 3965 return (0); 3966 } 3967 3968 /* 3969 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds 3970 * input: Adapter soft state Command to be issued 3971 * 3972 * This function waits on an event for the command to be returned from the ISR. 3973 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing 3974 * internal and ioctl commands. 3975 */ 3976 int 3977 mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3978 { 3979 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3980 unsigned long total_time = 0; 3981 int retcode = SUCCESS; 3982 3983 /* Initialize cmd_status */ 3984 cmd->cmd_status = 0xFF; 3985 3986 /* Build MPT-MFI command for issue to FW */ 3987 if (mrsas_issue_dcmd(sc, cmd)) { 3988 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); 3989 return (1); 3990 } 3991 sc->chan = (void *)&cmd; 3992 3993 while (1) { 3994 if (cmd->cmd_status == 0xFF) { 3995 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3996 } else 3997 break; 3998 3999 if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL 4000 * command */ 4001 total_time++; 4002 if (total_time >= max_wait) { 4003 device_printf(sc->mrsas_dev, 4004 "Internal command timed out after %d seconds.\n", max_wait); 4005 retcode = 1; 4006 break; 4007 } 4008 } 4009 } 4010 4011 if (cmd->cmd_status == 0xFF) { 4012 device_printf(sc->mrsas_dev, "DCMD timed out after %d " 4013 "seconds from %s\n", max_wait, __func__); 4014 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", 4015 cmd->frame->dcmd.opcode); 4016 retcode = ETIMEDOUT; 4017 } 4018 return (retcode); 4019 } 4020 4021 /* 4022 * mrsas_complete_mptmfi_passthru: Completes a command 4023 * input: @sc: Adapter soft state 4024 * @cmd: Command to be completed 4025 * @status: cmd completion status 4026 * 4027 * This function is called from mrsas_complete_cmd() after an interrupt is 4028 * received from Firmware, and io_request->Function is 4029 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. 4030 */ 4031 void 4032 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, 4033 u_int8_t status) 4034 { 4035 struct mrsas_header *hdr = &cmd->frame->hdr; 4036 u_int8_t cmd_status = cmd->frame->hdr.cmd_status; 4037 4038 /* Reset the retry counter for future re-tries */ 4039 cmd->retry_for_fw_reset = 0; 4040 4041 if (cmd->ccb_ptr) 4042 cmd->ccb_ptr = NULL; 4043 4044 switch (hdr->cmd) { 4045 case MFI_CMD_INVALID: 4046 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); 4047 break; 4048 case MFI_CMD_PD_SCSI_IO: 4049 case MFI_CMD_LD_SCSI_IO: 4050 /* 4051 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 4052 * issued either through an IO path or an IOCTL path. If it 4053 * was via IOCTL, we will send it to internal completion. 4054 */ 4055 if (cmd->sync_cmd) { 4056 cmd->sync_cmd = 0; 4057 mrsas_wakeup(sc, cmd); 4058 break; 4059 } 4060 case MFI_CMD_SMP: 4061 case MFI_CMD_STP: 4062 case MFI_CMD_DCMD: 4063 /* Check for LD map update */ 4064 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && 4065 (cmd->frame->dcmd.mbox.b[1] == 1)) { 4066 sc->fast_path_io = 0; 4067 mtx_lock(&sc->raidmap_lock); 4068 sc->map_update_cmd = NULL; 4069 if (cmd_status != 0) { 4070 if (cmd_status != MFI_STAT_NOT_FOUND) 4071 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); 4072 else { 4073 mrsas_release_mfi_cmd(cmd); 4074 mtx_unlock(&sc->raidmap_lock); 4075 break; 4076 } 4077 } else 4078 sc->map_id++; 4079 mrsas_release_mfi_cmd(cmd); 4080 if (MR_ValidateMapInfo(sc)) 4081 sc->fast_path_io = 0; 4082 else 4083 sc->fast_path_io = 1; 4084 mrsas_sync_map_info(sc); 4085 mtx_unlock(&sc->raidmap_lock); 4086 break; 4087 } 4088 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 4089 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 4090 sc->mrsas_aen_triggered = 0; 4091 } 4092 /* FW has an updated PD sequence */ 4093 if ((cmd->frame->dcmd.opcode == 4094 MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 4095 (cmd->frame->dcmd.mbox.b[0] == 1)) { 4096 4097 mtx_lock(&sc->raidmap_lock); 4098 sc->jbod_seq_cmd = NULL; 4099 mrsas_release_mfi_cmd(cmd); 4100 4101 if (cmd_status == MFI_STAT_OK) { 4102 sc->pd_seq_map_id++; 4103 /* Re-register a pd sync seq num cmd */ 4104 if (megasas_sync_pd_seq_num(sc, true)) 4105 sc->use_seqnum_jbod_fp = 0; 4106 } else { 4107 sc->use_seqnum_jbod_fp = 0; 4108 device_printf(sc->mrsas_dev, 4109 "Jbod map sync failed, status=%x\n", cmd_status); 4110 } 4111 mtx_unlock(&sc->raidmap_lock); 4112 break; 4113 } 4114 /* See if got an event notification */ 4115 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) 4116 mrsas_complete_aen(sc, cmd); 4117 else 4118 mrsas_wakeup(sc, cmd); 4119 break; 4120 case MFI_CMD_ABORT: 4121 /* Command issued to abort another cmd return */ 4122 mrsas_complete_abort(sc, cmd); 4123 break; 4124 default: 4125 device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); 4126 break; 4127 } 4128 } 4129 4130 /* 4131 * mrsas_wakeup: Completes an internal command 4132 * input: Adapter soft state 4133 * Command to be completed 4134 * 4135 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait 4136 * timer is started. This function is called from 4137 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up 4138 * from the command wait. 4139 */ 4140 void 4141 mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4142 { 4143 cmd->cmd_status = cmd->frame->io.cmd_status; 4144 4145 if (cmd->cmd_status == 0xFF) 4146 cmd->cmd_status = 0; 4147 4148 sc->chan = (void *)&cmd; 4149 wakeup_one((void *)&sc->chan); 4150 return; 4151 } 4152 4153 /* 4154 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: 4155 * Adapter soft state Shutdown/Hibernate 4156 * 4157 * This function issues a DCMD internal command to Firmware to initiate shutdown 4158 * of the controller. 4159 */ 4160 static void 4161 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 4162 { 4163 struct mrsas_mfi_cmd *cmd; 4164 struct mrsas_dcmd_frame *dcmd; 4165 4166 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4167 return; 4168 4169 cmd = mrsas_get_mfi_cmd(sc); 4170 if (!cmd) { 4171 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); 4172 return; 4173 } 4174 if (sc->aen_cmd) 4175 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 4176 if (sc->map_update_cmd) 4177 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 4178 if (sc->jbod_seq_cmd) 4179 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); 4180 4181 dcmd = &cmd->frame->dcmd; 4182 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4183 4184 dcmd->cmd = MFI_CMD_DCMD; 4185 dcmd->cmd_status = 0x0; 4186 dcmd->sge_count = 0; 4187 dcmd->flags = MFI_FRAME_DIR_NONE; 4188 dcmd->timeout = 0; 4189 dcmd->pad_0 = 0; 4190 dcmd->data_xfer_len = 0; 4191 dcmd->opcode = opcode; 4192 4193 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); 4194 4195 mrsas_issue_blocked_cmd(sc, cmd); 4196 mrsas_release_mfi_cmd(cmd); 4197 4198 return; 4199 } 4200 4201 /* 4202 * mrsas_flush_cache: Requests FW to flush all its caches input: 4203 * Adapter soft state 4204 * 4205 * This function is issues a DCMD internal command to Firmware to initiate 4206 * flushing of all caches. 4207 */ 4208 static void 4209 mrsas_flush_cache(struct mrsas_softc *sc) 4210 { 4211 struct mrsas_mfi_cmd *cmd; 4212 struct mrsas_dcmd_frame *dcmd; 4213 4214 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 4215 return; 4216 4217 cmd = mrsas_get_mfi_cmd(sc); 4218 if (!cmd) { 4219 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); 4220 return; 4221 } 4222 dcmd = &cmd->frame->dcmd; 4223 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4224 4225 dcmd->cmd = MFI_CMD_DCMD; 4226 dcmd->cmd_status = 0x0; 4227 dcmd->sge_count = 0; 4228 dcmd->flags = MFI_FRAME_DIR_NONE; 4229 dcmd->timeout = 0; 4230 dcmd->pad_0 = 0; 4231 dcmd->data_xfer_len = 0; 4232 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 4233 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4234 4235 mrsas_issue_blocked_cmd(sc, cmd); 4236 mrsas_release_mfi_cmd(cmd); 4237 4238 return; 4239 } 4240 4241 int 4242 megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) 4243 { 4244 int retcode = 0; 4245 u_int8_t do_ocr = 1; 4246 struct mrsas_mfi_cmd *cmd; 4247 struct mrsas_dcmd_frame *dcmd; 4248 uint32_t pd_seq_map_sz; 4249 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 4250 bus_addr_t pd_seq_h; 4251 4252 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 4253 (sizeof(struct MR_PD_CFG_SEQ) * 4254 (MAX_PHYSICAL_DEVICES - 1)); 4255 4256 cmd = mrsas_get_mfi_cmd(sc); 4257 if (!cmd) { 4258 device_printf(sc->mrsas_dev, 4259 "Cannot alloc for ld map info cmd.\n"); 4260 return 1; 4261 } 4262 dcmd = &cmd->frame->dcmd; 4263 4264 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; 4265 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; 4266 if (!pd_sync) { 4267 device_printf(sc->mrsas_dev, 4268 "Failed to alloc mem for jbod map info.\n"); 4269 mrsas_release_mfi_cmd(cmd); 4270 return (ENOMEM); 4271 } 4272 memset(pd_sync, 0, pd_seq_map_sz); 4273 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4274 dcmd->cmd = MFI_CMD_DCMD; 4275 dcmd->cmd_status = 0xFF; 4276 dcmd->sge_count = 1; 4277 dcmd->timeout = 0; 4278 dcmd->pad_0 = 0; 4279 dcmd->data_xfer_len = (pd_seq_map_sz); 4280 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO); 4281 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h); 4282 dcmd->sgl.sge32[0].length = (pd_seq_map_sz); 4283 4284 if (pend) { 4285 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; 4286 dcmd->flags = (MFI_FRAME_DIR_WRITE); 4287 sc->jbod_seq_cmd = cmd; 4288 if (mrsas_issue_dcmd(sc, cmd)) { 4289 device_printf(sc->mrsas_dev, 4290 "Fail to send sync map info command.\n"); 4291 return 1; 4292 } else 4293 return 0; 4294 } else 4295 dcmd->flags = MFI_FRAME_DIR_READ; 4296 4297 retcode = mrsas_issue_polled(sc, cmd); 4298 if (retcode == ETIMEDOUT) 4299 goto dcmd_timeout; 4300 4301 if (pd_sync->count > MAX_PHYSICAL_DEVICES) { 4302 device_printf(sc->mrsas_dev, 4303 "driver supports max %d JBOD, but FW reports %d\n", 4304 MAX_PHYSICAL_DEVICES, pd_sync->count); 4305 retcode = -EINVAL; 4306 } 4307 if (!retcode) 4308 sc->pd_seq_map_id++; 4309 do_ocr = 0; 4310 4311 dcmd_timeout: 4312 if (do_ocr) 4313 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4314 4315 return (retcode); 4316 } 4317 4318 /* 4319 * mrsas_get_map_info: Load and validate RAID map input: 4320 * Adapter instance soft state 4321 * 4322 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load 4323 * and validate RAID map. It returns 0 if successful, 1 other- wise. 4324 */ 4325 static int 4326 mrsas_get_map_info(struct mrsas_softc *sc) 4327 { 4328 uint8_t retcode = 0; 4329 4330 sc->fast_path_io = 0; 4331 if (!mrsas_get_ld_map_info(sc)) { 4332 retcode = MR_ValidateMapInfo(sc); 4333 if (retcode == 0) { 4334 sc->fast_path_io = 1; 4335 return 0; 4336 } 4337 } 4338 return 1; 4339 } 4340 4341 /* 4342 * mrsas_get_ld_map_info: Get FW's ld_map structure input: 4343 * Adapter instance soft state 4344 * 4345 * Issues an internal command (DCMD) to get the FW's controller PD list 4346 * structure. 4347 */ 4348 static int 4349 mrsas_get_ld_map_info(struct mrsas_softc *sc) 4350 { 4351 int retcode = 0; 4352 struct mrsas_mfi_cmd *cmd; 4353 struct mrsas_dcmd_frame *dcmd; 4354 void *map; 4355 bus_addr_t map_phys_addr = 0; 4356 4357 cmd = mrsas_get_mfi_cmd(sc); 4358 if (!cmd) { 4359 device_printf(sc->mrsas_dev, 4360 "Cannot alloc for ld map info cmd.\n"); 4361 return 1; 4362 } 4363 dcmd = &cmd->frame->dcmd; 4364 4365 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 4366 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 4367 if (!map) { 4368 device_printf(sc->mrsas_dev, 4369 "Failed to alloc mem for ld map info.\n"); 4370 mrsas_release_mfi_cmd(cmd); 4371 return (ENOMEM); 4372 } 4373 memset(map, 0, sizeof(sc->max_map_sz)); 4374 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4375 4376 dcmd->cmd = MFI_CMD_DCMD; 4377 dcmd->cmd_status = 0xFF; 4378 dcmd->sge_count = 1; 4379 dcmd->flags = MFI_FRAME_DIR_READ; 4380 dcmd->timeout = 0; 4381 dcmd->pad_0 = 0; 4382 dcmd->data_xfer_len = sc->current_map_sz; 4383 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 4384 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 4385 dcmd->sgl.sge32[0].length = sc->current_map_sz; 4386 4387 retcode = mrsas_issue_polled(sc, cmd); 4388 if (retcode == ETIMEDOUT) 4389 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4390 4391 return (retcode); 4392 } 4393 4394 /* 4395 * mrsas_sync_map_info: Get FW's ld_map structure input: 4396 * Adapter instance soft state 4397 * 4398 * Issues an internal command (DCMD) to get the FW's controller PD list 4399 * structure. 4400 */ 4401 static int 4402 mrsas_sync_map_info(struct mrsas_softc *sc) 4403 { 4404 int retcode = 0, i; 4405 struct mrsas_mfi_cmd *cmd; 4406 struct mrsas_dcmd_frame *dcmd; 4407 uint32_t size_sync_info, num_lds; 4408 MR_LD_TARGET_SYNC *target_map = NULL; 4409 MR_DRV_RAID_MAP_ALL *map; 4410 MR_LD_RAID *raid; 4411 MR_LD_TARGET_SYNC *ld_sync; 4412 bus_addr_t map_phys_addr = 0; 4413 4414 cmd = mrsas_get_mfi_cmd(sc); 4415 if (!cmd) { 4416 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); 4417 return ENOMEM; 4418 } 4419 map = sc->ld_drv_map[sc->map_id & 1]; 4420 num_lds = map->raidMap.ldCount; 4421 4422 dcmd = &cmd->frame->dcmd; 4423 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 4424 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4425 4426 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; 4427 memset(target_map, 0, sc->max_map_sz); 4428 4429 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 4430 4431 ld_sync = (MR_LD_TARGET_SYNC *) target_map; 4432 4433 for (i = 0; i < num_lds; i++, ld_sync++) { 4434 raid = MR_LdRaidGet(i, map); 4435 ld_sync->targetId = MR_GetLDTgtId(i, map); 4436 ld_sync->seqNum = raid->seqNum; 4437 } 4438 4439 dcmd->cmd = MFI_CMD_DCMD; 4440 dcmd->cmd_status = 0xFF; 4441 dcmd->sge_count = 1; 4442 dcmd->flags = MFI_FRAME_DIR_WRITE; 4443 dcmd->timeout = 0; 4444 dcmd->pad_0 = 0; 4445 dcmd->data_xfer_len = sc->current_map_sz; 4446 dcmd->mbox.b[0] = num_lds; 4447 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 4448 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 4449 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 4450 dcmd->sgl.sge32[0].length = sc->current_map_sz; 4451 4452 sc->map_update_cmd = cmd; 4453 if (mrsas_issue_dcmd(sc, cmd)) { 4454 device_printf(sc->mrsas_dev, 4455 "Fail to send sync map info command.\n"); 4456 return (1); 4457 } 4458 return (retcode); 4459 } 4460 4461 /* Input: dcmd.opcode - MR_DCMD_PD_GET_INFO 4462 * dcmd.mbox.s[0] - deviceId for this physical drive 4463 * dcmd.sge IN - ptr to returned MR_PD_INFO structure 4464 * Desc: Firmware return the physical drive info structure 4465 * 4466 */ 4467 static void 4468 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id) 4469 { 4470 int retcode; 4471 u_int8_t do_ocr = 1; 4472 struct mrsas_mfi_cmd *cmd; 4473 struct mrsas_dcmd_frame *dcmd; 4474 4475 cmd = mrsas_get_mfi_cmd(sc); 4476 4477 if (!cmd) { 4478 device_printf(sc->mrsas_dev, 4479 "Cannot alloc for get PD info cmd\n"); 4480 return; 4481 } 4482 dcmd = &cmd->frame->dcmd; 4483 4484 memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info)); 4485 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4486 4487 dcmd->mbox.s[0] = device_id; 4488 dcmd->cmd = MFI_CMD_DCMD; 4489 dcmd->cmd_status = 0xFF; 4490 dcmd->sge_count = 1; 4491 dcmd->flags = MFI_FRAME_DIR_READ; 4492 dcmd->timeout = 0; 4493 dcmd->pad_0 = 0; 4494 dcmd->data_xfer_len = sizeof(struct mrsas_pd_info); 4495 dcmd->opcode = MR_DCMD_PD_GET_INFO; 4496 dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr; 4497 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info); 4498 4499 if (!sc->mask_interrupts) 4500 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4501 else 4502 retcode = mrsas_issue_polled(sc, cmd); 4503 4504 if (retcode == ETIMEDOUT) 4505 goto dcmd_timeout; 4506 4507 sc->target_list[device_id].interface_type = 4508 sc->pd_info_mem->state.ddf.pdType.intf; 4509 4510 do_ocr = 0; 4511 4512 dcmd_timeout: 4513 4514 if (do_ocr) 4515 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4516 4517 if (!sc->mask_interrupts) 4518 mrsas_release_mfi_cmd(cmd); 4519 } 4520 4521 /* 4522 * mrsas_add_target: Add target ID of system PD/VD to driver's data structure. 4523 * sc: Adapter's soft state 4524 * target_id: Unique target id per controller(managed by driver) 4525 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) 4526 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS 4527 * return: void 4528 * Descripton: This function will be called whenever system PD or VD is created. 4529 */ 4530 static void mrsas_add_target(struct mrsas_softc *sc, 4531 u_int16_t target_id) 4532 { 4533 sc->target_list[target_id].target_id = target_id; 4534 4535 device_printf(sc->mrsas_dev, 4536 "%s created target ID: 0x%x\n", 4537 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4538 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4539 /* 4540 * If interrupts are enabled, then only fire DCMD to get pd_info 4541 * for system PDs 4542 */ 4543 if (!sc->mask_interrupts && sc->pd_info_mem && 4544 (target_id < MRSAS_MAX_PD)) 4545 mrsas_get_pd_info(sc, target_id); 4546 4547 } 4548 4549 /* 4550 * mrsas_remove_target: Remove target ID of system PD/VD from driver's data structure. 4551 * sc: Adapter's soft state 4552 * target_id: Unique target id per controller(managed by driver) 4553 * for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1) 4554 * for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS 4555 * return: void 4556 * Descripton: This function will be called whenever system PD or VD is deleted 4557 */ 4558 static void mrsas_remove_target(struct mrsas_softc *sc, 4559 u_int16_t target_id) 4560 { 4561 sc->target_list[target_id].target_id = 0xffff; 4562 device_printf(sc->mrsas_dev, 4563 "%s deleted target ID: 0x%x\n", 4564 (target_id < MRSAS_MAX_PD ? "System PD" : "VD"), 4565 (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD))); 4566 } 4567 4568 /* 4569 * mrsas_get_pd_list: Returns FW's PD list structure input: 4570 * Adapter soft state 4571 * 4572 * Issues an internal command (DCMD) to get the FW's controller PD list 4573 * structure. This information is mainly used to find out about system 4574 * supported by Firmware. 4575 */ 4576 static int 4577 mrsas_get_pd_list(struct mrsas_softc *sc) 4578 { 4579 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 4580 u_int8_t do_ocr = 1; 4581 struct mrsas_mfi_cmd *cmd; 4582 struct mrsas_dcmd_frame *dcmd; 4583 struct MR_PD_LIST *pd_list_mem; 4584 struct MR_PD_ADDRESS *pd_addr; 4585 bus_addr_t pd_list_phys_addr = 0; 4586 struct mrsas_tmp_dcmd *tcmd; 4587 4588 cmd = mrsas_get_mfi_cmd(sc); 4589 if (!cmd) { 4590 device_printf(sc->mrsas_dev, 4591 "Cannot alloc for get PD list cmd\n"); 4592 return 1; 4593 } 4594 dcmd = &cmd->frame->dcmd; 4595 4596 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4597 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4598 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 4599 device_printf(sc->mrsas_dev, 4600 "Cannot alloc dmamap for get PD list cmd\n"); 4601 mrsas_release_mfi_cmd(cmd); 4602 mrsas_free_tmp_dcmd(tcmd); 4603 free(tcmd, M_MRSAS); 4604 return (ENOMEM); 4605 } else { 4606 pd_list_mem = tcmd->tmp_dcmd_mem; 4607 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4608 } 4609 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4610 4611 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4612 dcmd->mbox.b[1] = 0; 4613 dcmd->cmd = MFI_CMD_DCMD; 4614 dcmd->cmd_status = 0xFF; 4615 dcmd->sge_count = 1; 4616 dcmd->flags = MFI_FRAME_DIR_READ; 4617 dcmd->timeout = 0; 4618 dcmd->pad_0 = 0; 4619 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4620 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 4621 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 4622 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4623 4624 if (!sc->mask_interrupts) 4625 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4626 else 4627 retcode = mrsas_issue_polled(sc, cmd); 4628 4629 if (retcode == ETIMEDOUT) 4630 goto dcmd_timeout; 4631 4632 /* Get the instance PD list */ 4633 pd_count = MRSAS_MAX_PD; 4634 pd_addr = pd_list_mem->addr; 4635 if (pd_list_mem->count < pd_count) { 4636 memset(sc->local_pd_list, 0, 4637 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 4638 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 4639 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 4640 sc->local_pd_list[pd_addr->deviceId].driveType = 4641 pd_addr->scsiDevType; 4642 sc->local_pd_list[pd_addr->deviceId].driveState = 4643 MR_PD_STATE_SYSTEM; 4644 if (sc->target_list[pd_addr->deviceId].target_id == 0xffff) 4645 mrsas_add_target(sc, pd_addr->deviceId); 4646 pd_addr++; 4647 } 4648 for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) { 4649 if ((sc->local_pd_list[pd_index].driveState != 4650 MR_PD_STATE_SYSTEM) && 4651 (sc->target_list[pd_index].target_id != 4652 0xffff)) { 4653 mrsas_remove_target(sc, pd_index); 4654 } 4655 } 4656 /* 4657 * Use mutext/spinlock if pd_list component size increase more than 4658 * 32 bit. 4659 */ 4660 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 4661 do_ocr = 0; 4662 } 4663 dcmd_timeout: 4664 mrsas_free_tmp_dcmd(tcmd); 4665 free(tcmd, M_MRSAS); 4666 4667 if (do_ocr) 4668 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4669 4670 if (!sc->mask_interrupts) 4671 mrsas_release_mfi_cmd(cmd); 4672 4673 return (retcode); 4674 } 4675 4676 /* 4677 * mrsas_get_ld_list: Returns FW's LD list structure input: 4678 * Adapter soft state 4679 * 4680 * Issues an internal command (DCMD) to get the FW's controller PD list 4681 * structure. This information is mainly used to find out about supported by 4682 * the FW. 4683 */ 4684 static int 4685 mrsas_get_ld_list(struct mrsas_softc *sc) 4686 { 4687 int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id; 4688 u_int8_t do_ocr = 1; 4689 struct mrsas_mfi_cmd *cmd; 4690 struct mrsas_dcmd_frame *dcmd; 4691 struct MR_LD_LIST *ld_list_mem; 4692 bus_addr_t ld_list_phys_addr = 0; 4693 struct mrsas_tmp_dcmd *tcmd; 4694 4695 cmd = mrsas_get_mfi_cmd(sc); 4696 if (!cmd) { 4697 device_printf(sc->mrsas_dev, 4698 "Cannot alloc for get LD list cmd\n"); 4699 return 1; 4700 } 4701 dcmd = &cmd->frame->dcmd; 4702 4703 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4704 ld_list_size = sizeof(struct MR_LD_LIST); 4705 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 4706 device_printf(sc->mrsas_dev, 4707 "Cannot alloc dmamap for get LD list cmd\n"); 4708 mrsas_release_mfi_cmd(cmd); 4709 mrsas_free_tmp_dcmd(tcmd); 4710 free(tcmd, M_MRSAS); 4711 return (ENOMEM); 4712 } else { 4713 ld_list_mem = tcmd->tmp_dcmd_mem; 4714 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4715 } 4716 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4717 4718 if (sc->max256vdSupport) 4719 dcmd->mbox.b[0] = 1; 4720 4721 dcmd->cmd = MFI_CMD_DCMD; 4722 dcmd->cmd_status = 0xFF; 4723 dcmd->sge_count = 1; 4724 dcmd->flags = MFI_FRAME_DIR_READ; 4725 dcmd->timeout = 0; 4726 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 4727 dcmd->opcode = MR_DCMD_LD_GET_LIST; 4728 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 4729 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 4730 dcmd->pad_0 = 0; 4731 4732 if (!sc->mask_interrupts) 4733 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4734 else 4735 retcode = mrsas_issue_polled(sc, cmd); 4736 4737 if (retcode == ETIMEDOUT) 4738 goto dcmd_timeout; 4739 4740 #if VD_EXT_DEBUG 4741 printf("Number of LDs %d\n", ld_list_mem->ldCount); 4742 #endif 4743 4744 /* Get the instance LD list */ 4745 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { 4746 sc->CurLdCount = ld_list_mem->ldCount; 4747 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4748 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 4749 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4750 drv_tgt_id = ids + MRSAS_MAX_PD; 4751 if (ld_list_mem->ldList[ld_index].state != 0) { 4752 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4753 if (sc->target_list[drv_tgt_id].target_id == 4754 0xffff) 4755 mrsas_add_target(sc, drv_tgt_id); 4756 } else { 4757 if (sc->target_list[drv_tgt_id].target_id != 4758 0xffff) 4759 mrsas_remove_target(sc, 4760 drv_tgt_id); 4761 } 4762 } 4763 4764 do_ocr = 0; 4765 } 4766 dcmd_timeout: 4767 mrsas_free_tmp_dcmd(tcmd); 4768 free(tcmd, M_MRSAS); 4769 4770 if (do_ocr) 4771 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4772 if (!sc->mask_interrupts) 4773 mrsas_release_mfi_cmd(cmd); 4774 4775 return (retcode); 4776 } 4777 4778 /* 4779 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 4780 * Adapter soft state Temp command Size of alloction 4781 * 4782 * Allocates DMAable memory for a temporary internal command. The allocated 4783 * memory is initialized to all zeros upon successful loading of the dma 4784 * mapped memory. 4785 */ 4786 int 4787 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 4788 struct mrsas_tmp_dcmd *tcmd, int size) 4789 { 4790 if (bus_dma_tag_create(sc->mrsas_parent_tag, 4791 1, 0, 4792 BUS_SPACE_MAXADDR_32BIT, 4793 BUS_SPACE_MAXADDR, 4794 NULL, NULL, 4795 size, 4796 1, 4797 size, 4798 BUS_DMA_ALLOCNOW, 4799 NULL, NULL, 4800 &tcmd->tmp_dcmd_tag)) { 4801 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 4802 return (ENOMEM); 4803 } 4804 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 4805 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 4806 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 4807 return (ENOMEM); 4808 } 4809 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 4810 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 4811 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 4812 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 4813 return (ENOMEM); 4814 } 4815 memset(tcmd->tmp_dcmd_mem, 0, size); 4816 return (0); 4817 } 4818 4819 /* 4820 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 4821 * temporary dcmd pointer 4822 * 4823 * Deallocates memory of the temporary command for use in the construction of 4824 * the internal DCMD. 4825 */ 4826 void 4827 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 4828 { 4829 if (tmp->tmp_dcmd_phys_addr) 4830 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 4831 if (tmp->tmp_dcmd_mem != NULL) 4832 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 4833 if (tmp->tmp_dcmd_tag != NULL) 4834 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 4835 } 4836 4837 /* 4838 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: 4839 * Adapter soft state Previously issued cmd to be aborted 4840 * 4841 * This function is used to abort previously issued commands, such as AEN and 4842 * RAID map sync map commands. The abort command is sent as a DCMD internal 4843 * command and subsequently the driver will wait for a return status. The 4844 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 4845 */ 4846 static int 4847 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 4848 struct mrsas_mfi_cmd *cmd_to_abort) 4849 { 4850 struct mrsas_mfi_cmd *cmd; 4851 struct mrsas_abort_frame *abort_fr; 4852 u_int8_t retcode = 0; 4853 unsigned long total_time = 0; 4854 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 4855 4856 cmd = mrsas_get_mfi_cmd(sc); 4857 if (!cmd) { 4858 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 4859 return (1); 4860 } 4861 abort_fr = &cmd->frame->abort; 4862 4863 /* Prepare and issue the abort frame */ 4864 abort_fr->cmd = MFI_CMD_ABORT; 4865 abort_fr->cmd_status = 0xFF; 4866 abort_fr->flags = 0; 4867 abort_fr->abort_context = cmd_to_abort->index; 4868 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 4869 abort_fr->abort_mfi_phys_addr_hi = 0; 4870 4871 cmd->sync_cmd = 1; 4872 cmd->cmd_status = 0xFF; 4873 4874 if (mrsas_issue_dcmd(sc, cmd)) { 4875 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 4876 return (1); 4877 } 4878 /* Wait for this cmd to complete */ 4879 sc->chan = (void *)&cmd; 4880 while (1) { 4881 if (cmd->cmd_status == 0xFF) { 4882 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 4883 } else 4884 break; 4885 total_time++; 4886 if (total_time >= max_wait) { 4887 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 4888 retcode = 1; 4889 break; 4890 } 4891 } 4892 4893 cmd->sync_cmd = 0; 4894 mrsas_release_mfi_cmd(cmd); 4895 return (retcode); 4896 } 4897 4898 /* 4899 * mrsas_complete_abort: Completes aborting a command input: 4900 * Adapter soft state Cmd that was issued to abort another cmd 4901 * 4902 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to 4903 * change after sending the command. This function is called from 4904 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 4905 */ 4906 void 4907 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 4908 { 4909 if (cmd->sync_cmd) { 4910 cmd->sync_cmd = 0; 4911 cmd->cmd_status = 0; 4912 sc->chan = (void *)&cmd; 4913 wakeup_one((void *)&sc->chan); 4914 } 4915 return; 4916 } 4917 4918 /* 4919 * mrsas_aen_handler: AEN processing callback function from thread context 4920 * input: Adapter soft state 4921 * 4922 * Asynchronous event handler 4923 */ 4924 void 4925 mrsas_aen_handler(struct mrsas_softc *sc) 4926 { 4927 union mrsas_evt_class_locale class_locale; 4928 int doscan = 0; 4929 u_int32_t seq_num; 4930 int error, fail_aen = 0; 4931 4932 if (sc == NULL) { 4933 printf("invalid instance!\n"); 4934 return; 4935 } 4936 if (sc->remove_in_progress || sc->reset_in_progress) { 4937 device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n", 4938 __func__, __LINE__); 4939 return; 4940 } 4941 if (sc->evt_detail_mem) { 4942 switch (sc->evt_detail_mem->code) { 4943 case MR_EVT_PD_INSERTED: 4944 fail_aen = mrsas_get_pd_list(sc); 4945 if (!fail_aen) 4946 mrsas_bus_scan_sim(sc, sc->sim_1); 4947 else 4948 goto skip_register_aen; 4949 break; 4950 case MR_EVT_PD_REMOVED: 4951 fail_aen = mrsas_get_pd_list(sc); 4952 if (!fail_aen) 4953 mrsas_bus_scan_sim(sc, sc->sim_1); 4954 else 4955 goto skip_register_aen; 4956 break; 4957 case MR_EVT_LD_OFFLINE: 4958 case MR_EVT_CFG_CLEARED: 4959 case MR_EVT_LD_DELETED: 4960 mrsas_bus_scan_sim(sc, sc->sim_0); 4961 break; 4962 case MR_EVT_LD_CREATED: 4963 fail_aen = mrsas_get_ld_list(sc); 4964 if (!fail_aen) 4965 mrsas_bus_scan_sim(sc, sc->sim_0); 4966 else 4967 goto skip_register_aen; 4968 break; 4969 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 4970 case MR_EVT_FOREIGN_CFG_IMPORTED: 4971 case MR_EVT_LD_STATE_CHANGE: 4972 doscan = 1; 4973 break; 4974 case MR_EVT_CTRL_PROP_CHANGED: 4975 fail_aen = mrsas_get_ctrl_info(sc); 4976 if (fail_aen) 4977 goto skip_register_aen; 4978 break; 4979 default: 4980 break; 4981 } 4982 } else { 4983 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 4984 return; 4985 } 4986 if (doscan) { 4987 fail_aen = mrsas_get_pd_list(sc); 4988 if (!fail_aen) { 4989 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 4990 mrsas_bus_scan_sim(sc, sc->sim_1); 4991 } else 4992 goto skip_register_aen; 4993 4994 fail_aen = mrsas_get_ld_list(sc); 4995 if (!fail_aen) { 4996 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 4997 mrsas_bus_scan_sim(sc, sc->sim_0); 4998 } else 4999 goto skip_register_aen; 5000 } 5001 seq_num = sc->evt_detail_mem->seq_num + 1; 5002 5003 /* Register AEN with FW for latest sequence number plus 1 */ 5004 class_locale.members.reserved = 0; 5005 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5006 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5007 5008 if (sc->aen_cmd != NULL) 5009 return; 5010 5011 mtx_lock(&sc->aen_lock); 5012 error = mrsas_register_aen(sc, seq_num, 5013 class_locale.word); 5014 mtx_unlock(&sc->aen_lock); 5015 5016 if (error) 5017 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 5018 5019 skip_register_aen: 5020 return; 5021 5022 } 5023 5024 5025 /* 5026 * mrsas_complete_aen: Completes AEN command 5027 * input: Adapter soft state 5028 * Cmd that was issued to abort another cmd 5029 * 5030 * This function will be called from ISR and will continue event processing from 5031 * thread context by enqueuing task in ev_tq (callback function 5032 * "mrsas_aen_handler"). 5033 */ 5034 void 5035 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 5036 { 5037 /* 5038 * Don't signal app if it is just an aborted previously registered 5039 * aen 5040 */ 5041 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 5042 sc->mrsas_aen_triggered = 1; 5043 mtx_lock(&sc->aen_lock); 5044 if (sc->mrsas_poll_waiting) { 5045 sc->mrsas_poll_waiting = 0; 5046 selwakeup(&sc->mrsas_select); 5047 } 5048 mtx_unlock(&sc->aen_lock); 5049 } else 5050 cmd->abort_aen = 0; 5051 5052 sc->aen_cmd = NULL; 5053 mrsas_release_mfi_cmd(cmd); 5054 5055 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 5056 5057 return; 5058 } 5059 5060 static device_method_t mrsas_methods[] = { 5061 DEVMETHOD(device_probe, mrsas_probe), 5062 DEVMETHOD(device_attach, mrsas_attach), 5063 DEVMETHOD(device_detach, mrsas_detach), 5064 DEVMETHOD(device_shutdown, mrsas_shutdown), 5065 DEVMETHOD(device_suspend, mrsas_suspend), 5066 DEVMETHOD(device_resume, mrsas_resume), 5067 DEVMETHOD(bus_print_child, bus_generic_print_child), 5068 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 5069 {0, 0} 5070 }; 5071 5072 static driver_t mrsas_driver = { 5073 "mrsas", 5074 mrsas_methods, 5075 sizeof(struct mrsas_softc) 5076 }; 5077 5078 static devclass_t mrsas_devclass; 5079 5080 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); 5081 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 5082