/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>


/*
 * Function prototypes
 */
static d_open_t mrsas_open;
static d_close_t mrsas_close;
static d_read_t mrsas_read;
static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;

static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
static int mrsas_allocate_msix(struct mrsas_softc *sc);
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
static void mrsas_flush_cache(struct mrsas_softc *sc);
static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
static void mrsas_ocr_thread(void *arg);
static int mrsas_get_map_info(struct mrsas_softc *sc);
static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
static int mrsas_sync_map_info(struct mrsas_softc *sc);
static int mrsas_get_pd_list(struct mrsas_softc *sc);
static int mrsas_get_ld_list(struct mrsas_softc *sc);
static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
static void megasas_setup_jbod_map(struct mrsas_softc *sc);
static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort);
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev,
    u_long cmd, caddr_t arg);
u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *mfi_cmd);
void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
int mrsas_init_adapter(struct mrsas_softc *sc);
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int mrsas_reset_targets(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
    int size);
void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void mrsas_disable_intr(struct mrsas_softc *sc);
void mrsas_enable_intr(struct mrsas_softc *sc);
void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void mrsas_free_mem(struct mrsas_softc *sc);
void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
void mrsas_isr(void *arg);
void mrsas_teardown_intr(struct mrsas_softc *sc);
void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
void mrsas_kill_hba(struct mrsas_softc *sc);
void mrsas_aen_handler(struct mrsas_softc *sc);
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd, u_int8_t status);
struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);

MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
    (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);

extern int mrsas_cam_attach(struct mrsas_softc *sc);
extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc,
    u_int16_t index);
extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);

void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
    union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
    u_int32_t data_length, u_int8_t *sense);
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);


SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;
	const char *desc;
}	MRSAS_CTLR_ID;

MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member of
 * struct cdev. We set this variable to point to our softc in our attach
 * routine when we create the /dev entry.
 */
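/*
 * For illustration (this is what mrsas_ich_startup() below actually does):
 *
 *	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, ...);
 *	sc->mrsas_cdev->si_drv1 = sc;
 *
 * so each cdevsw entry point can recover its softc from dev->si_drv1.
 */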
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
{
	u_int32_t i = 0, ret_val;

	if (sc->is_aero) {
		do {
			ret_val = mrsas_read_reg(sc, offset);
			i++;
		} while (ret_val == 0 && i < 3);
	} else
		ret_val = mrsas_read_reg(sc, offset);

	return ret_val;
}
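/*
 * The retry loop above works around Aero controllers occasionally returning
 * 0 on a register read: up to three reads are issued before the value is
 * accepted. Other controller generations take the single-read path.
 */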
/*
 * Register Read/Write Functions
 *
 */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}


/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status;

	/* Read received interrupt */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:		Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of OCRs from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in milliseconds.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "OCR in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream_detection", CTLFLAG_RW,
	    &sc->drv_stream_detection, 0,
	    "Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "sge_holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}

/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:		Adapter instance soft state
 *
 * Get tunable parameters. This helps to debug the driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
	    (MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
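/*
 * Example usage (a sketch, not driver code; the values are illustrative):
 * the same knobs can be seeded at boot from loader.conf,
 *
 *	hw.mrsas.debug_level="0"
 *	hw.mrsas.lb_pending_cmds="4"
 *	dev.mrsas.0.debug_level="0"
 *
 * or adjusted at runtime through the nodes created above, e.g.
 * `sysctl dev.mrsas.0.mrsas_debug=0` (hw.mrsas.<unit>.* if the fallback
 * sysctl tree was used).
 */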
/*
 * mrsas_alloc_evt_log_info_cmd:	Allocates memory to get event log information.
 *	Used to get sequence number at driver load time.
 * input:	Adapter soft state
 *
 * Allocates DMAable memory for the event log info internal command.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,
	    1,
	    el_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}

/*
 * mrsas_free_evt_log_info_cmd:	Free memory for Event log info command
 * input:			Adapter soft state
 *
 * Deallocates memory for the event log info internal command.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}

/*
 * mrsas_get_seq_num:	Get latest event sequence number
 * @sc:			Adapter soft state
 * @eli:		Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * The driver gets the sequence number using the DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into the caller's buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}


/*
 * mrsas_register_aen:	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale:	Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is a superset of both old and current and
	 * re-issue it to the FW.
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such a hierarchy. They
		 * are bitmap values.
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
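/*
 * Worked example of the merge above: if the pending AEN was registered with
 * class PROGRESS (-1) and locale 0x1, and a new request asks for class
 * CRITICAL with locale 0x4, the locales are OR-ed into 0x5 and the smaller
 * (more inclusive) class PROGRESS is kept; the pending command is then
 * aborted and re-issued with that merged class_locale word.
 */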
/*
 * mrsas_start_aen:	Subscribes to AEN during driver load time
 * @instance:		Adapter soft state
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;

	/* Get the latest sequence number from FW */
	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);
}

/*
 * mrsas_setup_msix:	Set up MSI-x vectors
 * @sc:			adapter soft state
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[i], RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_allocate_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d vectors\n",
		    sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}
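/*
 * Note on the MSI-X flow: the desired vector count is negotiated elsewhere
 * in the driver before mrsas_allocate_msix() reserves the vectors through
 * pci_alloc_msix(); mrsas_setup_irq()/mrsas_setup_msix() then attach one
 * handler per vector (SYS_RES_IRQ resource IDs 1..msix_vectors), and any
 * failure falls back to a single shared legacy INTx interrupt.
 */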
/*
 * mrsas_attach:	PCI entry point
 * input:		pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize linked lists */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we defer the cdev creation and AEN setup to the ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
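/*
 * Note that attach only establishes the config hook: the user-visible pieces
 * (the /dev nodes, mrsas_mgmt_info registration, interrupt enable and the
 * initial AEN registration) are deferred to mrsas_ich_startup() below, which
 * runs once interrupts can be delivered. The attach_fail_* labels unwind the
 * setup steps in reverse order.
 */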
/*
 * Interrupt config hook
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore to limit the number of
	 * concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
		    sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller "
		    "or the controller does not support AEN.\n"
		    "Please contact the SUPPORT TEAM if the problem persists\n");
	}
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}

/*
 * mrsas_detach:	De-allocates and teardown resources
 * input:		pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array.
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
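/*
 * Detach ordering matters: the character devices go away first so no new
 * ioctls can arrive, then the function waits out any OCR in progress and the
 * OCR thread itself, flushes and shuts down the controller, and only after
 * that tears down CAM, interrupts, memory, the locks and the ioctl counting
 * semaphore.
 */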
/*
 * mrsas_free_mem:	Frees allocated memory
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
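/*
 * Each DMA buffer above is released in the reverse order of its creation:
 * bus_dmamap_unload() drops the mapping, bus_dmamem_free() returns the
 * memory, and bus_dma_tag_destroy() destroys the tag. The NULL/zero checks
 * make the function safe to call from a partially failed attach.
 */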
/*
 * mrsas_teardown_intr:	Teardown interrupt
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_detach() to tear down and release bus
 * interrupt resources.
 */
void
mrsas_teardown_intr(struct mrsas_softc *sc)
{
	int i;

	if (!sc->msix_enable) {
		if (sc->intr_handle[0])
			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
		if (sc->mrsas_irq[0] != NULL)
			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
			    sc->irq_id[0], sc->mrsas_irq[0]);
		sc->intr_handle[0] = NULL;
	} else {
		for (i = 0; i < sc->msix_vectors; i++) {
			if (sc->intr_handle[i])
				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
				    sc->intr_handle[i]);

			if (sc->mrsas_irq[i] != NULL)
				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
				    sc->irq_id[i], sc->mrsas_irq[i]);

			sc->intr_handle[i] = NULL;
		}
		pci_release_msi(sc->mrsas_dev);
	}
}

/*
 * mrsas_suspend:	Suspend entry point
 * input:		Device struct pointer
 *
 * This function is the entry point for system suspend from the OS.
 */
static int
mrsas_suspend(device_t dev)
{
	/* This will be filled in when the driver gains hibernation support */
	return (0);
}

/*
 * mrsas_resume:	Resume entry point
 * input:		Device struct pointer
 *
 * This function is the entry point for system resume from the OS.
 */
static int
mrsas_resume(device_t dev)
{
	/* This will be filled in when the driver gains hibernation support */
	return (0);
}
/**
 * mrsas_get_softc_instance:	Find softc instance based on cmd type
 *
 * This function returns the softc instance based on cmd type.
 * In some cases, applications fire the ioctl on the required management
 * instance and do not provide host_no; use cdev->si_drv1 to get the softc
 * instance in those cases. Otherwise, get the softc instance from the
 * host_no provided by the application in user data.
 */
static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
{
	struct mrsas_softc *sc = NULL;
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
		sc = dev->si_drv1;
	} else {
		/*
		 * get the Host number & the softc from data sent by the
		 * Application
		 */
		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
		if (sc == NULL)
			printf("There is no Controller number %d\n",
			    user_ioc->host_no);
		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
			mrsas_dprint(sc, MRSAS_FAULT,
			    "Invalid Controller number %d\n", user_ioc->host_no);
	}

	return sc;
}

/*
 * mrsas_ioctl:	IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
		    "HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d, "
		    "pci device no: %d, pci function no: %d, "
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
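/*
 * Illustration of the lookup in mrsas_get_softc_instance() above: a
 * management application that already knows its adapter fills
 * user_ioc->host_no (say 0) and the driver resolves it through
 * mrsas_mgmt_info.sc_ptr[0]; only MRSAS_IOC_GET_PCI_INFO is resolved through
 * the per-node dev->si_drv1 pointer instead.
 */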
/*
 * mrsas_poll:	poll entry point for mrsas driver fd
 *
 * This function is the entry point for poll from the OS. It waits for some
 * AEN events to be triggered from the controller and notifies back.
 */
static int
mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mrsas_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mrsas_aen_triggered) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
	}
	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			mtx_lock(&sc->aen_lock);
			sc->mrsas_poll_waiting = 1;
			selrecord(td, &sc->mrsas_select);
			mtx_unlock(&sc->aen_lock);
		}
	}
	return revents;
}

/*
 * mrsas_setup_irq:	Set up interrupt
 * input:		Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags indicating
 * that the resource permits contemporaneous sharing and that it is to be
 * activated atomically.
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");

	else {
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			device_printf(sc->mrsas_dev, "Cannot allocate legacy "
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy "
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr:	ISR entry point
 * input:	argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf("Entered ISR while OCR is going active.\n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process the reply request and clear the response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd:	Process reply request
 * input:		Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process the reply request and
 * clear the response interrupt. Processing of the reply request entails
 * walking through the reply descriptor array for the command request pended
 * from Firmware. We look at the Function field to determine the command type
 * and perform the appropriate action. Before we return, we clear the
 * response interrupt.
 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, there is no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
			wakeup_one((void *)&sc->ocr_chan);
			break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, data_length, sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer Raid 1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status with the failed
				 * status and make sure that if any command
				 * fails, the driver returns fail status to
				 * CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner = false;
					}
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, data_length, sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to the reply post index after completing the
		 * threshold reply count while more replies are still pending
		 * in the reply queue.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
/*
 * mrsas_map_mpt_cmd_status:	Map FW command status to a CAM status
 * input:			MPT command, CCB pointer, FW status/extStatus,
 * 				data length, sense buffer
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
    u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, sense, 18);
			ccb_ptr->csio.sense_len = 18;
			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		if (ccb_ptr->ccb_h.target_lun)
			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		ccb_ptr->csio.scsi_status = status;
	}
	return;
}
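/*
 * The 18-byte copy above corresponds to the classic fixed-format SCSI sense
 * layout (8-byte header plus 10 bytes of additional sense), which is enough
 * for CAM consumers that only inspect the sense key / ASC / ASCQ fields.
 */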
/*
 * mrsas_alloc_mem:	Allocate DMAable memory
 * input:		Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. The
 * DMA tag describes the constraints of the DMA mapping. Memory allocated is
 * mapped into kernel virtual address space. The callback argument receives
 * the physical memory address.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXPHYS,			/* maxsize */
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep it in the lower 4 GB.
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a DMA tag for data buffers; the size will be the maximum
	 * possible I/O size (280 kB).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAXPHYS,
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_addr_cb:	Callback function of bus_dmamap_load()
 * input:		callback argument, machine-dependent type that
 * 			describes DMA segments, number of segments, error code
 *
 * This function receives the mapping information produced by
 * bus_dmamap_load(); only the bus address of the first (and, for these
 * single-segment tags, only) segment is saved through the callback argument.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
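/*
 * Each persistent buffer above follows the same three-step busdma recipe:
 * bus_dma_tag_create() to describe the constraints (alignment, 32-bit
 * addressing, single segment), bus_dmamem_alloc() to obtain the kernel
 * virtual buffer, and bus_dmamap_load() with mrsas_addr_cb() to capture the
 * bus address that will be handed to the firmware.
 */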
/*
 * mrsas_setup_raidmap:	Set up RAID map.
 * input:		Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
 */
static int
mrsas_setup_raidmap(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < 2; i++) {
		sc->ld_drv_map[i] =
		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
		/* Do error handling */
		if (!sc->ld_drv_map[i]) {
			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");

			if (i == 1)
				free(sc->ld_drv_map[0], M_MRSAS);
			/* ABORT driver initialization */
			goto ABORT;
		}
	}

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    sc->max_map_sz,
		    1,
		    sc->max_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->raidmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raid map tag.\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->raidmap_tag[i],
		    (void **)&sc->raidmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raidmap memory.\n");
			return (ENOMEM);
		}
		bzero(sc->raidmap_mem[i], sc->max_map_sz);

		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
		    sc->raidmap_mem[i], sc->max_map_sz,
		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
			return (ENOMEM);
		}
		if (!sc->raidmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for raid map.\n");
			return (ENOMEM);
		}
	}

	if (!mrsas_get_map_info(sc))
		mrsas_sync_map_info(sc);

	return (0);

ABORT:
	return (1);
}
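/*
 * Two copies of each map are kept (index 0 and 1); the driver and firmware
 * appear to alternate between them by map generation, so one copy can be
 * consumed for fast-path decisions while the other is being refreshed.
 */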
/*
 * megasas_setup_jbod_map:	Set up the JBOD map for FP seq_number.
 * input:			Adapter soft state
 *
 * Allocates the PD sequence-number map buffers (if needed) and syncs the map
 * with firmware; on failure, the JBOD fast path is disabled.
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
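/*
 * The pd_seq_map_sz computation above sizes the sync buffer for
 * MAX_PHYSICAL_DEVICES entries: the MR_PD_CFG_SEQ_NUM_SYNC header already
 * embeds one MR_PD_CFG_SEQ element, so only (MAX_PHYSICAL_DEVICES - 1)
 * additional elements need to be appended.
 */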
/*
 * mrsas_init_fw:	Initialize Firmware
 * input:		Adapter soft state
 *
 * Calls transition_to_ready() to make sure Firmware is in an operational
 * state, then calls mrsas_init_adapter() to send the IOC_INIT command to
 * Firmware. It issues internal commands to get the controller info after the
 * IOC_INIT command response is received by Firmware. Note: code relating to
 * get_pdlist, get_ld_list and max_sectors is currently not being used; it is
 * left here as a placeholder.
 */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{

	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
	int msix_enable = 0;
	int fw_msix_count = 0;
	int i, j;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	if (sc->is_ventura || sc->is_aero) {
		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
#if VD_EXT_DEBUG
		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
#endif
		sc->maxRaidMapSize = ((scratch_pad_3 >>
		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
		    MR_MAX_RAID_MAP_SIZE_MASK);
	}
	/* MSI-x index 0 - reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/* Check if MSI-X is supported while in ready state */
	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
			    ((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
				sc->msix_combined = true;
			/*
			 * Save the reply post index addresses 1-15 to local
			 * memory.  Index 0 is already saved from the register
			 * offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
			 */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vectors, "
		    "Online CPUs <%d>, Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based adapters.
	 */
	if (sc->msix_combined) {
		sc->msix_reg_offset[0] =
		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
	}
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialization failed.\n");
		return (1);
	}

	if (sc->is_ventura || sc->is_aero) {
		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_4));
		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
	}

	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED\n");

	if (sc->use_seqnum_jbod_fp)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map\n");

	if (sc->support_morethan256jbod)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext\n");

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED!\n"
		    "There seems to be some problem in the controller.\n"
		    "Please contact the support team if the problem persists.\n");
	}
	megasas_setup_jbod_map(sc);

	memset(sc->target_list, 0,
	    MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
		sc->target_list[i].target_id = 0xffff;

	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	if (mrsas_get_pd_list(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
		return (1);
	}
	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	if (mrsas_get_ld_list(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Get LD list failed.\n");
		return (1);
	}

	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
		    MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
		if (!sc->streamDetectByLD) {
			device_printf(sc->mrsas_dev,
			    "unable to allocate stream detection for pool of LDs\n");
			return (1);
		}
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
			if (!sc->streamDetectByLD[i]) {
				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
				for (j = 0; j < i; ++j)
					free(sc->streamDetectByLD[j], M_MRSAS);
				free(sc->streamDetectByLD, M_MRSAS);
				sc->streamDetectByLD = NULL;
				return (1);
			}
			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
		}
	}

	/*
	 * Compute the max allowed sectors per IO: the controller info has
	 * two limits on max sectors, and the driver should use the minimum
	 * of the two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmware (< FW ver 30) didn't report the
	 * information needed to calculate max_sectors_1, so that number
	 * always ended up as zero.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}

	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
	    sc->max_fw_cmds, sc->max_scsi_cmds);
	return (0);
}
/*
 * mrsas_init_adapter:	Initializes the adapter/controller
 * input:		Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller. The FW register is read to determine the number of
 * commands that is supported. All memory allocations for IO are based on
 * max_cmd. Appropriate calculations are performed in this function.
 */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;

	/* Determine allocation size of command frames */
	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));
	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
	 * Firmware supports extended IO chain frames that are 4 times
	 * larger than the legacy ones.
	 * Legacy Firmware:	frame size is (8 * 128)     = 1K
	 * 1M IO Firmware:	frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO,
	    "max sge: 0x%x, max chain frame size: 0x%x, "
	    "max fw cmd: 0x%x\n", sc->max_num_sge,
	    sc->max_chain_frame_sz, sc->max_fw_cmds);

	/* Used for pass-thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
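/*
 * Worked example for the sizing above (example value, not from a specific
 * board): with max_fw_cmds = 1008, reply_q_depth = ((1008 + 1 + 15) / 16 *
 * 16) * 2 = 2048, i.e. the command count rounded up to a multiple of 16 and
 * then doubled so the reply ring can never be overrun.
 */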
/*
 * mrsas_alloc_ioc_cmd:	Allocates memory for the IOC Init command
 * input:		Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
 * input:		Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init:	Sends IOC Init command to FW
 * input:		Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver supports Extended MSI-X */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll for the Firmware response.  While this loop with its DELAY
	 * call can block the CPU, the polling interval is only 1
	 * millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
		    MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
		    sc->atomic_desc_support ? "Yes" : "No");
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
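/*
 * Layout of the single ioc_init allocation used above: the legacy MFI init
 * frame sits at offset 0 and the MPI2 IOC INIT request at offset 1024, so
 * one DMA buffer carries both; the MFI frame's queue_info_new_phys_addr_lo
 * points at the embedded MPI2 request.
 */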
/*
 * mrsas_alloc_mpt_cmds:	Allocates the command packets
 * input:			Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
 * array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). The SMID value range is from 1 to
 * max_fw_cmds.
 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_fw_cmds, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_fw_cmds = sc->max_fw_cmds;

	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
	    M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
	for (i = 0; i < max_fw_cmds; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_fw_cmds; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_write_64bit_req_desc:	Writes a 64-bit request descriptor to FW
 * input:			Adapter softstate
 * 				request descriptor address low
 * 				request descriptor address high
 */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_fire_cmd:	Sends command to FW
 * input:		Adapter softstate
 * 			request descriptor address low
 * 			request descriptor address high
 *
 * This function fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	if (sc->atomic_desc_support)
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
		    req_desc_lo);
	else
		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
}
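/*
 * On controllers that advertise atomic descriptor support, a request can be
 * posted with a single 32-bit write to inbound_single_queue_port; otherwise
 * the low/high halves must be written under pci_lock so the two 32-bit
 * writes of one descriptor are never interleaved with another command's.
 */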
/*
 * mrsas_transition_to_ready:	Move FW to the Ready state
 * input:			Adapter instance soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise, it
 * has to wait for the ready state.
 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in the inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to the READY state, assuming a max wait
			 * of 10 seconds
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return an error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd:	Get a cmd from the free command pool
 * input:		Adapter soft state
 *
 * This function removes an MFI command from the command list.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
 * input:		Adapter Context.
 *
 * This function checks the FW status register and the do_timedout_reset
 * flag. It performs OCR/Kill Adapter if the FW is in a fault state or if an
 * IO timeout has triggered a reset.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
		    mrsas_atomic_read(&sc->target_reset_outstanding)) {

			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				    "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/*
			 * If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiating OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiating OCR "
					    "because of %s!\n", sc->do_timedout_reset ?
					    "DCMD IO Timeout" : "FW fault");

				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it
				 * is running.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
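/*
 * The OCR thread above is the only context that performs controller resets:
 * it wakes either from its periodic msleep timeout (every
 * mrsas_fw_fault_check_delay seconds) or from an explicit wakeup on
 * ocr_chan (e.g. a task-management completion), and it freezes the CAM SIM
 * for the duration of any reset so no new IO is accepted while recovery is
 * in progress.
 */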
"Shutdown" : 3093 "Hardware critical error", __func__); 3094 break; 3095 } 3096 fw_status = mrsas_read_reg_with_retries(sc, 3097 offsetof(mrsas_reg_set, outbound_scratch_pad)); 3098 fw_state = fw_status & MFI_STATE_MASK; 3099 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || 3100 mrsas_atomic_read(&sc->target_reset_outstanding)) { 3101 3102 /* First, freeze further IOs to come to the SIM */ 3103 mrsas_xpt_freeze(sc); 3104 3105 /* If this is an IO timeout then go for target reset */ 3106 if (mrsas_atomic_read(&sc->target_reset_outstanding)) { 3107 device_printf(sc->mrsas_dev, "Initiating Target RESET " 3108 "because of SCSI IO timeout!\n"); 3109 3110 /* Let the remaining IOs to complete */ 3111 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 3112 "mrsas_reset_targets", 5 * hz); 3113 3114 /* Try to reset the target device */ 3115 if (mrsas_reset_targets(sc) == FAIL) 3116 tm_target_reset_failed = 1; 3117 } 3118 3119 /* If this is a DCMD timeout or FW fault, 3120 * then go for controller reset 3121 */ 3122 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || 3123 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { 3124 if (tm_target_reset_failed) 3125 device_printf(sc->mrsas_dev, "Initiaiting OCR because of " 3126 "TM FAILURE!\n"); 3127 else 3128 device_printf(sc->mrsas_dev, "Initiaiting OCR " 3129 "because of %s!\n", sc->do_timedout_reset ? 3130 "DCMD IO Timeout" : "FW fault"); 3131 3132 mtx_lock_spin(&sc->ioctl_lock); 3133 sc->reset_in_progress = 1; 3134 mtx_unlock_spin(&sc->ioctl_lock); 3135 sc->reset_count++; 3136 3137 /* 3138 * Wait for the AEN task to be completed if it is running. 3139 */ 3140 mtx_unlock(&sc->sim_lock); 3141 taskqueue_drain(sc->ev_tq, &sc->ev_task); 3142 mtx_lock(&sc->sim_lock); 3143 3144 taskqueue_block(sc->ev_tq); 3145 /* Try to reset the controller */ 3146 mrsas_reset_ctrl(sc, sc->do_timedout_reset); 3147 3148 sc->do_timedout_reset = 0; 3149 sc->reset_in_progress = 0; 3150 tm_target_reset_failed = 0; 3151 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 3152 memset(sc->target_reset_pool, 0, 3153 sizeof(sc->target_reset_pool)); 3154 taskqueue_unblock(sc->ev_tq); 3155 } 3156 3157 /* Now allow IOs to come to the SIM */ 3158 mrsas_xpt_release(sc); 3159 } 3160 } 3161 mtx_unlock(&sc->sim_lock); 3162 sc->ocr_thread_active = 0; 3163 mrsas_kproc_exit(0); 3164 } 3165 3166 /* 3167 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. 3168 * input: Adapter Context. 3169 * 3170 * This function will clear reply descriptor so that post OCR driver and FW will 3171 * lost old history. 3172 */ 3173 void 3174 mrsas_reset_reply_desc(struct mrsas_softc *sc) 3175 { 3176 int i, count; 3177 pMpi2ReplyDescriptorsUnion_t reply_desc; 3178 3179 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 3180 for (i = 0; i < count; i++) 3181 sc->last_reply_idx[i] = 0; 3182 3183 reply_desc = sc->reply_desc_mem; 3184 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 3185 reply_desc->Words = MRSAS_ULONG_MAX; 3186 } 3187 } 3188 3189 /* 3190 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 3191 * input: Adapter Context. 3192 * 3193 * This function will run from thread context so that it can sleep. 1. Do not 3194 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command 3195 * to complete for 180 seconds. 3. If #2 does not find any outstanding 3196 * command Controller is in working state, so skip OCR. Otherwise, do 3197 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. 
/*
 * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
 * input:		Adapter Context.
 *
 * This function runs from thread context so that it can sleep.
 * 1. Do not handle OCR if the FW is in a HW critical error state.
 * 2. Wait up to 180 seconds for outstanding commands to complete.
 * 3. If step 2 finds no outstanding commands, the controller is in a
 *    working state, so skip OCR; otherwise, do OCR/kill the adapter based
 *    on the disableOnlineCtrlReset flag.
 * 4. At the start of the OCR, return all SCSI commands that have a ccb_ptr
 *    back to the CAM layer.
 * 5. After the OCR, re-fire management commands and move the controller to
 *    the operational state.
 */
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	union mrsas_evt_class_locale class_locale;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
	    sc->mrsas_fw_fault_check_delay * hz);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		mtx_unlock(&sc->sim_lock);
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];

			if (mpt_cmd->peer_cmd) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
				    i, mpt_cmd, mpt_cmd->peer_cmd);
			}

			if (mpt_cmd->ccb_ptr) {
				if (mpt_cmd->callout_owner) {
					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
					mrsas_cmd_done(sc, mpt_cmd);
				} else {
					mpt_cmd->ccb_ptr = NULL;
					mrsas_release_mpt_cmd(mpt_cmd);
				}
			}
		}

		mrsas_atomic_set(&sc->fw_outstanding, 0);

		mtx_lock(&sc->sim_lock);

		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure the reset adapter bit is cleared */
			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* If not an IOCTL then release the command else re-fire */
					if (!mfi_cmd->sync_cmd) {
						mrsas_release_mfi_cmd(mfi_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			megasas_setup_jbod_map(sc);

			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
					memset(sc->streamDetectByLD[j], 0, sizeof(LD_STREAM_DETECT));
					sc->streamDetectByLD[j]->mruBitMap = MR_STREAM_BITMAP;
				}
			}

			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Register AEN with FW for the last sequence number */
			class_locale.members.reserved = 0;
			class_locale.members.locale = MR_EVT_LOCALE_ALL;
			class_locale.members.class = MR_EVT_CLASS_DEBUG;

			mtx_unlock(&sc->sim_lock);
			if (mrsas_register_aen(sc, sc->last_seq_num,
			    class_locale.word)) {
				device_printf(sc->mrsas_dev,
				    "ERROR: AEN registration FAILED from OCR! "
				    "Further events from the controller cannot be notified. "
				    "Either there is some problem in the controller "
				    "or the controller does not support AEN.\n"
				    "Please contact the support team if the problem persists.\n");
			}
			mtx_lock(&sc->sim_lock);

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}

/*
 * mrsas_kill_hba:	Kill HBA when OCR is not supported
 * input:		Adapter Context.
 *
 * This function will kill the HBA when OCR is not supported.
 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	mrsas_complete_outstanding_ioctls(sc);
}

/*
 * mrsas_complete_outstanding_ioctls:	Complete pending IOCTLs after kill_hba
 * input:				Controller softc
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.raid_context.status);
			}
		}
	}
}
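/*
 * The seven WRSEQ key writes in mrsas_reset_ctrl() above are the standard
 * MPI2 "write sequence" that unlocks the host diagnostic register; only
 * after HOST_DIAG_WRITE_ENABLE (DRWE) is observed can
 * HOST_DIAG_RESET_ADAPTER be set to issue the chip reset.
 */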
/*
 * mrsas_complete_outstanding_ioctls:	Complete pending IOCTLs after kill_hba
 * input:				Controller softc
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int32_t count, MSIxIndex;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->max_fw_cmds; i++) {
		cmd_mpt = sc->mpt_cmd_list[i];

		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
					    cmd_mpt->io_request->RaidContext.raid_context.status);
			}
		}
	}
}

/*
 * mrsas_wait_for_outstanding:	Wait for outstanding commands
 * input:			Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding commands to be
 * completed.
 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/*
 * mrsas_release_mfi_cmd:	Return a cmd to the free command pool
 * input:			Command packet to return to the free command pool
 *
 * This function returns the MFI & MPT command to the command list.
 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the mpt command (if one is allocated and associated with
	 * the mfi command).
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from the mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid - 1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
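/*
 * Illustrative sketch only (kept out of the build with #if 0): the typical
 * lifecycle of an internal MFI command as used throughout this file. The
 * opcode below is a placeholder, not a real firmware opcode.
 */
#if 0
static int
mrsas_example_dcmd_lifecycle(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	int retcode;

	cmd = mrsas_get_mfi_cmd(sc);	/* reserve a frame from the pool */
	if (!cmd)
		return (ENOMEM);
	cmd->frame->dcmd.opcode = 0x01234567;	/* placeholder opcode */
	retcode = mrsas_issue_polled(sc, cmd);	/* poll up to 180 seconds */
	mrsas_release_mfi_cmd(cmd);	/* return frame (and MPT cmd) to pools */
	return (retcode);
}
#endif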
/*
 * mrsas_get_ctrl_info:	Returns FW's controller structure
 * input:		Adapter soft state
 *			Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
	    sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}

/*
 * mrsas_update_ext_vd_details:	Update details w.r.t. Extended VDs
 * input:
 *	sc - Controller's softc
 */
static void
mrsas_update_ext_vd_details(struct mrsas_softc *sc)
{
	u_int32_t ventura_map_sz = 0;

	sc->max256vdSupport =
	    sc->ctrl_info->adapterOperations3.supportMaxExtLDs;

	/* Below is an additional check to address future FW enhancements */
	if (sc->ctrl_info->max_lds > 64)
		sc->max256vdSupport = 1;

	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
	    * MRSAS_MAX_DEV_PER_CHANNEL;
	if (sc->max256vdSupport) {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	} else {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	}

	if (sc->maxRaidMapSize) {
		ventura_map_sz = sc->maxRaidMapSize *
		    MR_MIN_MAP_SIZE;
		sc->current_map_sz = ventura_map_sz;
		sc->max_map_sz = ventura_map_sz;
	} else {
		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
		if (sc->max256vdSupport)
			sc->current_map_sz = sc->new_map_sz;
		else
			sc->current_map_sz = sc->old_map_sz;
	}

	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
#if VD_EXT_DEBUG
	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x\n",
	    sc->maxRaidMapSize);
	device_printf(sc->mrsas_dev,
	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x, "
	    "drv_map_sz = 0x%x, size of driver raid map 0x%lx\n",
	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
#endif
}
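/*
 * Worked example of the legacy-map branch above, with hypothetical sizes:
 * if sizeof(MR_FW_RAID_MAP) were 0x1000, sizeof(MR_LD_SPAN_MAP) 0x80 and
 * fw_supported_vd_count 64, then old_map_sz = 0x1000 + 0x80 * 63 = 0x2F80.
 * current_map_sz picks the extended layout only when max256vdSupport is
 * set; max_map_sz always covers the larger of the two layouts.
 */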
/*
 * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
 * input:			Adapter soft state
 *
 * Allocates DMAable memory for the controller info internal command.
 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ctlr_info_size,
	    1,
	    ctlr_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}

/*
 * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
 * input:			Adapter soft state
 *
 * Deallocates memory of the get controller info cmd.
 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}
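/*
 * Illustrative sketch only (kept out of the build with #if 0): the
 * tag/alloc/load triple above must be unwound in reverse, which is what
 * mrsas_free_ctlr_info_cmd() does and what callers pair around the DCMD:
 */
#if 0
	if (mrsas_alloc_ctlr_info_cmd(sc) == SUCCESS) {
		/* ... DMA the MR_DCMD_CTRL_GET_INFO payload into
		 * sc->ctlr_info_mem, then copy it out ... */
		mrsas_free_ctlr_info_cmd(sc);	/* unload, free, destroy */
	}
#endif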
/*
 * mrsas_issue_polled:	Issues a polled command
 * inputs:		Adapter soft state
 *			Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware. MFI requires
 * the cmd_status to be set to 0xFF before posting. The maximum wait time of
 * the poll response timer is 180 seconds.
 */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = SUCCESS;

	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block the CPU, the time interval
	 * for this is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (frame_hdr->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}

/*
 * mrsas_issue_dcmd:	Issues an MFI Pass-thru cmd
 * input:		Adapter soft state
 *			mfi cmd pointer
 *
 * This function is called by mrsas_issue_blocked_cmd() and
 * mrsas_issue_polled() to build the MPT command and then fire the command
 * to Firmware.
 */
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = mrsas_build_mpt_cmd(sc, cmd);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
		return (1);
	}
	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);

	return (0);
}

/*
 * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
 * input:		Adapter soft state
 *			mfi cmd to build
 *
 * This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru
 * command and prepares the MPT command to send to Firmware.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int16_t index;

	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
		return NULL;
	}
	index = cmd->cmd_id.context.smid;

	req_desc = mrsas_get_request_desc(sc, index - 1);
	if (!req_desc)
		return NULL;

	req_desc->addr.Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = index;

	return (req_desc);
}
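/*
 * Note: SMIDs are 1-based (a zero smid means "no MPT command attached"),
 * while the request descriptor array is 0-based, hence the "index - 1"
 * lookup above and in the OCR re-fire path.
 */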
/*
 * mrsas_build_mptmfi_passthru:	Builds an MPT MFI Passthru command
 * input:			Adapter soft state
 *				mfi cmd pointer
 *
 * The MPT command and the io_request are setup as a passthru command. The SGE
 * chain address is set to frame_phys_addr of the MFI command.
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */
	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}

/*
 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
 * input:			Adapter soft state
 *				Command to be issued
 *
 * This function waits on an event for the command to be returned from the ISR.
 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
 * internal and ioctl commands.
 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	sc->chan = (void *)&cmd;

	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync_cmd will be set for an
					 * IOCTL command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n",
				    max_wait);
				retcode = 1;
				break;
			}
		}
	}

	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
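/*
 * Note: mrsas_issue_blocked_cmd() and mrsas_wakeup() use the address of
 * sc->chan itself (not the command) as the tsleep()/wakeup_one() channel,
 * so every blocked internal command shares a single wait channel.
 */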
/*
 * mrsas_complete_mptmfi_passthru:	Completes a command
 * input:	@sc:		Adapter soft state
 *		@cmd:		Command to be completed
 *		@status:	cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt is
 * received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev,
					    "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if we got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd returned */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n",
		    hdr->cmd);
		break;
	}
}
/*
 * mrsas_wakeup:	Completes an internal command
 * input:		Adapter soft state
 *			Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 * timer is started. This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}

/*
 * mrsas_shutdown_ctlr:	Instructs FW to shutdown the controller
 * input:		Adapter soft state
 *			Shutdown/Hibernate opcode
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * shutdown of the controller.
 */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
		return;
	}
	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
	if (sc->jbod_seq_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}
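/*
 * Note: before the shutdown/hibernate DCMD above is issued, any outstanding
 * AEN, RAID map update and JBOD sequence commands are aborted first, so the
 * firmware holds no driver-owned pending frames when it powers down.
 */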
/*
 * mrsas_flush_cache:	Requests FW to flush all its caches
 * input:		Adapter soft state
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * flushing of all caches.
 */
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}

/*
 * megasas_sync_pd_seq_num:	Sync the JBOD map sequence numbers with FW
 * input:			Adapter soft state
 *				pend - register a pending (async) sync if true
 *
 * If pend is true, a MR_DCMD_SYSTEM_PD_MAP_GET_INFO command is left pending
 * with the firmware so the driver is notified of JBOD map changes; otherwise
 * the current JBOD map is read synchronously.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for jbod map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
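/*
 * Illustrative note: pd_seq_map_sz above is the usual "header plus trailing
 * array" size computation. sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) already
 * includes one MR_PD_CFG_SEQ element, so only (MAX_PHYSICAL_DEVICES - 1)
 * additional elements are added to it.
 */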
/*
 * mrsas_get_map_info:	Load and validate RAID map
 * input:		Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 * and validate the RAID map. It returns 0 if successful, 1 otherwise.
 */
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
	uint8_t retcode = 0;

	sc->fast_path_io = 0;
	if (!mrsas_get_ld_map_info(sc)) {
		retcode = MR_ValidateMapInfo(sc);
		if (retcode == 0) {
			sc->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * mrsas_get_ld_map_info:	Get FW's ld_map structure
 * input:			Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller ld_map
 * structure.
 */
static int
mrsas_get_ld_map_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	void *map;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
	if (!map) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for ld map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(map, 0, sc->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
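/*
 * Note: the RAID map is double buffered. (sc->map_id & 1) selects the buffer
 * the new firmware map is DMA'd into above, while mrsas_sync_map_info()
 * below reuses the previous ((sc->map_id - 1) & 1) buffer to build the LD
 * target sync payload it writes back to firmware.
 */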
/*
 * mrsas_sync_map_info:	Sync the LD target map with FW
 * input:		Adapter instance soft state
 *
 * Issues an internal command (DCMD) to write the driver's LD target map to
 * the FW; the command is left pending with the firmware so the driver is
 * notified when the map changes.
 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *)target_map;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}

/*
 * mrsas_get_pd_info:	Returns FW's physical drive info structure
 * input:	dcmd.opcode	- MR_DCMD_PD_GET_INFO
 *		dcmd.mbox.s[0]	- deviceId of this physical drive
 *		dcmd.sge IN	- ptr to the returned MR_PD_INFO structure
 *
 * Firmware returns the physical drive info structure.
 */
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
{
	int retcode;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD info cmd\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;

	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.s[0] = device_id;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_pd_info);
	dcmd->opcode = MR_DCMD_PD_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->pd_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_pd_info);

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	sc->target_list[device_id].interface_type =
	    sc->pd_info_mem->state.ddf.pdType.intf;

	do_ocr = 0;

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);
}
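/*
 * Note: as with the other internal DCMDs in this file, the command above is
 * issued in polled mode while interrupts are masked (early attach and OCR)
 * and in blocking mode otherwise; the frame is returned to the pool here
 * only when interrupts are unmasked.
 */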
/*
 * mrsas_add_target:	Add the target ID of a system PD/VD to the driver's data structure.
 * sc:			Adapter's soft state
 * target_id:		Unique target ID per controller (managed by the driver)
 *			for system PDs - target ID ranges from 0 to (MRSAS_MAX_PD - 1)
 *			for VDs - target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 * return:		void
 * Description:	This function will be called whenever a system PD or a VD is created.
 */
static void mrsas_add_target(struct mrsas_softc *sc,
    u_int16_t target_id)
{
	sc->target_list[target_id].target_id = target_id;

	device_printf(sc->mrsas_dev,
	    "%s created target ID: 0x%x\n",
	    (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
	    (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
	/*
	 * If interrupts are enabled, then only fire DCMD to get pd_info
	 * for system PDs
	 */
	if (!sc->mask_interrupts && sc->pd_info_mem &&
	    (target_id < MRSAS_MAX_PD))
		mrsas_get_pd_info(sc, target_id);
}

/*
 * mrsas_remove_target:	Remove the target ID of a system PD/VD from the driver's data structure.
 * sc:			Adapter's soft state
 * target_id:		Unique target ID per controller (managed by the driver)
 *			for system PDs - target ID ranges from 0 to (MRSAS_MAX_PD - 1)
 *			for VDs - target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
 * return:		void
 * Description:	This function will be called whenever a system PD or a VD is deleted.
 */
static void mrsas_remove_target(struct mrsas_softc *sc,
    u_int16_t target_id)
{
	sc->target_list[target_id].target_id = 0xffff;
	device_printf(sc->mrsas_dev,
	    "%s deleted target ID: 0x%x\n",
	    (target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
	    (target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
}
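/*
 * Note: 0xffff in sc->target_list[].target_id is the "slot free" sentinel.
 * mrsas_add_target() and mrsas_remove_target() above toggle it, and the
 * PD/LD list refresh paths below key their add/remove decisions off it.
 */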
/*
 * mrsas_get_pd_list:	Returns FW's PD list structure
 * input:		Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out about the system
 * PDs exposed by Firmware.
 */
static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct MR_PD_LIST *pd_list_mem;
	struct MR_PD_ADDRESS *pd_addr;
	bus_addr_t pd_list_phys_addr = 0;
	struct mrsas_tmp_dcmd *tcmd;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD list cmd\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	if (!tcmd) {
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc dmamap for get PD list cmd\n");
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	} else {
		pd_list_mem = tcmd->tmp_dcmd_mem;
		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Get the instance PD list */
	pd_count = MRSAS_MAX_PD;
	pd_addr = pd_list_mem->addr;
	if (pd_list_mem->count < pd_count) {
		memset(sc->local_pd_list, 0,
		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
			sc->local_pd_list[pd_addr->deviceId].driveType =
			    pd_addr->scsiDevType;
			sc->local_pd_list[pd_addr->deviceId].driveState =
			    MR_PD_STATE_SYSTEM;
			if (sc->target_list[pd_addr->deviceId].target_id == 0xffff)
				mrsas_add_target(sc, pd_addr->deviceId);
			pd_addr++;
		}
		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
			if ((sc->local_pd_list[pd_index].driveState !=
			    MR_PD_STATE_SYSTEM) &&
			    (sc->target_list[pd_index].target_id !=
			    0xffff)) {
				mrsas_remove_target(sc, pd_index);
			}
		}
		/*
		 * Use a mutex/spinlock if the pd_list component size grows
		 * beyond 32 bits.
		 */
		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
		do_ocr = 0;
	}
dcmd_timeout:
	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
/*
 * mrsas_get_ld_list:	Returns FW's LD list structure
 * input:		Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure. This information is mainly used to find out the LDs supported
 * by the FW.
 */
static int
mrsas_get_ld_list(struct mrsas_softc *sc)
{
	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct MR_LD_LIST *ld_list_mem;
	bus_addr_t ld_list_phys_addr = 0;
	struct mrsas_tmp_dcmd *tcmd;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get LD list cmd\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	if (!tcmd) {
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	ld_list_size = sizeof(struct MR_LD_LIST);
	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc dmamap for get LD list cmd\n");
		mrsas_release_mfi_cmd(cmd);
		mrsas_free_tmp_dcmd(tcmd);
		free(tcmd, M_MRSAS);
		return (ENOMEM);
	} else {
		ld_list_mem = tcmd->tmp_dcmd_mem;
		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (sc->max256vdSupport)
		dcmd->mbox.b[0] = 1;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
	dcmd->opcode = MR_DCMD_LD_GET_LIST;
	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
	dcmd->pad_0 = 0;

	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

#if VD_EXT_DEBUG
	printf("Number of LDs %d\n", ld_list_mem->ldCount);
#endif

	/* Get the instance LD list */
	if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
		sc->CurLdCount = ld_list_mem->ldCount;
		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
			drv_tgt_id = ids + MRSAS_MAX_PD;
			if (ld_list_mem->ldList[ld_index].state != 0) {
				sc->ld_ids[ids] =
				    ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
				if (sc->target_list[drv_tgt_id].target_id ==
				    0xffff)
					mrsas_add_target(sc, drv_tgt_id);
			} else {
				if (sc->target_list[drv_tgt_id].target_id !=
				    0xffff)
					mrsas_remove_target(sc,
					    drv_tgt_id);
			}
		}

		do_ocr = 0;
	}
dcmd_timeout:
	mrsas_free_tmp_dcmd(tcmd);
	free(tcmd, M_MRSAS);

	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
/*
 * mrsas_alloc_tmp_dcmd:	Allocates memory for a temporary command
 * input:			Adapter soft state
 *				Temp command
 *				Size of allocation
 *
 * Allocates DMAable memory for a temporary internal command. The allocated
 * memory is initialized to all zeros upon successful loading of the dma
 * mapped memory.
 */
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
    struct mrsas_tmp_dcmd *tcmd, int size)
{
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size,
	    1,
	    size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &tcmd->tmp_dcmd_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
		return (ENOMEM);
	}
	memset(tcmd->tmp_dcmd_mem, 0, size);
	return (0);
}

/*
 * mrsas_free_tmp_dcmd:	Free memory for a temporary command
 * input:		temporary dcmd pointer
 *
 * Deallocates memory of the temporary command for use in the construction of
 * the internal DCMD.
 */
void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
{
	if (tmp->tmp_dcmd_phys_addr)
		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_mem != NULL)
		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_tag != NULL)
		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
}
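/*
 * Illustrative sketch only (kept out of the build with #if 0): how the
 * temporary DCMD buffer helpers are paired by callers such as
 * mrsas_get_pd_list() and mrsas_get_ld_list() above.
 */
#if 0
	struct mrsas_tmp_dcmd *tcmd;

	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
	if (tcmd != NULL && mrsas_alloc_tmp_dcmd(sc, tcmd, size) == SUCCESS) {
		/* ... point dcmd->sgl.sge32[0] at tcmd->tmp_dcmd_phys_addr
		 * and issue the DCMD ... */
		mrsas_free_tmp_dcmd(tcmd);
	}
	free(tcmd, M_MRSAS);
#endif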
/*
 * mrsas_issue_blocked_abort_cmd:	Aborts a previously issued cmd
 * input:				Adapter soft state
 *					Previously issued cmd to be aborted
 *
 * This function is used to abort previously issued commands, such as AEN and
 * RAID map sync commands. The abort command is sent as an internal MFI abort
 * frame and subsequently the driver will wait for a return status. The max
 * wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
 */
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;
	u_int8_t retcode = 0;
	unsigned long total_time = 0;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
		return (1);
	}
	abort_fr = &cmd->frame->abort;

	/* Prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	cmd->sync_cmd = 1;
	cmd->cmd_status = 0xFF;

	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
		return (1);
	}
	/* Wait for this cmd to complete */
	sc->chan = (void *)&cmd;
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev,
			    "Abort cmd timed out after %d sec.\n", max_wait);
			retcode = 1;
			break;
		}
	}

	cmd->sync_cmd = 0;
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}
/*
 * mrsas_complete_abort:	Completes aborting a command
 * input:			Adapter soft state
 *				Cmd that was issued to abort another cmd
 *
 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
 * change after sending the command. This function is called from
 * mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
 */
void
mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		sc->chan = (void *)&cmd;
		wakeup_one((void *)&sc->chan);
	}
	return;
}

/*
 * mrsas_aen_handler:	AEN processing callback function from thread context
 * input:		Adapter soft state
 *
 * Asynchronous event handler
 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
		    __func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for the latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev,
		    "register aen failed error %x\n", error);

skip_register_aen:
	return;
}
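/*
 * Note: the AEN path forms a loop. mrsas_complete_aen() below runs from
 * interrupt context and enqueues mrsas_aen_handler() on sc->ev_tq; the
 * handler rescans the buses as needed and then re-registers the AEN with
 * the next event sequence number.
 */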
/*
 * mrsas_complete_aen:	Completes the AEN wait command
 * input:		Adapter soft state
 *			AEN wait cmd that completed
 *
 * This function will be called from the ISR and will continue event processing
 * from thread context by enqueuing a task in ev_tq (callback function
 * "mrsas_aen_handler").
 */
void
mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/*
	 * Don't signal app if it is just an aborted previously registered
	 * aen
	 */
	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
		sc->mrsas_aen_triggered = 1;
		mtx_lock(&sc->aen_lock);
		if (sc->mrsas_poll_waiting) {
			sc->mrsas_poll_waiting = 0;
			selwakeup(&sc->mrsas_select);
		}
		mtx_unlock(&sc->aen_lock);
	} else
		cmd->abort_aen = 0;

	sc->aen_cmd = NULL;
	mrsas_release_mfi_cmd(cmd);

	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);

	return;
}

static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}
};

static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);