/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
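
/*
 * Usage sketch (illustrative values, not recommendations): the knobs below
 * are declared with CTLFLAG_RWTUN/CTLFLAG_RDTUN, so they may be set as
 * loader tunables, e.g. hw.mfi.max_cmds="64" in loader.conf, and the RW
 * ones may also be changed at runtime with sysctl(8), e.g.
 * sysctl hw.mfi.detect_jbod_change=0.
 */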
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");

static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
    &mfi_polled_cmd_timeout, 0,
    "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
    0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = mfi_open,
	.d_close = mfi_close,
	.d_ioctl = mfi_ioctl,
	.d_poll = mfi_poll,
	.d_name = "mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}
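
/*
 * Check-and-clear contract (sketch, inferred from the callers): each
 * variant returns 1 when the pending status is not ours, letting
 * mfi_intr() bail out early on a shared interrupt line, and returns 0
 * only after acknowledging the interrupt in hardware.
 */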
static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}
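
/*
 * Handshake overview (sketch, summarizing the switch below):
 * mfi_transition_firmware() polls the outbound status register and nudges
 * the firmware through its boot states (clear handshake, init, device
 * scan, hotplug) until it reports READY, waiting up to
 * MFI_RESET_WAIT_TIME seconds per state and failing if a state never
 * changes.
 */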
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		1, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		MEGASAS_MAX_NAME*sizeof(bus_addr_t),	/* maxsize */
		1,			/* nsegments */
		MEGASAS_MAX_NAME*sizeof(bus_addr_t),	/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
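	/*
	 * Worked example (illustrative, assuming MFI_MAXPHYS is 128KiB and
	 * PAGE_SIZE is 4KiB): (MFI_MAXPHYS / PAGE_SIZE) + 1 = 33, so at
	 * most 33 scatter/gather entries are kept even if the firmware
	 * advertises a larger MaxSgl in its status register.
	 */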
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt Support get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			tb_mem_size,		/* maxsize */
			1,			/* nsegments */
			tb_mem_size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
			0x100, 0,		/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MFI_FRAME_SIZE,		/* maxsize */
			1,			/* nsegments */
			MFI_FRAME_SIZE,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return 0;
		}

		/*
		 * Allocate DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  We keep it separate from what was allocated
		 * for the request and reply descriptors to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			tb_mem_size,		/* maxsize */
			1,			/* nsegments */
			tb_mem_size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		1, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR,	/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
		sc->mfi_max_sge,	/* nsegments */
		BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
		BUS_DMA_ALLOCNOW,	/* flags */
		busdma_lock_mutex,	/* lockfunc */
		&sc->mfi_io_lock,	/* lockfuncarg */
		&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		1, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		commsz,			/* maxsize */
		1,			/* nsegments */
		commsz,			/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
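	/*
	 * Worked example (illustrative, assuming a 12-byte struct mfi_sg64
	 * and the 33-entry clamp from above): the S/G list needs 396 bytes,
	 * so frames = (396 - 1) / 64 + 2 = 8 frames of MFI_FRAME_SIZE (64)
	 * bytes per command, one header frame plus seven for the list.
	 */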
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		64, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		framessz,		/* maxsize */
		1,			/* nsegments */
		framessz,		/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		4, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		sensesz,		/* maxsize */
		1,			/* nsegments */
		sensesz,		/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return error;
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
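	/*
	 * Mapping note (sketch): each command's frame header stores its
	 * array index in the context field below; the firmware echoes that
	 * context through the reply queue, which is how mfi_intr() maps a
	 * completion back to its struct mfi_command.
	 */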
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Command may be on other queues, e.g. the busy queue, depending on
	 * the flow of a previous call to mfi_mapcmd, so ensure it is
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
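
/*
 * Usage sketch (mirrors mfi_get_controller_info() below): callers pair a
 * free command with an optional DMA-able buffer, e.g.
 *
 *	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
 *	    (void **)&info, sizeof(*info));
 *
 * then set cm_flags (MFI_CMD_DATAIN and/or MFI_CMD_POLLED) and issue the
 * command with mfi_mapcmd() or mfi_wait_command().
 */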
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
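
/*
 * Sizing note (sketch): if CTRL_GETINFO fails above, mfi_max_io falls
 * back to what the S/G limit alone can express, (mfi_max_sge - 1) pages
 * in sectors; otherwise it is the smaller of the stripe-based and the
 * firmware-reported request-size maxima.
 */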
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}
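
/*
 * Replay note (sketch): on the first registration (seq_start == 0),
 * mfi_aen_setup() above replays events logged between the last shutdown
 * and the newest entry via mfi_parse_entries(), then arms the
 * asynchronous event wait at the newest sequence number so no event is
 * missed across reboots.
 */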
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);
		/* End LSIP200113393 */

		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}
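
/*
 * Reply-queue walkthrough (sketch): hw_pi is the firmware's producer
 * index and hw_ci the driver's consumer index into hw_reply_q[];
 * mfi_intr() above drains [ci, pi), publishes the new consumer index,
 * and re-checks the producer after the flushing read in case more
 * completions raced in while the queue was being drained.
 */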
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
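
/*
 * Probe symmetry note (sketch): mfi_ldprobe() below mirrors
 * mfi_syspdprobe() above; both diff the firmware's current list against
 * the attached and still-pending queues so that repeated scans stay
 * idempotent and only genuinely new devices are added.
 */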
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}
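
/*
 * Example (illustrative values): format_timestamp(0xff00002a) yields
 * "boot + 42s" because the top byte is all ones, while 0x0000002a
 * yields "42s", i.e. 42 seconds since 00:00 Jan 1, 2000.
 */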
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one that was logged after shutdown; avoid
		 * acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}
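	/*
	 * Merge note (sketch): if an event wait is already outstanding and
	 * its class/locale filter already covers this request, registering
	 * again is a no-op; otherwise the locales are OR'd together, the
	 * lower (more verbose) class threshold is kept, and the old wait
	 * is aborted so it can be reissued with the combined filter.
	 */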
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_aen_cm == NULL)
		return;

	hdr = &cm->cm_frame->header;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_aen_cm);
	sc->mfi_aen_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_aen_setup(sc, seq);
}

#define MAX_EVENTS 15
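
/*
 * Batching note (sketch): mfi_parse_entries() below fetches history in
 * chunks of MAX_EVENTS; the buffer is sized as one mfi_evt_list plus
 * (MAX_EVENTS - 1) extra mfi_evt_detail slots because the list struct
 * already embeds the first entry.
 */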
mfi_release_command(cm); 1830 break; 1831 } 1832 if (dcmd->header.cmd_status != MFI_STAT_OK) { 1833 device_printf(sc->mfi_dev, 1834 "Error %d fetching controller entries\n", 1835 dcmd->header.cmd_status); 1836 mfi_release_command(cm); 1837 error = EIO; 1838 break; 1839 } 1840 mfi_release_command(cm); 1841 1842 for (i = 0; i < el->count; i++) { 1843 /* 1844 * If this event is newer than 'stop_seq' then 1845 * break out of the loop. Note that the log 1846 * is a circular buffer so we have to handle 1847 * the case that our stop point is earlier in 1848 * the buffer than our start point. 1849 */ 1850 if (el->event[i].seq >= stop_seq) { 1851 if (start_seq <= stop_seq) 1852 break; 1853 else if (el->event[i].seq < start_seq) 1854 break; 1855 } 1856 mfi_queue_evt(sc, &el->event[i]); 1857 } 1858 seq = el->event[el->count - 1].seq + 1; 1859 } 1860 1861 free(el, M_MFIBUF); 1862 return (error); 1863 } 1864 1865 static int 1866 mfi_add_ld(struct mfi_softc *sc, int id) 1867 { 1868 struct mfi_command *cm; 1869 struct mfi_dcmd_frame *dcmd = NULL; 1870 struct mfi_ld_info *ld_info = NULL; 1871 struct mfi_disk_pending *ld_pend; 1872 int error; 1873 1874 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1875 1876 ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO); 1877 if (ld_pend != NULL) { 1878 ld_pend->ld_id = id; 1879 TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link); 1880 } 1881 1882 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, 1883 (void **)&ld_info, sizeof(*ld_info)); 1884 if (error) { 1885 device_printf(sc->mfi_dev, 1886 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); 1887 if (ld_info) 1888 free(ld_info, M_MFIBUF); 1889 return (error); 1890 } 1891 cm->cm_flags = MFI_CMD_DATAIN; 1892 dcmd = &cm->cm_frame->dcmd; 1893 dcmd->mbox[0] = id; 1894 if (mfi_wait_command(sc, cm) != 0) { 1895 device_printf(sc->mfi_dev, 1896 "Failed to get logical drive: %d\n", id); 1897 free(ld_info, M_MFIBUF); 1898 return (0); 1899 } 1900 if (ld_info->ld_config.params.isSSCD != 1) 1901 mfi_add_ld_complete(cm); 1902 else { 1903 mfi_release_command(cm); 1904 if (ld_info) /* SSCD drives ld_info free here */ 1905 free(ld_info, M_MFIBUF); 1906 } 1907 return (0); 1908 } 1909 1910 static void 1911 mfi_add_ld_complete(struct mfi_command *cm) 1912 { 1913 struct mfi_frame_header *hdr; 1914 struct mfi_ld_info *ld_info; 1915 struct mfi_softc *sc; 1916 device_t child; 1917 1918 sc = cm->cm_sc; 1919 hdr = &cm->cm_frame->header; 1920 ld_info = cm->cm_private; 1921 1922 if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) { 1923 free(ld_info, M_MFIBUF); 1924 wakeup(&sc->mfi_map_sync_cm); 1925 mfi_release_command(cm); 1926 return; 1927 } 1928 wakeup(&sc->mfi_map_sync_cm); 1929 mfi_release_command(cm); 1930 1931 mtx_unlock(&sc->mfi_io_lock); 1932 mtx_lock(&Giant); 1933 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { 1934 device_printf(sc->mfi_dev, "Failed to add logical disk\n"); 1935 free(ld_info, M_MFIBUF); 1936 mtx_unlock(&Giant); 1937 mtx_lock(&sc->mfi_io_lock); 1938 return; 1939 } 1940 1941 device_set_ivars(child, ld_info); 1942 device_set_desc(child, "MFI Logical Disk"); 1943 bus_generic_attach(sc->mfi_dev); 1944 mtx_unlock(&Giant); 1945 mtx_lock(&sc->mfi_io_lock); 1946 } 1947 1948 static int mfi_add_sys_pd(struct mfi_softc *sc, int id) 1949 { 1950 struct mfi_command *cm; 1951 struct mfi_dcmd_frame *dcmd = NULL; 1952 struct mfi_pd_info *pd_info = NULL; 1953 struct mfi_system_pending *syspd_pend; 1954 int error; 1955 1956 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1957 1958 syspd_pend = 
malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1959 if (syspd_pend != NULL) {
1960 syspd_pend->pd_id = id;
1961 TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1962 }
1963
1964 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1965 (void **)&pd_info, sizeof(*pd_info));
1966 if (error) {
1967 device_printf(sc->mfi_dev,
1968 "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1969 error);
1970 if (pd_info)
1971 free(pd_info, M_MFIBUF);
1972 return (error);
1973 }
1974 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1975 dcmd = &cm->cm_frame->dcmd;
1976 dcmd->mbox[0] = id;
1977 dcmd->header.scsi_status = 0;
1978 dcmd->header.pad0 = 0;
1979 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1980 device_printf(sc->mfi_dev,
1981 "Failed to get physical drive info %d\n", id);
1982 free(pd_info, M_MFIBUF);
1983 mfi_release_command(cm);
1984 return (error);
1985 }
1986 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1987 BUS_DMASYNC_POSTREAD);
1988 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1989 mfi_add_sys_pd_complete(cm);
1990 return (0);
1991 }
1992
1993 static void
1994 mfi_add_sys_pd_complete(struct mfi_command *cm)
1995 {
1996 struct mfi_frame_header *hdr;
1997 struct mfi_pd_info *pd_info;
1998 struct mfi_softc *sc;
1999 device_t child;
2000
2001 sc = cm->cm_sc;
2002 hdr = &cm->cm_frame->header;
2003 pd_info = cm->cm_private;
2004
2005 if (hdr->cmd_status != MFI_STAT_OK) {
2006 free(pd_info, M_MFIBUF);
2007 mfi_release_command(cm);
2008 return;
2009 }
2010 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2011 device_printf(sc->mfi_dev, "PD %x is not a SYSTEM PD\n",
2012 pd_info->ref.v.device_id);
2013 free(pd_info, M_MFIBUF);
2014 mfi_release_command(cm);
2015 return;
2016 }
2017 mfi_release_command(cm);
2018
2019 mtx_unlock(&sc->mfi_io_lock);
2020 mtx_lock(&Giant);
2021 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2022 device_printf(sc->mfi_dev, "Failed to add system pd\n");
2023 free(pd_info, M_MFIBUF);
2024 mtx_unlock(&Giant);
2025 mtx_lock(&sc->mfi_io_lock);
2026 return;
2027 }
2028
2029 device_set_ivars(child, pd_info);
2030 device_set_desc(child, "MFI System PD");
2031 bus_generic_attach(sc->mfi_dev);
2032 mtx_unlock(&Giant);
2033 mtx_lock(&sc->mfi_io_lock);
2034 }
2035
2036 static struct mfi_command *
2037 mfi_bio_command(struct mfi_softc *sc)
2038 {
2039 struct bio *bio;
2040 struct mfi_command *cm = NULL;
2041
2042 /* Reserve two commands to avoid ioctl starvation */
2043 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2044 return (NULL);
2045 }
2046 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2047 return (NULL);
2048 }
2049 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2050 cm = mfi_build_ldio(sc, bio);
2051 } else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2052 cm = mfi_build_syspdio(sc, bio);
2053 }
2054 if (!cm)
2055 mfi_enqueue_bio(sc, bio);
2056 return (cm);
2057 }
2058
2059 /*
2060 * Mostly copied from cam/scsi/scsi_all.c:scsi_read_write.
2061 */
2062
2063 int
2064 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2065 {
2066 int cdb_len;
2067
2068 if (((lba & 0x1fffff) == lba)
2069 && ((block_count & 0xff) == block_count)
2070 && (byte2 == 0)) {
2071 /* We can fit in a 6 byte cdb */
2072 struct scsi_rw_6 *scsi_cmd;
2073
2074 scsi_cmd = (struct scsi_rw_6 *)cdb;
2075 scsi_cmd->opcode = readop ?
READ_6 : WRITE_6; 2076 scsi_ulto3b(lba, scsi_cmd->addr); 2077 scsi_cmd->length = block_count & 0xff; 2078 scsi_cmd->control = 0; 2079 cdb_len = sizeof(*scsi_cmd); 2080 } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { 2081 /* Need a 10 byte CDB */ 2082 struct scsi_rw_10 *scsi_cmd; 2083 2084 scsi_cmd = (struct scsi_rw_10 *)cdb; 2085 scsi_cmd->opcode = readop ? READ_10 : WRITE_10; 2086 scsi_cmd->byte2 = byte2; 2087 scsi_ulto4b(lba, scsi_cmd->addr); 2088 scsi_cmd->reserved = 0; 2089 scsi_ulto2b(block_count, scsi_cmd->length); 2090 scsi_cmd->control = 0; 2091 cdb_len = sizeof(*scsi_cmd); 2092 } else if (((block_count & 0xffffffff) == block_count) && 2093 ((lba & 0xffffffff) == lba)) { 2094 /* Block count is too big for 10 byte CDB use a 12 byte CDB */ 2095 struct scsi_rw_12 *scsi_cmd; 2096 2097 scsi_cmd = (struct scsi_rw_12 *)cdb; 2098 scsi_cmd->opcode = readop ? READ_12 : WRITE_12; 2099 scsi_cmd->byte2 = byte2; 2100 scsi_ulto4b(lba, scsi_cmd->addr); 2101 scsi_cmd->reserved = 0; 2102 scsi_ulto4b(block_count, scsi_cmd->length); 2103 scsi_cmd->control = 0; 2104 cdb_len = sizeof(*scsi_cmd); 2105 } else { 2106 /* 2107 * 16 byte CDB. We'll only get here if the LBA is larger 2108 * than 2^32 2109 */ 2110 struct scsi_rw_16 *scsi_cmd; 2111 2112 scsi_cmd = (struct scsi_rw_16 *)cdb; 2113 scsi_cmd->opcode = readop ? READ_16 : WRITE_16; 2114 scsi_cmd->byte2 = byte2; 2115 scsi_u64to8b(lba, scsi_cmd->addr); 2116 scsi_cmd->reserved = 0; 2117 scsi_ulto4b(block_count, scsi_cmd->length); 2118 scsi_cmd->control = 0; 2119 cdb_len = sizeof(*scsi_cmd); 2120 } 2121 2122 return cdb_len; 2123 } 2124 2125 static struct mfi_command * 2126 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio) 2127 { 2128 struct mfi_command *cm; 2129 struct mfi_pass_frame *pass; 2130 uint32_t context = 0; 2131 int flags = 0, blkcount = 0, readop; 2132 uint8_t cdb_len; 2133 2134 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2135 2136 if ((cm = mfi_dequeue_free(sc)) == NULL) 2137 return (NULL); 2138 2139 /* Zero out the MFI frame */ 2140 context = cm->cm_frame->header.context; 2141 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2142 cm->cm_frame->header.context = context; 2143 pass = &cm->cm_frame->pass; 2144 bzero(pass->cdb, 16); 2145 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2146 switch (bio->bio_cmd & 0x03) { 2147 case BIO_READ: 2148 flags = MFI_CMD_DATAIN; 2149 readop = 1; 2150 break; 2151 case BIO_WRITE: 2152 flags = MFI_CMD_DATAOUT; 2153 readop = 0; 2154 break; 2155 default: 2156 /* TODO: what about BIO_DELETE??? 
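 * (Only BIO_READ and BIO_WRITE are translated here; BIO_DELETE and
 * any other opcode fall through to the panic below.)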
*/ 2157 panic("Unsupported bio command %x\n", bio->bio_cmd); 2158 } 2159 2160 /* Cheat with the sector length to avoid a non-constant division */ 2161 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2162 /* Fill the LBA and Transfer length in CDB */ 2163 cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount, 2164 pass->cdb); 2165 pass->header.target_id = (uintptr_t)bio->bio_driver1; 2166 pass->header.lun_id = 0; 2167 pass->header.timeout = 0; 2168 pass->header.flags = 0; 2169 pass->header.scsi_status = 0; 2170 pass->header.sense_len = MFI_SENSE_LEN; 2171 pass->header.data_len = bio->bio_bcount; 2172 pass->header.cdb_len = cdb_len; 2173 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2174 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2175 cm->cm_complete = mfi_bio_complete; 2176 cm->cm_private = bio; 2177 cm->cm_data = bio->bio_data; 2178 cm->cm_len = bio->bio_bcount; 2179 cm->cm_sg = &pass->sgl; 2180 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2181 cm->cm_flags = flags; 2182 2183 return (cm); 2184 } 2185 2186 static struct mfi_command * 2187 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio) 2188 { 2189 struct mfi_io_frame *io; 2190 struct mfi_command *cm; 2191 int flags; 2192 uint32_t blkcount; 2193 uint32_t context = 0; 2194 2195 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2196 2197 if ((cm = mfi_dequeue_free(sc)) == NULL) 2198 return (NULL); 2199 2200 /* Zero out the MFI frame */ 2201 context = cm->cm_frame->header.context; 2202 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2203 cm->cm_frame->header.context = context; 2204 io = &cm->cm_frame->io; 2205 switch (bio->bio_cmd & 0x03) { 2206 case BIO_READ: 2207 io->header.cmd = MFI_CMD_LD_READ; 2208 flags = MFI_CMD_DATAIN; 2209 break; 2210 case BIO_WRITE: 2211 io->header.cmd = MFI_CMD_LD_WRITE; 2212 flags = MFI_CMD_DATAOUT; 2213 break; 2214 default: 2215 /* TODO: what about BIO_DELETE??? 
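 * (Same gap as in mfi_build_syspdio() above: anything other than a
 * read or write, BIO_DELETE included, lands on the panic below.)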
*/ 2216 panic("Unsupported bio command %x\n", bio->bio_cmd); 2217 } 2218 2219 /* Cheat with the sector length to avoid a non-constant division */ 2220 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2221 io->header.target_id = (uintptr_t)bio->bio_driver1; 2222 io->header.timeout = 0; 2223 io->header.flags = 0; 2224 io->header.scsi_status = 0; 2225 io->header.sense_len = MFI_SENSE_LEN; 2226 io->header.data_len = blkcount; 2227 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2228 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2229 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 2230 io->lba_lo = bio->bio_pblkno & 0xffffffff; 2231 cm->cm_complete = mfi_bio_complete; 2232 cm->cm_private = bio; 2233 cm->cm_data = bio->bio_data; 2234 cm->cm_len = bio->bio_bcount; 2235 cm->cm_sg = &io->sgl; 2236 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2237 cm->cm_flags = flags; 2238 2239 return (cm); 2240 } 2241 2242 static void 2243 mfi_bio_complete(struct mfi_command *cm) 2244 { 2245 struct bio *bio; 2246 struct mfi_frame_header *hdr; 2247 struct mfi_softc *sc; 2248 2249 bio = cm->cm_private; 2250 hdr = &cm->cm_frame->header; 2251 sc = cm->cm_sc; 2252 2253 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { 2254 bio->bio_flags |= BIO_ERROR; 2255 bio->bio_error = EIO; 2256 device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, " 2257 "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status); 2258 mfi_print_sense(cm->cm_sc, cm->cm_sense); 2259 } else if (cm->cm_error != 0) { 2260 bio->bio_flags |= BIO_ERROR; 2261 bio->bio_error = cm->cm_error; 2262 device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n", 2263 cm, cm->cm_error); 2264 } 2265 2266 mfi_release_command(cm); 2267 mfi_disk_complete(bio); 2268 } 2269 2270 void 2271 mfi_startio(struct mfi_softc *sc) 2272 { 2273 struct mfi_command *cm; 2274 struct ccb_hdr *ccbh; 2275 2276 for (;;) { 2277 /* Don't bother if we're short on resources */ 2278 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 2279 break; 2280 2281 /* Try a command that has already been prepared */ 2282 cm = mfi_dequeue_ready(sc); 2283 2284 if (cm == NULL) { 2285 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 2286 cm = sc->mfi_cam_start(ccbh); 2287 } 2288 2289 /* Nope, so look for work on the bioq */ 2290 if (cm == NULL) 2291 cm = mfi_bio_command(sc); 2292 2293 /* No work available, so exit */ 2294 if (cm == NULL) 2295 break; 2296 2297 /* Send the command to the controller */ 2298 if (mfi_mapcmd(sc, cm) != 0) { 2299 device_printf(sc->mfi_dev, "Failed to startio\n"); 2300 mfi_requeue_ready(cm); 2301 break; 2302 } 2303 } 2304 } 2305 2306 int 2307 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 2308 { 2309 int error, polled; 2310 2311 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2312 2313 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { 2314 polled = (cm->cm_flags & MFI_CMD_POLLED) ? 
BUS_DMA_NOWAIT : 0;
2315 if (cm->cm_flags & MFI_CMD_CCB)
2316 error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2317 cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2318 polled);
2319 else
2320 error = bus_dmamap_load(sc->mfi_buffer_dmat,
2321 cm->cm_dmamap, cm->cm_data, cm->cm_len,
2322 mfi_data_cb, cm, polled);
2323 if (error == EINPROGRESS) {
2324 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2325 return (0);
2326 }
2327 } else {
2328 error = mfi_send_frame(sc, cm);
2329 }
2330
2331 return (error);
2332 }
2333
2334 static void
2335 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2336 {
2337 struct mfi_frame_header *hdr;
2338 struct mfi_command *cm;
2339 union mfi_sgl *sgl;
2340 struct mfi_softc *sc;
2341 int i, j, first, dir;
2342 int sge_size, locked;
2343
2344 cm = (struct mfi_command *)arg;
2345 sc = cm->cm_sc;
2346 hdr = &cm->cm_frame->header;
2347 sgl = cm->cm_sg;
2348
2349 /*
2350 * We need to check if we hold the lock as this is an async
2351 * callback, so even though our caller mfi_mapcmd asserts that
2352 * it has the lock, there is no guarantee that it hasn't been
2353 * dropped if bus_dmamap_load returned prior to our
2354 * completion.
2355 */
2356 if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2357 mtx_lock(&sc->mfi_io_lock);
2358
2359 if (error) {
2360 printf("error %d in callback\n", error);
2361 cm->cm_error = error;
2362 mfi_complete(sc, cm);
2363 goto out;
2364 }
2365 /* Use the IEEE SGL only for I/O on a SKINNY controller.
2366 * For other commands on a SKINNY controller use either
2367 * sg32 or sg64, based on sizeof(bus_addr_t).
2368 * Also calculate the total frame size based on the type
2369 * of SGL used.
2370 */
2371 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2372 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2373 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2374 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2375 for (i = 0; i < nsegs; i++) {
2376 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2377 sgl->sg_skinny[i].len = segs[i].ds_len;
2378 sgl->sg_skinny[i].flag = 0;
2379 }
2380 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2381 sge_size = sizeof(struct mfi_sg_skinny);
2382 hdr->sg_count = nsegs;
2383 } else {
2384 j = 0;
2385 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2386 first = cm->cm_stp_len;
2387 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2388 sgl->sg32[j].addr = segs[0].ds_addr;
2389 sgl->sg32[j++].len = first;
2390 } else {
2391 sgl->sg64[j].addr = segs[0].ds_addr;
2392 sgl->sg64[j++].len = first;
2393 }
2394 } else
2395 first = 0;
2396 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2397 for (i = 0; i < nsegs; i++) {
2398 sgl->sg32[j].addr = segs[i].ds_addr + first;
2399 sgl->sg32[j++].len = segs[i].ds_len - first;
2400 first = 0;
2401 }
2402 } else {
2403 for (i = 0; i < nsegs; i++) {
2404 sgl->sg64[j].addr = segs[i].ds_addr + first;
2405 sgl->sg64[j++].len = segs[i].ds_len - first;
2406 first = 0;
2407 }
2408 hdr->flags |= MFI_FRAME_SGL64;
2409 }
2410 hdr->sg_count = j;
2411 sge_size = sc->mfi_sge_size;
2412 }
2413
2414 dir = 0;
2415 if (cm->cm_flags & MFI_CMD_DATAIN) {
2416 dir |= BUS_DMASYNC_PREREAD;
2417 hdr->flags |= MFI_FRAME_DIR_READ;
2418 }
2419 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2420 dir |= BUS_DMASYNC_PREWRITE;
2421 hdr->flags |= MFI_FRAME_DIR_WRITE;
2422 }
2423 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2424 cm->cm_flags |= MFI_CMD_MAPPED;
2425
2426 /*
2427 * Instead of calculating the total number of frames in the
2428 * compound frame, it's already assumed that there will be at
2429 * least 1 frame, so
don't compensate for the modulo of the 2430 * following division. 2431 */ 2432 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); 2433 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 2434 2435 if ((error = mfi_send_frame(sc, cm)) != 0) { 2436 printf("error %d in callback from mfi_send_frame\n", error); 2437 cm->cm_error = error; 2438 mfi_complete(sc, cm); 2439 goto out; 2440 } 2441 2442 out: 2443 /* leave the lock in the state we found it */ 2444 if (locked == 0) 2445 mtx_unlock(&sc->mfi_io_lock); 2446 2447 return; 2448 } 2449 2450 static int 2451 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 2452 { 2453 int error; 2454 2455 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2456 2457 if (sc->MFA_enabled) 2458 error = mfi_tbolt_send_frame(sc, cm); 2459 else 2460 error = mfi_std_send_frame(sc, cm); 2461 2462 if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) 2463 mfi_remove_busy(cm); 2464 2465 return (error); 2466 } 2467 2468 static int 2469 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 2470 { 2471 struct mfi_frame_header *hdr; 2472 int tm = mfi_polled_cmd_timeout * 1000; 2473 2474 hdr = &cm->cm_frame->header; 2475 2476 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { 2477 cm->cm_timestamp = time_uptime; 2478 mfi_enqueue_busy(cm); 2479 } else { 2480 hdr->cmd_status = MFI_STAT_INVALID_STATUS; 2481 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2482 } 2483 2484 /* 2485 * The bus address of the command is aligned on a 64 byte boundary, 2486 * leaving the least 6 bits as zero. For whatever reason, the 2487 * hardware wants the address shifted right by three, leaving just 2488 * 3 zero bits. These three bits are then used as a prefetching 2489 * hint for the hardware to predict how many frames need to be 2490 * fetched across the bus. If a command has more than 8 frames 2491 * then the 3 bits are set to 0x7 and the firmware uses other 2492 * information in the command to determine the total amount to fetch. 2493 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames 2494 * is enough for both 32bit and 64bit systems. 2495 */ 2496 if (cm->cm_extra_frames > 7) 2497 cm->cm_extra_frames = 7; 2498 2499 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames); 2500 2501 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) 2502 return (0); 2503 2504 /* This is a polled command, so busy-wait for it to complete. 
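 * The loop below polls in 1 ms steps for up to mfi_polled_cmd_timeout
 * seconds; 'tm' was initialized above to that budget in milliseconds.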
*/ 2505 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 2506 DELAY(1000); 2507 tm -= 1; 2508 if (tm <= 0) 2509 break; 2510 } 2511 2512 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 2513 device_printf(sc->mfi_dev, "Frame %p timed out " 2514 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); 2515 return (ETIMEDOUT); 2516 } 2517 2518 return (0); 2519 } 2520 2521 2522 void 2523 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) 2524 { 2525 int dir; 2526 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2527 2528 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { 2529 dir = 0; 2530 if ((cm->cm_flags & MFI_CMD_DATAIN) || 2531 (cm->cm_frame->header.cmd == MFI_CMD_STP)) 2532 dir |= BUS_DMASYNC_POSTREAD; 2533 if (cm->cm_flags & MFI_CMD_DATAOUT) 2534 dir |= BUS_DMASYNC_POSTWRITE; 2535 2536 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 2537 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2538 cm->cm_flags &= ~MFI_CMD_MAPPED; 2539 } 2540 2541 cm->cm_flags |= MFI_CMD_COMPLETED; 2542 2543 if (cm->cm_complete != NULL) 2544 cm->cm_complete(cm); 2545 else 2546 wakeup(cm); 2547 } 2548 2549 static int 2550 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort) 2551 { 2552 struct mfi_command *cm; 2553 struct mfi_abort_frame *abort; 2554 int i = 0, error; 2555 uint32_t context = 0; 2556 2557 mtx_lock(&sc->mfi_io_lock); 2558 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2559 mtx_unlock(&sc->mfi_io_lock); 2560 return (EBUSY); 2561 } 2562 2563 /* Zero out the MFI frame */ 2564 context = cm->cm_frame->header.context; 2565 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2566 cm->cm_frame->header.context = context; 2567 2568 abort = &cm->cm_frame->abort; 2569 abort->header.cmd = MFI_CMD_ABORT; 2570 abort->header.flags = 0; 2571 abort->header.scsi_status = 0; 2572 abort->abort_context = (*cm_abort)->cm_frame->header.context; 2573 abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr; 2574 abort->abort_mfi_addr_hi = 2575 (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32); 2576 cm->cm_data = NULL; 2577 cm->cm_flags = MFI_CMD_POLLED; 2578 2579 if ((error = mfi_mapcmd(sc, cm)) != 0) 2580 device_printf(sc->mfi_dev, "failed to abort command\n"); 2581 mfi_release_command(cm); 2582 2583 mtx_unlock(&sc->mfi_io_lock); 2584 while (i < 5 && *cm_abort != NULL) { 2585 tsleep(cm_abort, 0, "mfiabort", 2586 5 * hz); 2587 i++; 2588 } 2589 if (*cm_abort != NULL) { 2590 /* Force a complete if command didn't abort */ 2591 mtx_lock(&sc->mfi_io_lock); 2592 (*cm_abort)->cm_complete(*cm_abort); 2593 mtx_unlock(&sc->mfi_io_lock); 2594 } 2595 2596 return (error); 2597 } 2598 2599 int 2600 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2601 int len) 2602 { 2603 struct mfi_command *cm; 2604 struct mfi_io_frame *io; 2605 int error; 2606 uint32_t context = 0; 2607 2608 if ((cm = mfi_dequeue_free(sc)) == NULL) 2609 return (EBUSY); 2610 2611 /* Zero out the MFI frame */ 2612 context = cm->cm_frame->header.context; 2613 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2614 cm->cm_frame->header.context = context; 2615 2616 io = &cm->cm_frame->io; 2617 io->header.cmd = MFI_CMD_LD_WRITE; 2618 io->header.target_id = id; 2619 io->header.timeout = 0; 2620 io->header.flags = 0; 2621 io->header.scsi_status = 0; 2622 io->header.sense_len = MFI_SENSE_LEN; 2623 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2624 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2625 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2626 io->lba_hi = (lba & 
0xffffffff00000000) >> 32; 2627 io->lba_lo = lba & 0xffffffff; 2628 cm->cm_data = virt; 2629 cm->cm_len = len; 2630 cm->cm_sg = &io->sgl; 2631 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2632 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 2633 2634 if ((error = mfi_mapcmd(sc, cm)) != 0) 2635 device_printf(sc->mfi_dev, "failed dump blocks\n"); 2636 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2637 BUS_DMASYNC_POSTWRITE); 2638 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2639 mfi_release_command(cm); 2640 2641 return (error); 2642 } 2643 2644 int 2645 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2646 int len) 2647 { 2648 struct mfi_command *cm; 2649 struct mfi_pass_frame *pass; 2650 int error, readop, cdb_len; 2651 uint32_t blkcount; 2652 2653 if ((cm = mfi_dequeue_free(sc)) == NULL) 2654 return (EBUSY); 2655 2656 pass = &cm->cm_frame->pass; 2657 bzero(pass->cdb, 16); 2658 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2659 2660 readop = 0; 2661 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2662 cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb); 2663 pass->header.target_id = id; 2664 pass->header.timeout = 0; 2665 pass->header.flags = 0; 2666 pass->header.scsi_status = 0; 2667 pass->header.sense_len = MFI_SENSE_LEN; 2668 pass->header.data_len = len; 2669 pass->header.cdb_len = cdb_len; 2670 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2671 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2672 cm->cm_data = virt; 2673 cm->cm_len = len; 2674 cm->cm_sg = &pass->sgl; 2675 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2676 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI; 2677 2678 if ((error = mfi_mapcmd(sc, cm)) != 0) 2679 device_printf(sc->mfi_dev, "failed dump blocks\n"); 2680 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2681 BUS_DMASYNC_POSTWRITE); 2682 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2683 mfi_release_command(cm); 2684 2685 return (error); 2686 } 2687 2688 static int 2689 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2690 { 2691 struct mfi_softc *sc; 2692 int error; 2693 2694 sc = dev->si_drv1; 2695 2696 mtx_lock(&sc->mfi_io_lock); 2697 if (sc->mfi_detaching) 2698 error = ENXIO; 2699 else { 2700 sc->mfi_flags |= MFI_FLAGS_OPEN; 2701 error = 0; 2702 } 2703 mtx_unlock(&sc->mfi_io_lock); 2704 2705 return (error); 2706 } 2707 2708 static int 2709 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2710 { 2711 struct mfi_softc *sc; 2712 struct mfi_aen *mfi_aen_entry, *tmp; 2713 2714 sc = dev->si_drv1; 2715 2716 mtx_lock(&sc->mfi_io_lock); 2717 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 2718 2719 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 2720 if (mfi_aen_entry->p == curproc) { 2721 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2722 aen_link); 2723 free(mfi_aen_entry, M_MFIBUF); 2724 } 2725 } 2726 mtx_unlock(&sc->mfi_io_lock); 2727 return (0); 2728 } 2729 2730 static int 2731 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 2732 { 2733 2734 switch (opcode) { 2735 case MFI_DCMD_LD_DELETE: 2736 case MFI_DCMD_CFG_ADD: 2737 case MFI_DCMD_CFG_CLEAR: 2738 case MFI_DCMD_CFG_FOREIGN_IMPORT: 2739 sx_xlock(&sc->mfi_config_lock); 2740 return (1); 2741 default: 2742 return (0); 2743 } 2744 } 2745 2746 static void 2747 mfi_config_unlock(struct mfi_softc *sc, int locked) 2748 { 2749 2750 if (locked) 2751 sx_xunlock(&sc->mfi_config_lock); 2752 } 2753 2754 /* 2755 * Perform pre-issue checks on commands from 
userland and possibly veto
2756 * them.
2757 */
2758 static int
2759 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2760 {
2761 struct mfi_disk *ld, *ld2;
2762 int error;
2763 struct mfi_system_pd *syspd = NULL;
2764 uint16_t syspd_id;
2765 uint16_t *mbox;
2766
2767 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2768 error = 0;
2769 switch (cm->cm_frame->dcmd.opcode) {
2770 case MFI_DCMD_LD_DELETE:
2771 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2772 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2773 break;
2774 }
2775 if (ld == NULL)
2776 error = ENOENT;
2777 else
2778 error = mfi_disk_disable(ld);
2779 break;
2780 case MFI_DCMD_CFG_CLEAR:
2781 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2782 error = mfi_disk_disable(ld);
2783 if (error)
2784 break;
2785 }
2786 if (error) {
2787 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2788 if (ld2 == ld)
2789 break;
2790 mfi_disk_enable(ld2);
2791 }
2792 }
2793 break;
2794 case MFI_DCMD_PD_STATE_SET:
2795 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2796 syspd_id = mbox[0];
2797 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2798 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2799 if (syspd->pd_id == syspd_id)
2800 break;
2801 }
2802 }
2803 else
2804 break;
2805 if (syspd)
2806 error = mfi_syspd_disable(syspd);
2807 break;
2808 default:
2809 break;
2810 }
2811 return (error);
2812 }
2813
2814 /* Perform post-issue checks on commands from userland. */
2815 static void
2816 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2817 {
2818 struct mfi_disk *ld, *ldn;
2819 struct mfi_system_pd *syspd = NULL;
2820 uint16_t syspd_id;
2821 uint16_t *mbox;
2822
2823 switch (cm->cm_frame->dcmd.opcode) {
2824 case MFI_DCMD_LD_DELETE:
2825 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2826 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2827 break;
2828 }
2829 KASSERT(ld != NULL, ("volume disappeared"));
2830 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2831 mtx_unlock(&sc->mfi_io_lock);
2832 mtx_lock(&Giant);
2833 device_delete_child(sc->mfi_dev, ld->ld_dev);
2834 mtx_unlock(&Giant);
2835 mtx_lock(&sc->mfi_io_lock);
2836 } else
2837 mfi_disk_enable(ld);
2838 break;
2839 case MFI_DCMD_CFG_CLEAR:
2840 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2841 mtx_unlock(&sc->mfi_io_lock);
2842 mtx_lock(&Giant);
2843 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2844 device_delete_child(sc->mfi_dev, ld->ld_dev);
2845 }
2846 mtx_unlock(&Giant);
2847 mtx_lock(&sc->mfi_io_lock);
2848 } else {
2849 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2850 mfi_disk_enable(ld);
2851 }
2852 break;
2853 case MFI_DCMD_CFG_ADD:
2854 mfi_ldprobe(sc);
2855 break;
2856 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2857 mfi_ldprobe(sc);
2858 break;
2859 case MFI_DCMD_PD_STATE_SET:
2860 mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2861 syspd_id = mbox[0];
2862 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2863 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2864 if (syspd->pd_id == syspd_id)
2865 break;
2866 }
2867 }
2868 else
2869 break;
2870 /* If the transition fails then enable the syspd again */
2871 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2872 mfi_syspd_enable(syspd);
2873 break;
2874 }
2875 }
2876
2877 static int
2878 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2879 {
2880 struct mfi_config_data *conf_data;
2881 struct mfi_command *ld_cm = NULL;
2882 struct mfi_ld_info *ld_info = NULL;
2883 struct mfi_ld_config *ld;
2884 char *p;
2885 int error = 0;
2886
2887 conf_data = (struct
mfi_config_data *)cm->cm_data;
2888
2889 if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2890 p = (char *)conf_data->array;
2891 p += conf_data->array_size * conf_data->array_count;
2892 ld = (struct mfi_ld_config *)p;
2893 if (ld->params.isSSCD == 1)
2894 error = 1;
2895 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2896 error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2897 (void **)&ld_info, sizeof(*ld_info));
2898 if (error) {
2899 device_printf(sc->mfi_dev, "Failed to allocate "
2900 "MFI_DCMD_LD_GET_INFO %d\n", error);
2901 if (ld_info)
2902 free(ld_info, M_MFIBUF);
2903 return (0);
2904 }
2905 ld_cm->cm_flags = MFI_CMD_DATAIN;
2906 ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2907 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2908 if (mfi_wait_command(sc, ld_cm) != 0) {
2909 device_printf(sc->mfi_dev, "failed to get log drv\n");
2910 mfi_release_command(ld_cm);
2911 free(ld_info, M_MFIBUF);
2912 return (0);
2913 }
2914
2915 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2916 free(ld_info, M_MFIBUF);
2917 mfi_release_command(ld_cm);
2918 return (0);
2919 }
2920
2921 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2922
2923 if (ld_info->ld_config.params.isSSCD == 1)
2924 error = 1;
2925
2926 mfi_release_command(ld_cm);
2927 free(ld_info, M_MFIBUF);
2928
2929 }
2930 return (error);
2931 }
2932
2933 static int
2934 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2935 {
2936 uint8_t i;
2937 struct mfi_ioc_packet *ioc;
2938 int sge_size, error;
2939 struct megasas_sge *kern_sge;
2940
2941 ioc = (struct mfi_ioc_packet *)arg;
2942 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2943 kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2944 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2945
2946 if (sizeof(bus_addr_t) == 8) {
2947 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2948 cm->cm_extra_frames = 2;
2949 sge_size = sizeof(struct mfi_sg64);
2950 } else {
2951 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2952 sge_size = sizeof(struct mfi_sg32);
2953 }
2954
2955 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2956 for (i = 0; i < ioc->mfi_sge_count; i++) {
2957 if (bus_dma_tag_create(sc->mfi_parent_dmat, /* parent */
2958 1, 0, /* algnmnt, boundary */
2959 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2960 BUS_SPACE_MAXADDR, /* highaddr */
2961 NULL, NULL, /* filter, filterarg */
2962 ioc->mfi_sgl[i].iov_len,/* maxsize */
2963 2, /* nsegments */
2964 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2965 BUS_DMA_ALLOCNOW, /* flags */
2966 NULL, NULL, /* lockfunc, lockarg */
2967 &sc->mfi_kbuff_arr_dmat[i])) {
2968 device_printf(sc->mfi_dev,
2969 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2970 return (ENOMEM);
2971 }
2972
2973 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2974 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2975 &sc->mfi_kbuff_arr_dmamap[i])) {
2976 device_printf(sc->mfi_dev,
2977 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2978 return (ENOMEM);
2979 }
2980
2981 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2982 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2983 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2984 &sc->mfi_kbuff_arr_busaddr[i], 0);
2985
2986 if (!sc->kbuff_arr[i]) {
2987 device_printf(sc->mfi_dev,
2988 "Could not allocate memory for kbuff_arr info\n");
2989 return (ENOMEM);
2990 }
2991 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2992 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2993
2994 if (sizeof(bus_addr_t) ==
8) { 2995 cm->cm_frame->stp.sgl.sg64[i].addr = 2996 kern_sge[i].phys_addr; 2997 cm->cm_frame->stp.sgl.sg64[i].len = 2998 ioc->mfi_sgl[i].iov_len; 2999 } else { 3000 cm->cm_frame->stp.sgl.sg32[i].addr = 3001 kern_sge[i].phys_addr; 3002 cm->cm_frame->stp.sgl.sg32[i].len = 3003 ioc->mfi_sgl[i].iov_len; 3004 } 3005 3006 error = copyin(ioc->mfi_sgl[i].iov_base, 3007 sc->kbuff_arr[i], 3008 ioc->mfi_sgl[i].iov_len); 3009 if (error != 0) { 3010 device_printf(sc->mfi_dev, "Copy in failed\n"); 3011 return error; 3012 } 3013 } 3014 3015 cm->cm_flags |=MFI_CMD_MAPPED; 3016 return 0; 3017 } 3018 3019 static int 3020 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) 3021 { 3022 struct mfi_command *cm; 3023 struct mfi_dcmd_frame *dcmd; 3024 void *ioc_buf = NULL; 3025 uint32_t context; 3026 int error = 0, locked; 3027 3028 3029 if (ioc->buf_size > 0) { 3030 if (ioc->buf_size > 1024 * 1024) 3031 return (ENOMEM); 3032 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); 3033 error = copyin(ioc->buf, ioc_buf, ioc->buf_size); 3034 if (error) { 3035 device_printf(sc->mfi_dev, "failed to copyin\n"); 3036 free(ioc_buf, M_MFIBUF); 3037 return (error); 3038 } 3039 } 3040 3041 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); 3042 3043 mtx_lock(&sc->mfi_io_lock); 3044 while ((cm = mfi_dequeue_free(sc)) == NULL) 3045 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); 3046 3047 /* Save context for later */ 3048 context = cm->cm_frame->header.context; 3049 3050 dcmd = &cm->cm_frame->dcmd; 3051 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); 3052 3053 cm->cm_sg = &dcmd->sgl; 3054 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 3055 cm->cm_data = ioc_buf; 3056 cm->cm_len = ioc->buf_size; 3057 3058 /* restore context */ 3059 cm->cm_frame->header.context = context; 3060 3061 /* Cheat since we don't know if we're writing or reading */ 3062 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 3063 3064 error = mfi_check_command_pre(sc, cm); 3065 if (error) 3066 goto out; 3067 3068 error = mfi_wait_command(sc, cm); 3069 if (error) { 3070 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 3071 goto out; 3072 } 3073 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 3074 mfi_check_command_post(sc, cm); 3075 out: 3076 mfi_release_command(cm); 3077 mtx_unlock(&sc->mfi_io_lock); 3078 mfi_config_unlock(sc, locked); 3079 if (ioc->buf_size > 0) 3080 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 3081 if (ioc_buf) 3082 free(ioc_buf, M_MFIBUF); 3083 return (error); 3084 } 3085 3086 #define PTRIN(p) ((void *)(uintptr_t)(p)) 3087 3088 static int 3089 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 3090 { 3091 struct mfi_softc *sc; 3092 union mfi_statrequest *ms; 3093 struct mfi_ioc_packet *ioc; 3094 #ifdef COMPAT_FREEBSD32 3095 struct mfi_ioc_packet32 *ioc32; 3096 #endif 3097 struct mfi_ioc_aen *aen; 3098 struct mfi_command *cm = NULL; 3099 uint32_t context = 0; 3100 union mfi_sense_ptr sense_ptr; 3101 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0; 3102 size_t len; 3103 int i, res; 3104 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 3105 #ifdef COMPAT_FREEBSD32 3106 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 3107 struct mfi_ioc_passthru iop_swab; 3108 #endif 3109 int error, locked; 3110 union mfi_sgl *sgl; 3111 sc = dev->si_drv1; 3112 error = 0; 3113 3114 if (sc->adpreset) 3115 return EBUSY; 3116 3117 if (sc->hw_crit_error) 3118 return EBUSY; 3119 3120 if (sc->issuepend_done == 0) 3121 return 
EBUSY; 3122 3123 switch (cmd) { 3124 case MFIIO_STATS: 3125 ms = (union mfi_statrequest *)arg; 3126 switch (ms->ms_item) { 3127 case MFIQ_FREE: 3128 case MFIQ_BIO: 3129 case MFIQ_READY: 3130 case MFIQ_BUSY: 3131 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 3132 sizeof(struct mfi_qstat)); 3133 break; 3134 default: 3135 error = ENOIOCTL; 3136 break; 3137 } 3138 break; 3139 case MFIIO_QUERY_DISK: 3140 { 3141 struct mfi_query_disk *qd; 3142 struct mfi_disk *ld; 3143 3144 qd = (struct mfi_query_disk *)arg; 3145 mtx_lock(&sc->mfi_io_lock); 3146 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 3147 if (ld->ld_id == qd->array_id) 3148 break; 3149 } 3150 if (ld == NULL) { 3151 qd->present = 0; 3152 mtx_unlock(&sc->mfi_io_lock); 3153 return (0); 3154 } 3155 qd->present = 1; 3156 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 3157 qd->open = 1; 3158 bzero(qd->devname, SPECNAMELEN + 1); 3159 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 3160 mtx_unlock(&sc->mfi_io_lock); 3161 break; 3162 } 3163 case MFI_CMD: 3164 #ifdef COMPAT_FREEBSD32 3165 case MFI_CMD32: 3166 #endif 3167 { 3168 devclass_t devclass; 3169 ioc = (struct mfi_ioc_packet *)arg; 3170 int adapter; 3171 3172 adapter = ioc->mfi_adapter_no; 3173 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 3174 devclass = devclass_find("mfi"); 3175 sc = devclass_get_softc(devclass, adapter); 3176 } 3177 mtx_lock(&sc->mfi_io_lock); 3178 if ((cm = mfi_dequeue_free(sc)) == NULL) { 3179 mtx_unlock(&sc->mfi_io_lock); 3180 return (EBUSY); 3181 } 3182 mtx_unlock(&sc->mfi_io_lock); 3183 locked = 0; 3184 3185 /* 3186 * save off original context since copying from user 3187 * will clobber some data 3188 */ 3189 context = cm->cm_frame->header.context; 3190 cm->cm_frame->header.context = cm->cm_index; 3191 3192 bcopy(ioc->mfi_frame.raw, cm->cm_frame, 3193 2 * MEGAMFI_FRAME_SIZE); 3194 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 3195 * ioc->mfi_sge_count) + ioc->mfi_sgl_off; 3196 cm->cm_frame->header.scsi_status = 0; 3197 cm->cm_frame->header.pad0 = 0; 3198 if (ioc->mfi_sge_count) { 3199 cm->cm_sg = 3200 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; 3201 } 3202 sgl = cm->cm_sg; 3203 cm->cm_flags = 0; 3204 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 3205 cm->cm_flags |= MFI_CMD_DATAIN; 3206 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 3207 cm->cm_flags |= MFI_CMD_DATAOUT; 3208 /* Legacy app shim */ 3209 if (cm->cm_flags == 0) 3210 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 3211 cm->cm_len = cm->cm_frame->header.data_len; 3212 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3213 #ifdef COMPAT_FREEBSD32 3214 if (cmd == MFI_CMD) { 3215 #endif 3216 /* Native */ 3217 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; 3218 #ifdef COMPAT_FREEBSD32 3219 } else { 3220 /* 32bit on 64bit */ 3221 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3222 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; 3223 } 3224 #endif 3225 cm->cm_len += cm->cm_stp_len; 3226 } 3227 if (cm->cm_len && 3228 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 3229 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 3230 M_WAITOK | M_ZERO); 3231 if (cm->cm_data == NULL) { 3232 device_printf(sc->mfi_dev, "Malloc failed\n"); 3233 goto out; 3234 } 3235 } else { 3236 cm->cm_data = 0; 3237 } 3238 3239 /* restore header context */ 3240 cm->cm_frame->header.context = context; 3241 3242 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3243 res = mfi_stp_cmd(sc, cm, arg); 3244 if (res != 0) 3245 goto out; 3246 } else { 3247 temp = data; 3248 if ((cm->cm_flags & MFI_CMD_DATAOUT) || 3249 
(cm->cm_frame->header.cmd == MFI_CMD_STP)) { 3250 for (i = 0; i < ioc->mfi_sge_count; i++) { 3251 #ifdef COMPAT_FREEBSD32 3252 if (cmd == MFI_CMD) { 3253 #endif 3254 /* Native */ 3255 addr = ioc->mfi_sgl[i].iov_base; 3256 len = ioc->mfi_sgl[i].iov_len; 3257 #ifdef COMPAT_FREEBSD32 3258 } else { 3259 /* 32bit on 64bit */ 3260 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3261 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 3262 len = ioc32->mfi_sgl[i].iov_len; 3263 } 3264 #endif 3265 error = copyin(addr, temp, len); 3266 if (error != 0) { 3267 device_printf(sc->mfi_dev, 3268 "Copy in failed\n"); 3269 goto out; 3270 } 3271 temp = &temp[len]; 3272 } 3273 } 3274 } 3275 3276 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 3277 locked = mfi_config_lock(sc, 3278 cm->cm_frame->dcmd.opcode); 3279 3280 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 3281 cm->cm_frame->pass.sense_addr_lo = 3282 (uint32_t)cm->cm_sense_busaddr; 3283 cm->cm_frame->pass.sense_addr_hi = 3284 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 3285 } 3286 mtx_lock(&sc->mfi_io_lock); 3287 skip_pre_post = mfi_check_for_sscd (sc, cm); 3288 if (!skip_pre_post) { 3289 error = mfi_check_command_pre(sc, cm); 3290 if (error) { 3291 mtx_unlock(&sc->mfi_io_lock); 3292 goto out; 3293 } 3294 } 3295 if ((error = mfi_wait_command(sc, cm)) != 0) { 3296 device_printf(sc->mfi_dev, 3297 "Controller polled failed\n"); 3298 mtx_unlock(&sc->mfi_io_lock); 3299 goto out; 3300 } 3301 if (!skip_pre_post) { 3302 mfi_check_command_post(sc, cm); 3303 } 3304 mtx_unlock(&sc->mfi_io_lock); 3305 3306 if (cm->cm_frame->header.cmd != MFI_CMD_STP) { 3307 temp = data; 3308 if ((cm->cm_flags & MFI_CMD_DATAIN) || 3309 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 3310 for (i = 0; i < ioc->mfi_sge_count; i++) { 3311 #ifdef COMPAT_FREEBSD32 3312 if (cmd == MFI_CMD) { 3313 #endif 3314 /* Native */ 3315 addr = ioc->mfi_sgl[i].iov_base; 3316 len = ioc->mfi_sgl[i].iov_len; 3317 #ifdef COMPAT_FREEBSD32 3318 } else { 3319 /* 32bit on 64bit */ 3320 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3321 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 3322 len = ioc32->mfi_sgl[i].iov_len; 3323 } 3324 #endif 3325 error = copyout(temp, addr, len); 3326 if (error != 0) { 3327 device_printf(sc->mfi_dev, 3328 "Copy out failed\n"); 3329 goto out; 3330 } 3331 temp = &temp[len]; 3332 } 3333 } 3334 } 3335 3336 if (ioc->mfi_sense_len) { 3337 /* get user-space sense ptr then copy out sense */ 3338 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], 3339 &sense_ptr.sense_ptr_data[0], 3340 sizeof(sense_ptr.sense_ptr_data)); 3341 #ifdef COMPAT_FREEBSD32 3342 if (cmd != MFI_CMD) { 3343 /* 3344 * not 64bit native so zero out any address 3345 * over 32bit */ 3346 sense_ptr.addr.high = 0; 3347 } 3348 #endif 3349 error = copyout(cm->cm_sense, sense_ptr.user_space, 3350 ioc->mfi_sense_len); 3351 if (error != 0) { 3352 device_printf(sc->mfi_dev, 3353 "Copy out failed\n"); 3354 goto out; 3355 } 3356 } 3357 3358 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; 3359 out: 3360 mfi_config_unlock(sc, locked); 3361 if (data) 3362 free(data, M_MFIBUF); 3363 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3364 for (i = 0; i < 2; i++) { 3365 if (sc->kbuff_arr[i]) { 3366 if (sc->mfi_kbuff_arr_busaddr != 0) 3367 bus_dmamap_unload( 3368 sc->mfi_kbuff_arr_dmat[i], 3369 sc->mfi_kbuff_arr_dmamap[i] 3370 ); 3371 if (sc->kbuff_arr[i] != NULL) 3372 bus_dmamem_free( 3373 sc->mfi_kbuff_arr_dmat[i], 3374 sc->kbuff_arr[i], 3375 sc->mfi_kbuff_arr_dmamap[i] 3376 ); 3377 if (sc->mfi_kbuff_arr_dmat[i] != NULL) 3378 
bus_dma_tag_destroy( 3379 sc->mfi_kbuff_arr_dmat[i]); 3380 } 3381 } 3382 } 3383 if (cm) { 3384 mtx_lock(&sc->mfi_io_lock); 3385 mfi_release_command(cm); 3386 mtx_unlock(&sc->mfi_io_lock); 3387 } 3388 3389 break; 3390 } 3391 case MFI_SET_AEN: 3392 aen = (struct mfi_ioc_aen *)arg; 3393 mtx_lock(&sc->mfi_io_lock); 3394 error = mfi_aen_register(sc, aen->aen_seq_num, 3395 aen->aen_class_locale); 3396 mtx_unlock(&sc->mfi_io_lock); 3397 3398 break; 3399 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 3400 { 3401 devclass_t devclass; 3402 struct mfi_linux_ioc_packet l_ioc; 3403 int adapter; 3404 3405 devclass = devclass_find("mfi"); 3406 if (devclass == NULL) 3407 return (ENOENT); 3408 3409 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 3410 if (error) 3411 return (error); 3412 adapter = l_ioc.lioc_adapter_no; 3413 sc = devclass_get_softc(devclass, adapter); 3414 if (sc == NULL) 3415 return (ENOENT); 3416 return (mfi_linux_ioctl_int(sc->mfi_cdev, 3417 cmd, arg, flag, td)); 3418 break; 3419 } 3420 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 3421 { 3422 devclass_t devclass; 3423 struct mfi_linux_ioc_aen l_aen; 3424 int adapter; 3425 3426 devclass = devclass_find("mfi"); 3427 if (devclass == NULL) 3428 return (ENOENT); 3429 3430 error = copyin(arg, &l_aen, sizeof(l_aen)); 3431 if (error) 3432 return (error); 3433 adapter = l_aen.laen_adapter_no; 3434 sc = devclass_get_softc(devclass, adapter); 3435 if (sc == NULL) 3436 return (ENOENT); 3437 return (mfi_linux_ioctl_int(sc->mfi_cdev, 3438 cmd, arg, flag, td)); 3439 break; 3440 } 3441 #ifdef COMPAT_FREEBSD32 3442 case MFIIO_PASSTHRU32: 3443 if (!SV_CURPROC_FLAG(SV_ILP32)) { 3444 error = ENOTTY; 3445 break; 3446 } 3447 iop_swab.ioc_frame = iop32->ioc_frame; 3448 iop_swab.buf_size = iop32->buf_size; 3449 iop_swab.buf = PTRIN(iop32->buf); 3450 iop = &iop_swab; 3451 /* FALLTHROUGH */ 3452 #endif 3453 case MFIIO_PASSTHRU: 3454 error = mfi_user_command(sc, iop); 3455 #ifdef COMPAT_FREEBSD32 3456 if (cmd == MFIIO_PASSTHRU32) 3457 iop32->ioc_frame = iop_swab.ioc_frame; 3458 #endif 3459 break; 3460 default: 3461 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 3462 error = ENOTTY; 3463 break; 3464 } 3465 3466 return (error); 3467 } 3468 3469 static int 3470 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 3471 { 3472 struct mfi_softc *sc; 3473 struct mfi_linux_ioc_packet l_ioc; 3474 struct mfi_linux_ioc_aen l_aen; 3475 struct mfi_command *cm = NULL; 3476 struct mfi_aen *mfi_aen_entry; 3477 union mfi_sense_ptr sense_ptr; 3478 uint32_t context = 0; 3479 uint8_t *data = NULL, *temp; 3480 int i; 3481 int error, locked; 3482 3483 sc = dev->si_drv1; 3484 error = 0; 3485 switch (cmd) { 3486 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 3487 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 3488 if (error != 0) 3489 return (error); 3490 3491 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 3492 return (EINVAL); 3493 } 3494 3495 mtx_lock(&sc->mfi_io_lock); 3496 if ((cm = mfi_dequeue_free(sc)) == NULL) { 3497 mtx_unlock(&sc->mfi_io_lock); 3498 return (EBUSY); 3499 } 3500 mtx_unlock(&sc->mfi_io_lock); 3501 locked = 0; 3502 3503 /* 3504 * save off original context since copying from user 3505 * will clobber some data 3506 */ 3507 context = cm->cm_frame->header.context; 3508 3509 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 3510 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 3511 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 3512 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 3513 
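/*
 * Scrub the status fields of the frame that was just copied in from
 * userland before it is handed to the firmware.
 */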
cm->cm_frame->header.scsi_status = 0; 3514 cm->cm_frame->header.pad0 = 0; 3515 if (l_ioc.lioc_sge_count) 3516 cm->cm_sg = 3517 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 3518 cm->cm_flags = 0; 3519 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 3520 cm->cm_flags |= MFI_CMD_DATAIN; 3521 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 3522 cm->cm_flags |= MFI_CMD_DATAOUT; 3523 cm->cm_len = cm->cm_frame->header.data_len; 3524 if (cm->cm_len && 3525 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 3526 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 3527 M_WAITOK | M_ZERO); 3528 if (cm->cm_data == NULL) { 3529 device_printf(sc->mfi_dev, "Malloc failed\n"); 3530 goto out; 3531 } 3532 } else { 3533 cm->cm_data = 0; 3534 } 3535 3536 /* restore header context */ 3537 cm->cm_frame->header.context = context; 3538 3539 temp = data; 3540 if (cm->cm_flags & MFI_CMD_DATAOUT) { 3541 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 3542 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 3543 temp, 3544 l_ioc.lioc_sgl[i].iov_len); 3545 if (error != 0) { 3546 device_printf(sc->mfi_dev, 3547 "Copy in failed\n"); 3548 goto out; 3549 } 3550 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 3551 } 3552 } 3553 3554 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 3555 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 3556 3557 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 3558 cm->cm_frame->pass.sense_addr_lo = 3559 (uint32_t)cm->cm_sense_busaddr; 3560 cm->cm_frame->pass.sense_addr_hi = 3561 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 3562 } 3563 3564 mtx_lock(&sc->mfi_io_lock); 3565 error = mfi_check_command_pre(sc, cm); 3566 if (error) { 3567 mtx_unlock(&sc->mfi_io_lock); 3568 goto out; 3569 } 3570 3571 if ((error = mfi_wait_command(sc, cm)) != 0) { 3572 device_printf(sc->mfi_dev, 3573 "Controller polled failed\n"); 3574 mtx_unlock(&sc->mfi_io_lock); 3575 goto out; 3576 } 3577 3578 mfi_check_command_post(sc, cm); 3579 mtx_unlock(&sc->mfi_io_lock); 3580 3581 temp = data; 3582 if (cm->cm_flags & MFI_CMD_DATAIN) { 3583 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 3584 error = copyout(temp, 3585 PTRIN(l_ioc.lioc_sgl[i].iov_base), 3586 l_ioc.lioc_sgl[i].iov_len); 3587 if (error != 0) { 3588 device_printf(sc->mfi_dev, 3589 "Copy out failed\n"); 3590 goto out; 3591 } 3592 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 3593 } 3594 } 3595 3596 if (l_ioc.lioc_sense_len) { 3597 /* get user-space sense ptr then copy out sense */ 3598 bcopy(&((struct mfi_linux_ioc_packet*)arg) 3599 ->lioc_frame.raw[l_ioc.lioc_sense_off], 3600 &sense_ptr.sense_ptr_data[0], 3601 sizeof(sense_ptr.sense_ptr_data)); 3602 #ifdef __amd64__ 3603 /* 3604 * only 32bit Linux support so zero out any 3605 * address over 32bit 3606 */ 3607 sense_ptr.addr.high = 0; 3608 #endif 3609 error = copyout(cm->cm_sense, sense_ptr.user_space, 3610 l_ioc.lioc_sense_len); 3611 if (error != 0) { 3612 device_printf(sc->mfi_dev, 3613 "Copy out failed\n"); 3614 goto out; 3615 } 3616 } 3617 3618 error = copyout(&cm->cm_frame->header.cmd_status, 3619 &((struct mfi_linux_ioc_packet*)arg) 3620 ->lioc_frame.hdr.cmd_status, 3621 1); 3622 if (error != 0) { 3623 device_printf(sc->mfi_dev, 3624 "Copy out failed\n"); 3625 goto out; 3626 } 3627 3628 out: 3629 mfi_config_unlock(sc, locked); 3630 if (data) 3631 free(data, M_MFIBUF); 3632 if (cm) { 3633 mtx_lock(&sc->mfi_io_lock); 3634 mfi_release_command(cm); 3635 mtx_unlock(&sc->mfi_io_lock); 3636 } 3637 3638 return (error); 3639 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 3640 error = 
copyin(arg, &l_aen, sizeof(l_aen)); 3641 if (error != 0) 3642 return (error); 3643 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); 3644 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, 3645 M_WAITOK); 3646 mtx_lock(&sc->mfi_io_lock); 3647 if (mfi_aen_entry != NULL) { 3648 mfi_aen_entry->p = curproc; 3649 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, 3650 aen_link); 3651 } 3652 error = mfi_aen_register(sc, l_aen.laen_seq_num, 3653 l_aen.laen_class_locale); 3654 3655 if (error != 0) { 3656 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 3657 aen_link); 3658 free(mfi_aen_entry, M_MFIBUF); 3659 } 3660 mtx_unlock(&sc->mfi_io_lock); 3661 3662 return (error); 3663 default: 3664 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 3665 error = ENOENT; 3666 break; 3667 } 3668 3669 return (error); 3670 } 3671 3672 static int 3673 mfi_poll(struct cdev *dev, int poll_events, struct thread *td) 3674 { 3675 struct mfi_softc *sc; 3676 int revents = 0; 3677 3678 sc = dev->si_drv1; 3679 3680 if (poll_events & (POLLIN | POLLRDNORM)) { 3681 if (sc->mfi_aen_triggered != 0) { 3682 revents |= poll_events & (POLLIN | POLLRDNORM); 3683 sc->mfi_aen_triggered = 0; 3684 } 3685 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { 3686 revents |= POLLERR; 3687 } 3688 } 3689 3690 if (revents == 0) { 3691 if (poll_events & (POLLIN | POLLRDNORM)) { 3692 sc->mfi_poll_waiting = 1; 3693 selrecord(td, &sc->mfi_select); 3694 } 3695 } 3696 3697 return revents; 3698 } 3699 3700 static void 3701 mfi_dump_all(void) 3702 { 3703 struct mfi_softc *sc; 3704 struct mfi_command *cm; 3705 devclass_t dc; 3706 time_t deadline; 3707 int timedout; 3708 int i; 3709 3710 dc = devclass_find("mfi"); 3711 if (dc == NULL) { 3712 printf("No mfi dev class\n"); 3713 return; 3714 } 3715 3716 for (i = 0; ; i++) { 3717 sc = devclass_get_softc(dc, i); 3718 if (sc == NULL) 3719 break; 3720 device_printf(sc->mfi_dev, "Dumping\n\n"); 3721 timedout = 0; 3722 deadline = time_uptime - mfi_cmd_timeout; 3723 mtx_lock(&sc->mfi_io_lock); 3724 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 3725 if (cm->cm_timestamp <= deadline) { 3726 device_printf(sc->mfi_dev, 3727 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3728 cm, (int)(time_uptime - cm->cm_timestamp)); 3729 MFI_PRINT_CMD(cm); 3730 timedout++; 3731 } 3732 } 3733 3734 #if 0 3735 if (timedout) 3736 MFI_DUMP_CMDS(sc); 3737 #endif 3738 3739 mtx_unlock(&sc->mfi_io_lock); 3740 } 3741 3742 return; 3743 } 3744 3745 static void 3746 mfi_timeout(void *data) 3747 { 3748 struct mfi_softc *sc = (struct mfi_softc *)data; 3749 struct mfi_command *cm, *tmp; 3750 time_t deadline; 3751 int timedout = 0; 3752 3753 deadline = time_uptime - mfi_cmd_timeout; 3754 if (sc->adpreset == 0) { 3755 if (!mfi_tbolt_reset(sc)) { 3756 callout_reset(&sc->mfi_watchdog_callout, 3757 mfi_cmd_timeout * hz, mfi_timeout, sc); 3758 return; 3759 } 3760 } 3761 mtx_lock(&sc->mfi_io_lock); 3762 TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) { 3763 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm) 3764 continue; 3765 if (cm->cm_timestamp <= deadline) { 3766 if (sc->adpreset != 0 && sc->issuepend_done == 0) { 3767 cm->cm_timestamp = time_uptime; 3768 } else { 3769 device_printf(sc->mfi_dev, 3770 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3771 cm, (int)(time_uptime - cm->cm_timestamp) 3772 ); 3773 MFI_PRINT_CMD(cm); 3774 MFI_VALIDATE_CMD(sc, cm); 3775 /* 3776 * While commands can get stuck forever we do 3777 * not fail them as there is no way to tell if 3778 * the controller has actually processed them 3779 * or not. 
3780 *
3781 * In addition, it's very likely that forcibly
3782 * failing a command here would cause a panic,
3783 * e.g. in UFS.
3784 */
3785 timedout++;
3786 }
3787 }
3788 }
3789
3790 #if 0
3791 if (timedout)
3792 MFI_DUMP_CMDS(sc);
3793 #endif
3794
3795 mtx_unlock(&sc->mfi_io_lock);
3796
3797 callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3798 mfi_timeout, sc);
3799
3800 if (0)
3801 mfi_dump_all();
3802 return;
3803 }
3804
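/*
 * Note that mfi_timeout() re-arms itself via callout_reset() above, and
 * it deliberately skips sc->mfi_aen_cm and sc->mfi_map_sync_cm, since
 * those commands sit on the busy queue indefinitely by design.
 */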