/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static int	mfi_build_syspd_cdb(struct mfi_pass_frame *pass,
		    uint32_t block_count, uint64_t lba, uint8_t byte2,
		    int readop);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,
		    struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc,
		    struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc,
		    struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc,
		    struct mfi_command *cm);
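
/*
 * Driver-wide tunables.  Each TUNABLE_INT/SYSCTL_INT pair below makes the
 * knob settable as a loader tunable (e.g. hw.mfi.max_cmds in loader.conf)
 * and, where marked CTLFLAG_RW, also at runtime via sysctl(8).
 */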
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
    0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = mfi_open,
	.d_close = mfi_close,
	.d_ioctl = mfi_ioctl,
	.d_poll = mfi_poll,
	.d_name = "mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}
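
/*
 * Hand a frame to the controller by writing its bus address into the
 * inbound queue port.  On xscale parts the address is shifted right by 3
 * with the extra frame count in the low bits; on ppc/skinny parts the
 * frame count is shifted left by 1 and bit 0 is set.
 */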
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}
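
/*
 * Main attach path: bring the firmware to the READY state, size and
 * allocate the DMA-able frame, sense, and reply-queue areas, hook up the
 * interrupt, and register the management cdev and child devices.
 */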
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsize */
	    1,				/* nsegments */
	    MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
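	/*
	 * Example of the sizing below (values are illustrative and depend
	 * on the kernel config): with MFI_MAXPHYS = 128KiB and PAGE_SIZE =
	 * 4KiB, a maximally misaligned buffer needs (128KiB / 4KiB) + 1 =
	 * 33 scatter/gather entries, so mfi_max_sge is clamped to 33 even
	 * if the firmware advertises more.
	 */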
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt Support get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    tb_mem_size,		/* maxsize */
		    1,				/* nsegments */
		    tb_mem_size,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		    0x100, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MFI_FRAME_SIZE,		/* maxsize */
		    1,				/* nsegments */
		    MFI_FRAME_SIZE,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return 0;
		}

		/*
		 * Allocate a DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  We keep it separate from what was allocated
		 * above for the request and reply descriptors to avoid
		 * confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    tb_mem_size,		/* maxsize */
		    1,				/* nsegments */
		    tb_mem_size,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    sc->mfi_max_sge,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &sc->mfi_io_lock,		/* lockfuncarg */
	    &sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    commsz,			/* maxsize */
	    1,				/* nsegments */
	    commsz,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
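	/*
	 * Worked example of the sizing below (sizes are illustrative): with
	 * 33 S/G entries of 12 bytes each (struct mfi_sg64), the list needs
	 * 396 bytes, i.e. (396 - 1) / 64 + 2 = 8 frames of 64 bytes, so
	 * each command occupies 512 bytes of the frame pool.
	 */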
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
	    64, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    framessz,			/* maxsize */
	    1,				/* nsegments */
	    framessz,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);

	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sensesz,			/* maxsize */
	    1,				/* nsegments */
	    sensesz,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to the operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return error;
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE | INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE | INTR_TYPE_BIO, NULL, mfi_intr, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_sync_map_info(sc);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr +
		    MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}
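
/*
 * Commands are returned to the free pool here rather than freed; the
 * frame and sense buffers were carved out of the large DMA areas set up
 * in mfi_attach() and live for the lifetime of the softc.
 */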
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT | M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}
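
/*
 * Typical usage of mfi_dcmd_command() above, as in
 * mfi_get_controller_info() below: allocate the command and data buffer,
 * mark the transfer direction, issue it polled, then sync and unload the
 * DMA map:
 *
 *	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
 *	    (void **)&ci, sizeof(*ci));
 *	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 *	error = mfi_mapcmd(sc, cm);
 */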
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	free(log_state, M_MFIBUF);

	return 0;
}
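
/*
 * Issue a command and sleep until its completion handler wakes us;
 * mfi_io_lock must be held, and msleep(9) drops it while sleeping.
 */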
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat,
			    sc->request_message_pool, sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);
		/* End LSIP200113393 */

		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
			if (sc->mfi_cmd_pool_tbolt != NULL) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
		}
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}
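
/*
 * Interrupt handler for the legacy (non-ThunderBolt) reply path.  The
 * firmware posts completed command contexts into hw_reply_q and advances
 * the producer index hw_pi; we consume entries at the consumer index
 * hw_ci and hand each context back through mfi_complete().  Both indices
 * wrap at mfi_max_fw_cmds + 1, the queue depth set in mfi_comms_init().
 */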
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->mfi_map_sync_cm != NULL)
		mfi_abort(sc, sc->mfi_map_sync_cm);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}
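
/*
 * Decode and log one asynchronous event, then react to the hot-plug
 * related ones: a host bus scan request or PD insertion/removal triggers
 * a system PD re-probe (when hw.mfi.detect_jbod_change is set), and an
 * LD going offline detaches the corresponding mfid child device.
 */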
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events logged
		 * since the last shutdown; avoid acting on these old
		 * events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}
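
/*
 * Register an asynchronous event notification with the firmware.  If an
 * AEN command is already outstanding and the new registration is not a
 * subset of it, merge the two: OR the locales together, keep the more
 * verbose (numerically lower) class, abort the old command, and
 * re-register.
 */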
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mtx_lock(&sc->mfi_io_lock);
			mfi_abort(sc, sc->mfi_aen_cm);
			mtx_unlock(&sc->mfi_io_lock);
		}
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	mtx_unlock(&sc->mfi_io_lock);
	if (error) {
		goto out;
	}

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mtx_lock(&sc->mfi_io_lock);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		mtx_unlock(&sc->mfi_io_lock);
		mfi_aen_setup(sc, seq);
		mtx_lock(&sc->mfi_io_lock);
	}
}

#define MAX_EVENTS 15
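
/*
 * Drain the controller's event log.  Events are fetched in batches of
 * MAX_EVENTS starting at start_seq; the loop ends when the firmware
 * returns MFI_STAT_NOT_FOUND or an event at or beyond stop_seq is seen
 * (the log is a circular buffer, so the wrap case is handled below).
 */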
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
	    * (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			free(el, M_MFIBUF);
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);

		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
			break;
		}

		mtx_unlock(&sc->mfi_io_lock);
		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
			break;
		}
		mtx_lock(&sc->mfi_io_lock);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			mtx_lock(&sc->mfi_io_lock);
			mfi_queue_evt(sc, &el->event[i]);
			mtx_unlock(&sc->mfi_io_lock);
		}
		seq = el->event[el->count - 1].seq + 1;
	}

	free(el, M_MFIBUF);
	return (0);
}

static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1)
		mfi_add_ld_complete(cm);
	else {
		mfi_release_command(cm);
		if (ld_info)	/* SSCD drives free ld_info here */
			free(ld_info, M_MFIBUF);
	}
	return (0);
}

static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
sizeof(*pd_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
		    error);
		if (pd_info)
			free(pd_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	dcmd->header.scsi_status = 0;
	dcmd->header.pad0 = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get physical drive info %d\n", id);
		free(pd_info, M_MFIBUF);
		return (0);
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_add_sys_pd_complete(cm);
	return (0);
}

static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		free(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		free(pd_info, M_MFIBUF);
		mtx_unlock(&Giant);
		mtx_lock(&sc->mfi_io_lock);
		return;
	}

	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}

static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
	struct bio *bio;
	struct mfi_command *cm = NULL;

	/* Reserve two commands to avoid starvation for ioctls. */
	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
		return (NULL);
	}
	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
		return (NULL);
	}
	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
		cm = mfi_build_syspdio(sc, bio);
	}
	if (!cm)
		mfi_enqueue_bio(sc, bio);
	return cm;
}

static int
mfi_build_syspd_cdb(struct mfi_pass_frame *pass, uint32_t block_count,
    uint64_t lba, uint8_t byte2, int readop)
{
	int cdb_len;

	if (((lba & 0x1fffff) == lba)
	    && ((block_count & 0xff) == block_count)
	    && (byte2 == 0)) {
		/* We can fit in a 6 byte cdb */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)&pass->cdb;
		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Need a 10 byte CDB */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)&pass->cdb;
		scsi_cmd->opcode = readop ?
READ_10 : WRITE_10; 2012 scsi_cmd->byte2 = byte2; 2013 scsi_ulto4b(lba, scsi_cmd->addr); 2014 scsi_cmd->reserved = 0; 2015 scsi_ulto2b(block_count, scsi_cmd->length); 2016 scsi_cmd->control = 0; 2017 cdb_len = sizeof(*scsi_cmd); 2018 } else if (((block_count & 0xffffffff) == block_count) && 2019 ((lba & 0xffffffff) == lba)) { 2020 /* Block count is too big for 10 byte CDB use a 12 byte CDB */ 2021 struct scsi_rw_12 *scsi_cmd; 2022 2023 scsi_cmd = (struct scsi_rw_12 *)&pass->cdb; 2024 scsi_cmd->opcode = readop ? READ_12 : WRITE_12; 2025 scsi_cmd->byte2 = byte2; 2026 scsi_ulto4b(lba, scsi_cmd->addr); 2027 scsi_cmd->reserved = 0; 2028 scsi_ulto4b(block_count, scsi_cmd->length); 2029 scsi_cmd->control = 0; 2030 cdb_len = sizeof(*scsi_cmd); 2031 } else { 2032 /* 2033 * 16 byte CDB. We'll only get here if the LBA is larger 2034 * than 2^32 2035 */ 2036 struct scsi_rw_16 *scsi_cmd; 2037 2038 scsi_cmd = (struct scsi_rw_16 *)&pass->cdb; 2039 scsi_cmd->opcode = readop ? READ_16 : WRITE_16; 2040 scsi_cmd->byte2 = byte2; 2041 scsi_u64to8b(lba, scsi_cmd->addr); 2042 scsi_cmd->reserved = 0; 2043 scsi_ulto4b(block_count, scsi_cmd->length); 2044 scsi_cmd->control = 0; 2045 cdb_len = sizeof(*scsi_cmd); 2046 } 2047 2048 return cdb_len; 2049 } 2050 2051 static struct mfi_command * 2052 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio) 2053 { 2054 struct mfi_command *cm; 2055 struct mfi_pass_frame *pass; 2056 int flags = 0; 2057 uint8_t cdb_len; 2058 uint32_t block_count, context = 0; 2059 2060 if ((cm = mfi_dequeue_free(sc)) == NULL) 2061 return (NULL); 2062 2063 /* Zero out the MFI frame */ 2064 context = cm->cm_frame->header.context; 2065 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2066 cm->cm_frame->header.context = context; 2067 pass = &cm->cm_frame->pass; 2068 bzero(pass->cdb, 16); 2069 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2070 switch (bio->bio_cmd & 0x03) { 2071 case BIO_READ: 2072 flags = MFI_CMD_DATAIN; 2073 break; 2074 case BIO_WRITE: 2075 flags = MFI_CMD_DATAOUT; 2076 break; 2077 default: 2078 /* TODO: what about BIO_DELETE??? 
*/ 2079 panic("Unsupported bio command"); 2080 } 2081 2082 /* Cheat with the sector length to avoid a non-constant division */ 2083 block_count = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2084 /* Fill the LBA and Transfer length in CDB */ 2085 cdb_len = mfi_build_syspd_cdb(pass, block_count, bio->bio_pblkno, 0, 2086 flags == MFI_CMD_DATAIN); 2087 2088 pass->header.target_id = (uintptr_t)bio->bio_driver1; 2089 pass->header.timeout = 0; 2090 pass->header.flags = 0; 2091 pass->header.scsi_status = 0; 2092 pass->header.sense_len = MFI_SENSE_LEN; 2093 pass->header.data_len = bio->bio_bcount; 2094 pass->header.cdb_len = cdb_len; 2095 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2096 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2097 cm->cm_complete = mfi_bio_complete; 2098 cm->cm_private = bio; 2099 cm->cm_data = bio->bio_data; 2100 cm->cm_len = bio->bio_bcount; 2101 cm->cm_sg = &pass->sgl; 2102 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2103 cm->cm_flags = flags; 2104 return (cm); 2105 } 2106 2107 static struct mfi_command * 2108 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio) 2109 { 2110 struct mfi_io_frame *io; 2111 struct mfi_command *cm; 2112 int flags; 2113 uint32_t blkcount; 2114 uint32_t context = 0; 2115 2116 if ((cm = mfi_dequeue_free(sc)) == NULL) 2117 return (NULL); 2118 2119 /* Zero out the MFI frame */ 2120 context = cm->cm_frame->header.context; 2121 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2122 cm->cm_frame->header.context = context; 2123 io = &cm->cm_frame->io; 2124 switch (bio->bio_cmd & 0x03) { 2125 case BIO_READ: 2126 io->header.cmd = MFI_CMD_LD_READ; 2127 flags = MFI_CMD_DATAIN; 2128 break; 2129 case BIO_WRITE: 2130 io->header.cmd = MFI_CMD_LD_WRITE; 2131 flags = MFI_CMD_DATAOUT; 2132 break; 2133 default: 2134 /* TODO: what about BIO_DELETE??? 
*/ 2135 panic("Unsupported bio command"); 2136 } 2137 2138 /* Cheat with the sector length to avoid a non-constant division */ 2139 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2140 io->header.target_id = (uintptr_t)bio->bio_driver1; 2141 io->header.timeout = 0; 2142 io->header.flags = 0; 2143 io->header.scsi_status = 0; 2144 io->header.sense_len = MFI_SENSE_LEN; 2145 io->header.data_len = blkcount; 2146 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2147 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2148 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 2149 io->lba_lo = bio->bio_pblkno & 0xffffffff; 2150 cm->cm_complete = mfi_bio_complete; 2151 cm->cm_private = bio; 2152 cm->cm_data = bio->bio_data; 2153 cm->cm_len = bio->bio_bcount; 2154 cm->cm_sg = &io->sgl; 2155 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2156 cm->cm_flags = flags; 2157 return (cm); 2158 } 2159 2160 static void 2161 mfi_bio_complete(struct mfi_command *cm) 2162 { 2163 struct bio *bio; 2164 struct mfi_frame_header *hdr; 2165 struct mfi_softc *sc; 2166 2167 bio = cm->cm_private; 2168 hdr = &cm->cm_frame->header; 2169 sc = cm->cm_sc; 2170 2171 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { 2172 bio->bio_flags |= BIO_ERROR; 2173 bio->bio_error = EIO; 2174 device_printf(sc->mfi_dev, "I/O error, status= %d " 2175 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); 2176 mfi_print_sense(cm->cm_sc, cm->cm_sense); 2177 } else if (cm->cm_error != 0) { 2178 bio->bio_flags |= BIO_ERROR; 2179 } 2180 2181 mfi_release_command(cm); 2182 mfi_disk_complete(bio); 2183 } 2184 2185 void 2186 mfi_startio(struct mfi_softc *sc) 2187 { 2188 struct mfi_command *cm; 2189 struct ccb_hdr *ccbh; 2190 2191 for (;;) { 2192 /* Don't bother if we're short on resources */ 2193 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 2194 break; 2195 2196 /* Try a command that has already been prepared */ 2197 cm = mfi_dequeue_ready(sc); 2198 2199 if (cm == NULL) { 2200 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 2201 cm = sc->mfi_cam_start(ccbh); 2202 } 2203 2204 /* Nope, so look for work on the bioq */ 2205 if (cm == NULL) 2206 cm = mfi_bio_command(sc); 2207 2208 /* No work available, so exit */ 2209 if (cm == NULL) 2210 break; 2211 2212 /* Send the command to the controller */ 2213 if (mfi_mapcmd(sc, cm) != 0) { 2214 mfi_requeue_ready(cm); 2215 break; 2216 } 2217 } 2218 } 2219 2220 int 2221 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 2222 { 2223 int error, polled; 2224 2225 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2226 2227 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { 2228 polled = (cm->cm_flags & MFI_CMD_POLLED) ? 
BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		if (sc->MFA_enabled)
			error = mfi_tbolt_send_frame(sc, cm);
		else
			error = mfi_send_frame(sc, cm);
	}

	return (error);
}

static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;
	int sge_size;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	if (error) {
		printf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		return;
	}
	/*
	 * Use the IEEE sgl only for I/Os on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		sge_size = sizeof(struct mfi_sg_skinny);
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
		sge_size = sc->mfi_sge_size;
	}

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
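	/*
	 * Worked example (sizes assumed for illustration): a 64-byte base
	 * frame followed by eight 12-byte sg64 entries comes to 160 bytes,
	 * so with MFI_FRAME_SIZE at 64 the division above yields
	 * (160 - 1) / 64 = 2 extra frames trailing the first one.
	 */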
	if (sc->MFA_enabled)
		mfi_tbolt_send_frame(sc, cm);
	else
		mfi_send_frame(sc, cm);

	return;
}

static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}

void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
	int dir;

	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
		dir = 0;
		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
			dir |= BUS_DMASYNC_POSTREAD;
		if (cm->cm_flags & MFI_CMD_DATAOUT)
			dir |= BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		cm->cm_flags &= ~MFI_CMD_MAPPED;
	}

	cm->cm_flags |= MFI_CMD_COMPLETED;

	if (cm->cm_complete != NULL)
		cm->cm_complete(cm);
	else
		wakeup(cm);
}

static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
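	/*
	 * The high half of the 64-bit frame address follows; on controllers
	 * whose frames are allocated below 4 GiB it simply ends up zero.
	 */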
2449 abort->abort_mfi_addr_hi = 2450 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32); 2451 cm->cm_data = NULL; 2452 cm->cm_flags = MFI_CMD_POLLED; 2453 2454 if (sc->mfi_aen_cm) 2455 sc->cm_aen_abort = 1; 2456 if (sc->mfi_map_sync_cm) 2457 sc->cm_map_abort = 1; 2458 mfi_mapcmd(sc, cm); 2459 mfi_release_command(cm); 2460 2461 while (i < 5 && sc->mfi_aen_cm != NULL) { 2462 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 2463 5 * hz); 2464 i++; 2465 } 2466 while (i < 5 && sc->mfi_map_sync_cm != NULL) { 2467 msleep(&sc->mfi_map_sync_cm, &sc->mfi_io_lock, 0, "mfiabort", 2468 5 * hz); 2469 i++; 2470 } 2471 2472 return (0); 2473 } 2474 2475 int 2476 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2477 int len) 2478 { 2479 struct mfi_command *cm; 2480 struct mfi_io_frame *io; 2481 int error; 2482 uint32_t context = 0; 2483 2484 if ((cm = mfi_dequeue_free(sc)) == NULL) 2485 return (EBUSY); 2486 2487 /* Zero out the MFI frame */ 2488 context = cm->cm_frame->header.context; 2489 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2490 cm->cm_frame->header.context = context; 2491 2492 io = &cm->cm_frame->io; 2493 io->header.cmd = MFI_CMD_LD_WRITE; 2494 io->header.target_id = id; 2495 io->header.timeout = 0; 2496 io->header.flags = 0; 2497 io->header.scsi_status = 0; 2498 io->header.sense_len = MFI_SENSE_LEN; 2499 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2500 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2501 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2502 io->lba_hi = (lba & 0xffffffff00000000) >> 32; 2503 io->lba_lo = lba & 0xffffffff; 2504 cm->cm_data = virt; 2505 cm->cm_len = len; 2506 cm->cm_sg = &io->sgl; 2507 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2508 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 2509 2510 error = mfi_mapcmd(sc, cm); 2511 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2512 BUS_DMASYNC_POSTWRITE); 2513 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2514 mfi_release_command(cm); 2515 2516 return (error); 2517 } 2518 2519 int 2520 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2521 int len) 2522 { 2523 struct mfi_command *cm; 2524 struct mfi_pass_frame *pass; 2525 int error; 2526 uint32_t blkcount; 2527 2528 if ((cm = mfi_dequeue_free(sc)) == NULL) 2529 return (EBUSY); 2530 2531 pass = &cm->cm_frame->pass; 2532 bzero(pass->cdb, 16); 2533 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2534 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2535 pass->header.target_id = id; 2536 pass->header.timeout = 0; 2537 pass->header.flags = 0; 2538 pass->header.scsi_status = 0; 2539 pass->header.sense_len = MFI_SENSE_LEN; 2540 pass->header.data_len = len; 2541 pass->header.cdb_len = mfi_build_syspd_cdb(pass, blkcount, lba, 0, 0); 2542 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2543 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2544 cm->cm_data = virt; 2545 cm->cm_len = len; 2546 cm->cm_sg = &pass->sgl; 2547 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2548 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 2549 2550 error = mfi_mapcmd(sc, cm); 2551 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2552 BUS_DMASYNC_POSTWRITE); 2553 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2554 mfi_release_command(cm); 2555 2556 return (error); 2557 } 2558 2559 static int 2560 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2561 { 2562 struct mfi_softc *sc; 2563 int error; 2564 2565 sc = dev->si_drv1; 
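	/* Refuse new opens once a detach is under way. */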
2566 2567 mtx_lock(&sc->mfi_io_lock); 2568 if (sc->mfi_detaching) 2569 error = ENXIO; 2570 else { 2571 sc->mfi_flags |= MFI_FLAGS_OPEN; 2572 error = 0; 2573 } 2574 mtx_unlock(&sc->mfi_io_lock); 2575 2576 return (error); 2577 } 2578 2579 static int 2580 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2581 { 2582 struct mfi_softc *sc; 2583 struct mfi_aen *mfi_aen_entry, *tmp; 2584 2585 sc = dev->si_drv1; 2586 2587 mtx_lock(&sc->mfi_io_lock); 2588 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 2589 2590 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 2591 if (mfi_aen_entry->p == curproc) { 2592 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2593 aen_link); 2594 free(mfi_aen_entry, M_MFIBUF); 2595 } 2596 } 2597 mtx_unlock(&sc->mfi_io_lock); 2598 return (0); 2599 } 2600 2601 static int 2602 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 2603 { 2604 2605 switch (opcode) { 2606 case MFI_DCMD_LD_DELETE: 2607 case MFI_DCMD_CFG_ADD: 2608 case MFI_DCMD_CFG_CLEAR: 2609 case MFI_DCMD_CFG_FOREIGN_IMPORT: 2610 sx_xlock(&sc->mfi_config_lock); 2611 return (1); 2612 default: 2613 return (0); 2614 } 2615 } 2616 2617 static void 2618 mfi_config_unlock(struct mfi_softc *sc, int locked) 2619 { 2620 2621 if (locked) 2622 sx_xunlock(&sc->mfi_config_lock); 2623 } 2624 2625 /* 2626 * Perform pre-issue checks on commands from userland and possibly veto 2627 * them. 2628 */ 2629 static int 2630 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) 2631 { 2632 struct mfi_disk *ld, *ld2; 2633 int error; 2634 struct mfi_system_pd *syspd = NULL; 2635 uint16_t syspd_id; 2636 uint16_t *mbox; 2637 2638 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2639 error = 0; 2640 switch (cm->cm_frame->dcmd.opcode) { 2641 case MFI_DCMD_LD_DELETE: 2642 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2643 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 2644 break; 2645 } 2646 if (ld == NULL) 2647 error = ENOENT; 2648 else 2649 error = mfi_disk_disable(ld); 2650 break; 2651 case MFI_DCMD_CFG_CLEAR: 2652 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2653 error = mfi_disk_disable(ld); 2654 if (error) 2655 break; 2656 } 2657 if (error) { 2658 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { 2659 if (ld2 == ld) 2660 break; 2661 mfi_disk_enable(ld2); 2662 } 2663 } 2664 break; 2665 case MFI_DCMD_PD_STATE_SET: 2666 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox; 2667 syspd_id = mbox[0]; 2668 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { 2669 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { 2670 if (syspd->pd_id == syspd_id) 2671 break; 2672 } 2673 } 2674 else 2675 break; 2676 if (syspd) 2677 error = mfi_syspd_disable(syspd); 2678 break; 2679 default: 2680 break; 2681 } 2682 return (error); 2683 } 2684 2685 /* Perform post-issue checks on commands from userland. 
 */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume disappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else
			break;
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}

static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_config_data *conf_data =
	    (struct mfi_config_data *)cm->cm_data;
	struct mfi_command *ld_cm = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error = 0;

	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
	    (conf_data->ld[0].params.isSSCD == 1)) {
		error = 1;
	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "for MFI_DCMD_LD_GET_INFO %d\n", error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return 0;
		}
		ld_cm->cm_flags = MFI_CMD_DATAIN;
		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev, "failed to get log drv\n");
			mfi_release_command(ld_cm);
			free(ld_info, M_MFIBUF);
			return 0;
		}

		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return 0;
		} else
			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;

		if (ld_info->ld_config.params.isSSCD == 1)
			error = 1;

		mfi_release_command(ld_cm);
		free(ld_info, M_MFIBUF);
	}
	return error;
}
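/*
 * Stage an MFI_CMD_STP frame: shadow each user iovec with a DMA-able
 * kernel buffer (kbuff_arr), point the hardware SGL at the shadows, and
 * copy the user data in.  mfi_ioctl() tears the tags and maps back down
 * once the command completes.
 */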
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	int sge_size, error;
	struct megasas_sge *kern_sge;

	ioc = (struct mfi_ioc_packet *)arg;

	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) /
		    MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ioc->mfi_sgl[i].iov_len,	/* maxsize */
		    2,				/* nsegments */
		    ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
		    BUS_DMA_ALLOCNOW,		/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return -1;
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return error;
		}
	}

	cm->cm_flags |= MFI_CMD_MAPPED;
	return 0;
}

static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;

	if (ioc->buf_size > 0) {
		if (ioc->buf_size > 1024 * 1024)
			return (ENOMEM);
		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			free(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	mtx_lock(&sc->mfi_io_lock);
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
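	/*
	 * The DCMD occupies a single frame here; mfi_data_cb() appends the
	 * SGL and grows cm_total_frame_size once the buffer is mapped.
	 */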
cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 2918 cm->cm_data = ioc_buf; 2919 cm->cm_len = ioc->buf_size; 2920 2921 /* restore context */ 2922 cm->cm_frame->header.context = context; 2923 2924 /* Cheat since we don't know if we're writing or reading */ 2925 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 2926 2927 error = mfi_check_command_pre(sc, cm); 2928 if (error) 2929 goto out; 2930 2931 error = mfi_wait_command(sc, cm); 2932 if (error) { 2933 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 2934 goto out; 2935 } 2936 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 2937 mfi_check_command_post(sc, cm); 2938 out: 2939 mfi_release_command(cm); 2940 mtx_unlock(&sc->mfi_io_lock); 2941 mfi_config_unlock(sc, locked); 2942 if (ioc->buf_size > 0) 2943 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 2944 if (ioc_buf) 2945 free(ioc_buf, M_MFIBUF); 2946 return (error); 2947 } 2948 2949 #define PTRIN(p) ((void *)(uintptr_t)(p)) 2950 2951 static int 2952 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2953 { 2954 struct mfi_softc *sc; 2955 union mfi_statrequest *ms; 2956 struct mfi_ioc_packet *ioc; 2957 #ifdef COMPAT_FREEBSD32 2958 struct mfi_ioc_packet32 *ioc32; 2959 #endif 2960 struct mfi_ioc_aen *aen; 2961 struct mfi_command *cm = NULL; 2962 uint32_t context = 0; 2963 union mfi_sense_ptr sense_ptr; 2964 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0; 2965 size_t len; 2966 int i, res; 2967 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 2968 #ifdef COMPAT_FREEBSD32 2969 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 2970 struct mfi_ioc_passthru iop_swab; 2971 #endif 2972 int error, locked; 2973 union mfi_sgl *sgl; 2974 sc = dev->si_drv1; 2975 error = 0; 2976 2977 if (sc->adpreset) 2978 return EBUSY; 2979 2980 if (sc->hw_crit_error) 2981 return EBUSY; 2982 2983 if (sc->issuepend_done == 0) 2984 return EBUSY; 2985 2986 switch (cmd) { 2987 case MFIIO_STATS: 2988 ms = (union mfi_statrequest *)arg; 2989 switch (ms->ms_item) { 2990 case MFIQ_FREE: 2991 case MFIQ_BIO: 2992 case MFIQ_READY: 2993 case MFIQ_BUSY: 2994 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 2995 sizeof(struct mfi_qstat)); 2996 break; 2997 default: 2998 error = ENOIOCTL; 2999 break; 3000 } 3001 break; 3002 case MFIIO_QUERY_DISK: 3003 { 3004 struct mfi_query_disk *qd; 3005 struct mfi_disk *ld; 3006 3007 qd = (struct mfi_query_disk *)arg; 3008 mtx_lock(&sc->mfi_io_lock); 3009 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 3010 if (ld->ld_id == qd->array_id) 3011 break; 3012 } 3013 if (ld == NULL) { 3014 qd->present = 0; 3015 mtx_unlock(&sc->mfi_io_lock); 3016 return (0); 3017 } 3018 qd->present = 1; 3019 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 3020 qd->open = 1; 3021 bzero(qd->devname, SPECNAMELEN + 1); 3022 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 3023 mtx_unlock(&sc->mfi_io_lock); 3024 break; 3025 } 3026 case MFI_CMD: 3027 #ifdef COMPAT_FREEBSD32 3028 case MFI_CMD32: 3029 #endif 3030 { 3031 devclass_t devclass; 3032 ioc = (struct mfi_ioc_packet *)arg; 3033 int adapter; 3034 3035 adapter = ioc->mfi_adapter_no; 3036 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 3037 devclass = devclass_find("mfi"); 3038 sc = devclass_get_softc(devclass, adapter); 3039 } 3040 mtx_lock(&sc->mfi_io_lock); 3041 if ((cm = mfi_dequeue_free(sc)) == NULL) { 3042 mtx_unlock(&sc->mfi_io_lock); 3043 return (EBUSY); 3044 } 3045 mtx_unlock(&sc->mfi_io_lock); 3046 locked = 0; 3047 3048 /* 3049 * save off original context since 
copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		sgl = cm->cm_sg;
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
#ifdef COMPAT_FREEBSD32
			if (cmd == MFI_CMD) {
#endif
				/* Native */
				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
#ifdef COMPAT_FREEBSD32
			} else {
				/* 32bit on 64bit */
				ioc32 = (struct mfi_ioc_packet32 *)ioc;
				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
			}
#endif
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
			if (cm->cm_data == NULL) {
				device_printf(sc->mfi_dev, "Malloc failed\n");
				goto out;
			}
		} else {
			cm->cm_data = NULL;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			    cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				mtx_unlock(&sc->mfi_io_lock);
				goto out;
			}
		}
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}
		if (!skip_pre_post) {
			mfi_check_command_post(sc, cm);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
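			/* Copy any DATAIN result back out through the caller's SG list. */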
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef COMPAT_FREEBSD32
					if (cmd == MFI_CMD) {
#endif
						/* Native */
						addr = ioc->mfi_sgl[i].iov_base;
						len = ioc->mfi_sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
					} else {
						/* 32bit on 64bit */
						ioc32 = (struct mfi_ioc_packet32 *)ioc;
						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
						len = ioc32->mfi_sgl[i].iov_len;
					}
#endif
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef COMPAT_FREEBSD32
			if (cmd != MFI_CMD) {
				/*
				 * not 64bit native so zero out any address
				 * over 32bit
				 */
				sense_ptr.addr.high = 0;
			}
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
	}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);

		break;
	case MFI_LINUX_CMD_2:		/* Firmware Linux ioctl shim */
	{
		devclass_t devclass;
		struct mfi_linux_ioc_packet l_ioc;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error)
			return (error);
		adapter = l_ioc.lioc_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		break;
	}
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
	{
		devclass_t devclass;
		struct mfi_linux_ioc_aen l_aen;
		int adapter;

		devclass = devclass_find("mfi");
		if (devclass == NULL)
			return (ENOENT);

		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error)
			return (error);
		adapter = l_aen.laen_adapter_no;
		sc = devclass_get_softc(devclass, adapter);
		if (sc == NULL)
			return (ENOENT);
		return (mfi_linux_ioctl_int(sc->mfi_cdev,
		    cmd, arg, flag, td));
		break;
	}
#ifdef COMPAT_FREEBSD32
	case MFIIO_PASSTHRU32:
		if (!SV_CURPROC_FLAG(SV_ILP32)) {
			error = ENOTTY;
			break;
		}
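		/*
		 * Thunk the 32-bit passthru layout into the native structure
		 * and fall through to the native handler below.
		 */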
iop_swab.ioc_frame = iop32->ioc_frame; 3309 iop_swab.buf_size = iop32->buf_size; 3310 iop_swab.buf = PTRIN(iop32->buf); 3311 iop = &iop_swab; 3312 /* FALLTHROUGH */ 3313 #endif 3314 case MFIIO_PASSTHRU: 3315 error = mfi_user_command(sc, iop); 3316 #ifdef COMPAT_FREEBSD32 3317 if (cmd == MFIIO_PASSTHRU32) 3318 iop32->ioc_frame = iop_swab.ioc_frame; 3319 #endif 3320 break; 3321 default: 3322 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 3323 error = ENOTTY; 3324 break; 3325 } 3326 3327 return (error); 3328 } 3329 3330 static int 3331 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 3332 { 3333 struct mfi_softc *sc; 3334 struct mfi_linux_ioc_packet l_ioc; 3335 struct mfi_linux_ioc_aen l_aen; 3336 struct mfi_command *cm = NULL; 3337 struct mfi_aen *mfi_aen_entry; 3338 union mfi_sense_ptr sense_ptr; 3339 uint32_t context = 0; 3340 uint8_t *data = NULL, *temp; 3341 int i; 3342 int error, locked; 3343 3344 sc = dev->si_drv1; 3345 error = 0; 3346 switch (cmd) { 3347 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 3348 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 3349 if (error != 0) 3350 return (error); 3351 3352 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 3353 return (EINVAL); 3354 } 3355 3356 mtx_lock(&sc->mfi_io_lock); 3357 if ((cm = mfi_dequeue_free(sc)) == NULL) { 3358 mtx_unlock(&sc->mfi_io_lock); 3359 return (EBUSY); 3360 } 3361 mtx_unlock(&sc->mfi_io_lock); 3362 locked = 0; 3363 3364 /* 3365 * save off original context since copying from user 3366 * will clobber some data 3367 */ 3368 context = cm->cm_frame->header.context; 3369 3370 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 3371 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 3372 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 3373 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 3374 cm->cm_frame->header.scsi_status = 0; 3375 cm->cm_frame->header.pad0 = 0; 3376 if (l_ioc.lioc_sge_count) 3377 cm->cm_sg = 3378 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 3379 cm->cm_flags = 0; 3380 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 3381 cm->cm_flags |= MFI_CMD_DATAIN; 3382 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 3383 cm->cm_flags |= MFI_CMD_DATAOUT; 3384 cm->cm_len = cm->cm_frame->header.data_len; 3385 if (cm->cm_len && 3386 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 3387 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 3388 M_WAITOK | M_ZERO); 3389 if (cm->cm_data == NULL) { 3390 device_printf(sc->mfi_dev, "Malloc failed\n"); 3391 goto out; 3392 } 3393 } else { 3394 cm->cm_data = 0; 3395 } 3396 3397 /* restore header context */ 3398 cm->cm_frame->header.context = context; 3399 3400 temp = data; 3401 if (cm->cm_flags & MFI_CMD_DATAOUT) { 3402 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 3403 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 3404 temp, 3405 l_ioc.lioc_sgl[i].iov_len); 3406 if (error != 0) { 3407 device_printf(sc->mfi_dev, 3408 "Copy in failed\n"); 3409 goto out; 3410 } 3411 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 3412 } 3413 } 3414 3415 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 3416 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 3417 3418 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 3419 cm->cm_frame->pass.sense_addr_lo = 3420 (uint32_t)cm->cm_sense_busaddr; 3421 cm->cm_frame->pass.sense_addr_hi = 3422 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 3423 } 3424 3425 mtx_lock(&sc->mfi_io_lock); 3426 error = mfi_check_command_pre(sc, cm); 3427 if (error) { 3428 
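			/* The pre-issue check vetoed this command; unwind and bail. */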
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet*)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2:	/* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}

static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return revents;
}

static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc =
devclass_find("mfi"); 3572 if (dc == NULL) { 3573 printf("No mfi dev class\n"); 3574 return; 3575 } 3576 3577 for (i = 0; ; i++) { 3578 sc = devclass_get_softc(dc, i); 3579 if (sc == NULL) 3580 break; 3581 device_printf(sc->mfi_dev, "Dumping\n\n"); 3582 timedout = 0; 3583 deadline = time_uptime - MFI_CMD_TIMEOUT; 3584 mtx_lock(&sc->mfi_io_lock); 3585 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 3586 if (cm->cm_timestamp < deadline) { 3587 device_printf(sc->mfi_dev, 3588 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3589 cm, (int)(time_uptime - cm->cm_timestamp)); 3590 MFI_PRINT_CMD(cm); 3591 timedout++; 3592 } 3593 } 3594 3595 #if 0 3596 if (timedout) 3597 MFI_DUMP_CMDS(SC); 3598 #endif 3599 3600 mtx_unlock(&sc->mfi_io_lock); 3601 } 3602 3603 return; 3604 } 3605 3606 static void 3607 mfi_timeout(void *data) 3608 { 3609 struct mfi_softc *sc = (struct mfi_softc *)data; 3610 struct mfi_command *cm; 3611 time_t deadline; 3612 int timedout = 0; 3613 3614 deadline = time_uptime - MFI_CMD_TIMEOUT; 3615 if (sc->adpreset == 0) { 3616 if (!mfi_tbolt_reset(sc)) { 3617 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc); 3618 return; 3619 } 3620 } 3621 mtx_lock(&sc->mfi_io_lock); 3622 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 3623 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm) 3624 continue; 3625 if (cm->cm_timestamp < deadline) { 3626 if (sc->adpreset != 0 && sc->issuepend_done == 0) { 3627 cm->cm_timestamp = time_uptime; 3628 } else { 3629 device_printf(sc->mfi_dev, 3630 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3631 cm, (int)(time_uptime - cm->cm_timestamp) 3632 ); 3633 MFI_PRINT_CMD(cm); 3634 MFI_VALIDATE_CMD(sc, cm); 3635 timedout++; 3636 } 3637 } 3638 } 3639 3640 #if 0 3641 if (timedout) 3642 MFI_DUMP_CMDS(SC); 3643 #endif 3644 3645 mtx_unlock(&sc->mfi_io_lock); 3646 3647 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 3648 mfi_timeout, sc); 3649 3650 if (0) 3651 mfi_dump_all(); 3652 return; 3653 } 3654
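/*
 * Example (sketch, not compiled here): one way a userland consumer might
 * drive the MFIIO_PASSTHRU path implemented in mfi_user_command() above.
 * The opcode and payload chosen (MFI_DCMD_CTRL_GETINFO into a struct
 * mfi_ctrl_info) are illustrative; any DCMD with a flat buffer follows the
 * same pattern, and the driver maps the buffer for both directions.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <dev/mfi/mfireg.h>
 *	#include <dev/mfi/mfi_ioctl.h>
 *
 *	static int
 *	get_ctrl_info(struct mfi_ctrl_info *info)
 *	{
 *		struct mfi_ioc_passthru ioc;
 *		int fd, r;
 *
 *		if ((fd = open("/dev/mfi0", O_RDWR)) < 0)
 *			return (-1);
 *		memset(&ioc, 0, sizeof(ioc));
 *		ioc.ioc_frame.header.cmd = MFI_CMD_DCMD;
 *		ioc.ioc_frame.header.data_len = sizeof(*info);
 *		ioc.ioc_frame.opcode = MFI_DCMD_CTRL_GETINFO;
 *		ioc.buf = (uint8_t *)info;
 *		ioc.buf_size = sizeof(*info);
 *		r = ioctl(fd, MFIIO_PASSTHRU, &ioc);
 *		close(fd);
 *		return (r);
 *	}
 */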