1 /*- 2 * Copyright (c) 2006 IronPort Systems 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /*- 27 * Copyright (c) 2007 LSI Corp. 28 * Copyright (c) 2007 Rajesh Prabhakaran. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include "opt_mfi.h" 57 58 #include <sys/param.h> 59 #include <sys/systm.h> 60 #include <sys/sysctl.h> 61 #include <sys/malloc.h> 62 #include <sys/kernel.h> 63 #include <sys/poll.h> 64 #include <sys/selinfo.h> 65 #include <sys/bus.h> 66 #include <sys/conf.h> 67 #include <sys/eventhandler.h> 68 #include <sys/rman.h> 69 #include <sys/bus_dma.h> 70 #include <sys/bio.h> 71 #include <sys/ioccom.h> 72 #include <sys/uio.h> 73 #include <sys/proc.h> 74 #include <sys/signalvar.h> 75 76 #include <machine/bus.h> 77 #include <machine/resource.h> 78 79 #include <dev/mfi/mfireg.h> 80 #include <dev/mfi/mfi_ioctl.h> 81 #include <dev/mfi/mfivar.h> 82 83 static int mfi_alloc_commands(struct mfi_softc *); 84 static int mfi_comms_init(struct mfi_softc *); 85 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *); 86 static int mfi_get_controller_info(struct mfi_softc *); 87 static int mfi_get_log_state(struct mfi_softc *, 88 struct mfi_evt_log_state **); 89 static int mfi_parse_entries(struct mfi_softc *, int, int); 90 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **, 91 uint32_t, void **, size_t); 92 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); 93 static void mfi_startup(void *arg); 94 static void mfi_intr(void *arg); 95 static void mfi_ldprobe(struct mfi_softc *sc); 96 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); 97 static void mfi_aen_complete(struct mfi_command *); 98 static int mfi_aen_setup(struct mfi_softc *, uint32_t); 99 static int mfi_add_ld(struct mfi_softc *sc, int); 100 static void mfi_add_ld_complete(struct mfi_command *); 101 static struct mfi_command * mfi_bio_command(struct mfi_softc *); 102 static void mfi_bio_complete(struct mfi_command *); 103 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *); 104 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); 105 static void mfi_complete(struct mfi_softc *, struct mfi_command *); 106 static int mfi_abort(struct mfi_softc *, struct mfi_command *); 107 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *); 108 static void mfi_timeout(void *); 109 static int mfi_user_command(struct mfi_softc *, 110 struct mfi_ioc_passthru *); 111 static void mfi_enable_intr_xscale(struct mfi_softc *sc); 112 static void mfi_enable_intr_ppc(struct mfi_softc *sc); 113 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); 114 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); 115 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); 116 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); 117 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 118 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 119 120 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); 121 static int mfi_event_locale = MFI_EVT_LOCALE_ALL; 122 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale); 123 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale, 124 0, "event message locale"); 125 126 static int mfi_event_class = MFI_EVT_CLASS_INFO; 127 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class); 128 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class, 129 0, "event message class"); 130 131 static int mfi_max_cmds = 128; 132 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds); 133 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds, 134 0, "Max commands"); 135 136 /* Management interface */ 137 static d_open_t mfi_open; 138 static d_close_t mfi_close; 139 static d_ioctl_t mfi_ioctl; 140 static d_poll_t mfi_poll; 141 142 static struct cdevsw mfi_cdevsw = { 143 .d_version = D_VERSION, 144 .d_flags = 0, 145 .d_open = mfi_open, 146 .d_close = mfi_close, 147 .d_ioctl = mfi_ioctl, 148 .d_poll = mfi_poll, 149 .d_name = "mfi", 150 }; 151 152 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); 153 154 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH 155 156 static void 157 mfi_enable_intr_xscale(struct mfi_softc *sc) 158 { 159 MFI_WRITE4(sc, MFI_OMSK, 0x01); 160 } 161 162 static void 163 mfi_enable_intr_ppc(struct mfi_softc *sc) 164 { 165 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); 166 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); 167 } 168 169 static int32_t 170 mfi_read_fw_status_xscale(struct mfi_softc *sc) 171 { 172 return MFI_READ4(sc, MFI_OMSG0); 173 } 174 175 static int32_t 176 mfi_read_fw_status_ppc(struct mfi_softc *sc) 177 { 178 return MFI_READ4(sc, MFI_OSP0); 179 } 180 181 static int 182 mfi_check_clear_intr_xscale(struct mfi_softc *sc) 183 { 184 int32_t status; 185 186 status = MFI_READ4(sc, MFI_OSTS); 187 if ((status & MFI_OSTS_INTR_VALID) == 0) 188 return 1; 189 190 MFI_WRITE4(sc, MFI_OSTS, status); 191 return 0; 192 } 193 194 static int 195 mfi_check_clear_intr_ppc(struct mfi_softc *sc) 196 { 197 int32_t status; 198 199 status = MFI_READ4(sc, MFI_OSTS); 200 if (!status) 201 return 1; 202 203 MFI_WRITE4(sc, MFI_ODCR0, status); 204 return 0; 205 } 206 207 static void 208 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 209 { 210 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); 211 } 212 213 static void 214 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 215 { 216 MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 ); 217 } 218 219 static int 220 mfi_transition_firmware(struct mfi_softc *sc) 221 { 222 int32_t fw_state, cur_state; 223 int max_wait, i; 224 225 fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK; 226 while (fw_state != MFI_FWSTATE_READY) { 227 if (bootverbose) 228 device_printf(sc->mfi_dev, "Waiting for firmware to " 229 "become ready\n"); 230 cur_state = fw_state; 231 switch (fw_state) { 232 case MFI_FWSTATE_FAULT: 233 device_printf(sc->mfi_dev, "Firmware fault\n"); 234 return (ENXIO); 235 case MFI_FWSTATE_WAIT_HANDSHAKE: 236 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); 237 max_wait = 2; 238 break; 239 case MFI_FWSTATE_OPERATIONAL: 240 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); 241 max_wait = 10; 242 break; 243 case MFI_FWSTATE_UNDEFINED: 244 case MFI_FWSTATE_BB_INIT: 245 max_wait = 2; 246 break; 247 case MFI_FWSTATE_FW_INIT: 248 case MFI_FWSTATE_DEVICE_SCAN: 249 case MFI_FWSTATE_FLUSH_CACHE: 250 max_wait = 20; 251 break; 252 default: 253 device_printf(sc->mfi_dev,"Unknown firmware state %d\n", 254 fw_state); 255 return (ENXIO); 256 } 257 for (i = 0; i < (max_wait * 10); i++) { 258 fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK; 259 if (fw_state == cur_state) 260 DELAY(100000); 261 else 262 break; 263 } 264 if (fw_state == cur_state) { 265 device_printf(sc->mfi_dev, "firmware stuck in state " 266 "%#x\n", fw_state); 267 return (ENXIO); 268 } 269 } 270 return (0); 271 } 272 273 static void 274 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 275 { 276 uint32_t *addr; 277 278 addr = arg; 279 *addr = segs[0].ds_addr; 280 } 281 282 int 283 mfi_attach(struct mfi_softc *sc) 284 { 285 uint32_t status; 286 int error, commsz, framessz, sensesz; 287 int frames, unit, max_fw_sge; 288 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 2.00 \n"); 289 290 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); 291 sx_init(&sc->mfi_config_lock, "MFI config"); 292 TAILQ_INIT(&sc->mfi_ld_tqh); 293 TAILQ_INIT(&sc->mfi_aen_pids); 294 TAILQ_INIT(&sc->mfi_cam_ccbq); 295 296 mfi_initq_free(sc); 297 mfi_initq_ready(sc); 298 mfi_initq_busy(sc); 299 mfi_initq_bio(sc); 300 301 if (sc->mfi_flags & MFI_FLAGS_1064R) { 302 sc->mfi_enable_intr = mfi_enable_intr_xscale; 303 sc->mfi_read_fw_status = mfi_read_fw_status_xscale; 304 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; 305 sc->mfi_issue_cmd = mfi_issue_cmd_xscale; 306 } 307 else { 308 sc->mfi_enable_intr = mfi_enable_intr_ppc; 309 sc->mfi_read_fw_status = mfi_read_fw_status_ppc; 310 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; 311 sc->mfi_issue_cmd = mfi_issue_cmd_ppc; 312 } 313 314 315 /* Before we get too far, see if the firmware is working */ 316 if ((error = mfi_transition_firmware(sc)) != 0) { 317 device_printf(sc->mfi_dev, "Firmware not in READY state, " 318 "error %d\n", error); 319 return (ENXIO); 320 } 321 322 /* 323 * Get information needed for sizing the contiguous memory for the 324 * frame pool. Size down the sgl parameter since we know that 325 * we will never need more than what's required for MAXPHYS. 326 * It would be nice if these constants were available at runtime 327 * instead of compile time. 328 */ 329 status = sc->mfi_read_fw_status(sc); 330 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; 331 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; 332 sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1)); 333 334 /* 335 * Create the dma tag for data buffers. Used both for block I/O 336 * and for various internal data queries. 337 */ 338 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 339 1, 0, /* algnmnt, boundary */ 340 BUS_SPACE_MAXADDR, /* lowaddr */ 341 BUS_SPACE_MAXADDR, /* highaddr */ 342 NULL, NULL, /* filter, filterarg */ 343 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 344 sc->mfi_max_sge, /* nsegments */ 345 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 346 BUS_DMA_ALLOCNOW, /* flags */ 347 busdma_lock_mutex, /* lockfunc */ 348 &sc->mfi_io_lock, /* lockfuncarg */ 349 &sc->mfi_buffer_dmat)) { 350 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); 351 return (ENOMEM); 352 } 353 354 /* 355 * Allocate DMA memory for the comms queues. Keep it under 4GB for 356 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue 357 * entry, so the calculated size here will be will be 1 more than 358 * mfi_max_fw_cmds. This is apparently a requirement of the hardware. 359 */ 360 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) + 361 sizeof(struct mfi_hwcomms); 362 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 363 1, 0, /* algnmnt, boundary */ 364 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 365 BUS_SPACE_MAXADDR, /* highaddr */ 366 NULL, NULL, /* filter, filterarg */ 367 commsz, /* maxsize */ 368 1, /* msegments */ 369 commsz, /* maxsegsize */ 370 0, /* flags */ 371 NULL, NULL, /* lockfunc, lockarg */ 372 &sc->mfi_comms_dmat)) { 373 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); 374 return (ENOMEM); 375 } 376 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms, 377 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) { 378 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 379 return (ENOMEM); 380 } 381 bzero(sc->mfi_comms, commsz); 382 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap, 383 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0); 384 385 /* 386 * Allocate DMA memory for the command frames. Keep them in the 387 * lower 4GB for efficiency. Calculate the size of the commands at 388 * the same time; each command is one 64 byte frame plus a set of 389 * additional frames for holding sg lists or other data. 390 * The assumption here is that the SG list will start at the second 391 * frame and not use the unused bytes in the first frame. While this 392 * isn't technically correct, it simplifies the calculation and allows 393 * for command frames that might be larger than an mfi_io_frame. 394 */ 395 if (sizeof(bus_addr_t) == 8) { 396 sc->mfi_sge_size = sizeof(struct mfi_sg64); 397 sc->mfi_flags |= MFI_FLAGS_SG64; 398 } else { 399 sc->mfi_sge_size = sizeof(struct mfi_sg32); 400 } 401 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; 402 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; 403 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; 404 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 405 64, 0, /* algnmnt, boundary */ 406 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 407 BUS_SPACE_MAXADDR, /* highaddr */ 408 NULL, NULL, /* filter, filterarg */ 409 framessz, /* maxsize */ 410 1, /* nsegments */ 411 framessz, /* maxsegsize */ 412 0, /* flags */ 413 NULL, NULL, /* lockfunc, lockarg */ 414 &sc->mfi_frames_dmat)) { 415 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); 416 return (ENOMEM); 417 } 418 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, 419 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { 420 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); 421 return (ENOMEM); 422 } 423 bzero(sc->mfi_frames, framessz); 424 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, 425 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0); 426 427 /* 428 * Allocate DMA memory for the frame sense data. Keep them in the 429 * lower 4GB for efficiency 430 */ 431 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; 432 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 433 4, 0, /* algnmnt, boundary */ 434 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 435 BUS_SPACE_MAXADDR, /* highaddr */ 436 NULL, NULL, /* filter, filterarg */ 437 sensesz, /* maxsize */ 438 1, /* nsegments */ 439 sensesz, /* maxsegsize */ 440 0, /* flags */ 441 NULL, NULL, /* lockfunc, lockarg */ 442 &sc->mfi_sense_dmat)) { 443 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); 444 return (ENOMEM); 445 } 446 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, 447 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { 448 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); 449 return (ENOMEM); 450 } 451 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, 452 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0); 453 454 if ((error = mfi_alloc_commands(sc)) != 0) 455 return (error); 456 457 if ((error = mfi_comms_init(sc)) != 0) 458 return (error); 459 460 if ((error = mfi_get_controller_info(sc)) != 0) 461 return (error); 462 463 mtx_lock(&sc->mfi_io_lock); 464 if ((error = mfi_aen_setup(sc, 0), 0) != 0) { 465 mtx_unlock(&sc->mfi_io_lock); 466 return (error); 467 } 468 mtx_unlock(&sc->mfi_io_lock); 469 470 /* 471 * Set up the interrupt handler. XXX This should happen in 472 * mfi_pci.c 473 */ 474 sc->mfi_irq_rid = 0; 475 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ, 476 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { 477 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n"); 478 return (EINVAL); 479 } 480 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, 481 NULL, mfi_intr, sc, &sc->mfi_intr)) { 482 device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); 483 return (EINVAL); 484 } 485 486 /* Register a config hook to probe the bus for arrays */ 487 sc->mfi_ich.ich_func = mfi_startup; 488 sc->mfi_ich.ich_arg = sc; 489 if (config_intrhook_establish(&sc->mfi_ich) != 0) { 490 device_printf(sc->mfi_dev, "Cannot establish configuration " 491 "hook\n"); 492 return (EINVAL); 493 } 494 495 /* 496 * Register a shutdown handler. 497 */ 498 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, 499 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { 500 device_printf(sc->mfi_dev, "Warning: shutdown event " 501 "registration failed\n"); 502 } 503 504 /* 505 * Create the control device for doing management 506 */ 507 unit = device_get_unit(sc->mfi_dev); 508 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 509 0640, "mfi%d", unit); 510 if (unit == 0) 511 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node"); 512 if (sc->mfi_cdev != NULL) 513 sc->mfi_cdev->si_drv1 = sc; 514 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 515 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 516 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, 517 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); 518 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 519 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 520 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, 521 &sc->mfi_keep_deleted_volumes, 0, 522 "Don't detach the mfid device for a busy volume that is deleted"); 523 524 device_add_child(sc->mfi_dev, "mfip", -1); 525 bus_generic_attach(sc->mfi_dev); 526 527 /* Start the timeout watchdog */ 528 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE); 529 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 530 mfi_timeout, sc); 531 532 return (0); 533 } 534 535 static int 536 mfi_alloc_commands(struct mfi_softc *sc) 537 { 538 struct mfi_command *cm; 539 int i, ncmds; 540 541 /* 542 * XXX Should we allocate all the commands up front, or allocate on 543 * demand later like 'aac' does? 544 */ 545 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds); 546 if (bootverbose) 547 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver " 548 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds); 549 550 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF, 551 M_WAITOK | M_ZERO); 552 553 for (i = 0; i < ncmds; i++) { 554 cm = &sc->mfi_commands[i]; 555 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + 556 sc->mfi_cmd_size * i); 557 cm->cm_frame_busaddr = sc->mfi_frames_busaddr + 558 sc->mfi_cmd_size * i; 559 cm->cm_frame->header.context = i; 560 cm->cm_sense = &sc->mfi_sense[i]; 561 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; 562 cm->cm_sc = sc; 563 cm->cm_index = i; 564 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, 565 &cm->cm_dmamap) == 0) 566 mfi_release_command(cm); 567 else 568 break; 569 sc->mfi_total_cmds++; 570 } 571 572 return (0); 573 } 574 575 void 576 mfi_release_command(struct mfi_command *cm) 577 { 578 struct mfi_frame_header *hdr; 579 uint32_t *hdr_data; 580 581 /* 582 * Zero out the important fields of the frame, but make sure the 583 * context field is preserved. For efficiency, handle the fields 584 * as 32 bit words. Clear out the first S/G entry too for safety. 585 */ 586 hdr = &cm->cm_frame->header; 587 if (cm->cm_data != NULL && hdr->sg_count) { 588 cm->cm_sg->sg32[0].len = 0; 589 cm->cm_sg->sg32[0].addr = 0; 590 } 591 592 hdr_data = (uint32_t *)cm->cm_frame; 593 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ 594 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ 595 hdr_data[4] = 0; /* flags, timeout */ 596 hdr_data[5] = 0; /* data_len */ 597 598 cm->cm_extra_frames = 0; 599 cm->cm_flags = 0; 600 cm->cm_complete = NULL; 601 cm->cm_private = NULL; 602 cm->cm_data = NULL; 603 cm->cm_sg = 0; 604 cm->cm_total_frame_size = 0; 605 606 mfi_enqueue_free(cm); 607 } 608 609 static int 610 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode, 611 void **bufp, size_t bufsize) 612 { 613 struct mfi_command *cm; 614 struct mfi_dcmd_frame *dcmd; 615 void *buf = NULL; 616 617 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 618 619 cm = mfi_dequeue_free(sc); 620 if (cm == NULL) 621 return (EBUSY); 622 623 if ((bufsize > 0) && (bufp != NULL)) { 624 if (*bufp == NULL) { 625 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); 626 if (buf == NULL) { 627 mfi_release_command(cm); 628 return (ENOMEM); 629 } 630 *bufp = buf; 631 } else { 632 buf = *bufp; 633 } 634 } 635 636 dcmd = &cm->cm_frame->dcmd; 637 bzero(dcmd->mbox, MFI_MBOX_SIZE); 638 dcmd->header.cmd = MFI_CMD_DCMD; 639 dcmd->header.timeout = 0; 640 dcmd->header.flags = 0; 641 dcmd->header.data_len = bufsize; 642 dcmd->opcode = opcode; 643 cm->cm_sg = &dcmd->sgl; 644 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 645 cm->cm_flags = 0; 646 cm->cm_data = buf; 647 cm->cm_private = buf; 648 cm->cm_len = bufsize; 649 650 *cmp = cm; 651 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) 652 *bufp = buf; 653 return (0); 654 } 655 656 static int 657 mfi_comms_init(struct mfi_softc *sc) 658 { 659 struct mfi_command *cm; 660 struct mfi_init_frame *init; 661 struct mfi_init_qinfo *qinfo; 662 int error; 663 664 mtx_lock(&sc->mfi_io_lock); 665 if ((cm = mfi_dequeue_free(sc)) == NULL) 666 return (EBUSY); 667 668 /* 669 * Abuse the SG list area of the frame to hold the init_qinfo 670 * object; 671 */ 672 init = &cm->cm_frame->init; 673 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); 674 675 bzero(qinfo, sizeof(struct mfi_init_qinfo)); 676 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; 677 qinfo->rq_addr_lo = sc->mfi_comms_busaddr + 678 offsetof(struct mfi_hwcomms, hw_reply_q); 679 qinfo->pi_addr_lo = sc->mfi_comms_busaddr + 680 offsetof(struct mfi_hwcomms, hw_pi); 681 qinfo->ci_addr_lo = sc->mfi_comms_busaddr + 682 offsetof(struct mfi_hwcomms, hw_ci); 683 684 init->header.cmd = MFI_CMD_INIT; 685 init->header.data_len = sizeof(struct mfi_init_qinfo); 686 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; 687 cm->cm_data = NULL; 688 cm->cm_flags = MFI_CMD_POLLED; 689 690 if ((error = mfi_mapcmd(sc, cm)) != 0) { 691 device_printf(sc->mfi_dev, "failed to send init command\n"); 692 mtx_unlock(&sc->mfi_io_lock); 693 return (error); 694 } 695 mfi_release_command(cm); 696 mtx_unlock(&sc->mfi_io_lock); 697 698 return (0); 699 } 700 701 static int 702 mfi_get_controller_info(struct mfi_softc *sc) 703 { 704 struct mfi_command *cm = NULL; 705 struct mfi_ctrl_info *ci = NULL; 706 uint32_t max_sectors_1, max_sectors_2; 707 int error; 708 709 mtx_lock(&sc->mfi_io_lock); 710 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, 711 (void **)&ci, sizeof(*ci)); 712 if (error) 713 goto out; 714 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 715 716 if ((error = mfi_mapcmd(sc, cm)) != 0) { 717 device_printf(sc->mfi_dev, "Failed to get controller info\n"); 718 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / 719 MFI_SECTOR_LEN; 720 error = 0; 721 goto out; 722 } 723 724 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 725 BUS_DMASYNC_POSTREAD); 726 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 727 728 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io; 729 max_sectors_2 = ci->max_request_size; 730 sc->mfi_max_io = min(max_sectors_1, max_sectors_2); 731 732 out: 733 if (ci) 734 free(ci, M_MFIBUF); 735 if (cm) 736 mfi_release_command(cm); 737 mtx_unlock(&sc->mfi_io_lock); 738 return (error); 739 } 740 741 static int 742 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) 743 { 744 struct mfi_command *cm = NULL; 745 int error; 746 747 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, 748 (void **)log_state, sizeof(**log_state)); 749 if (error) 750 goto out; 751 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 752 753 if ((error = mfi_mapcmd(sc, cm)) != 0) { 754 device_printf(sc->mfi_dev, "Failed to get log state\n"); 755 goto out; 756 } 757 758 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 759 BUS_DMASYNC_POSTREAD); 760 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 761 762 out: 763 if (cm) 764 mfi_release_command(cm); 765 766 return (error); 767 } 768 769 static int 770 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) 771 { 772 struct mfi_evt_log_state *log_state = NULL; 773 union mfi_evt class_locale; 774 int error = 0; 775 uint32_t seq; 776 777 class_locale.members.reserved = 0; 778 class_locale.members.locale = mfi_event_locale; 779 class_locale.members.class = mfi_event_class; 780 781 if (seq_start == 0) { 782 error = mfi_get_log_state(sc, &log_state); 783 if (error) { 784 if (log_state) 785 free(log_state, M_MFIBUF); 786 return (error); 787 } 788 789 /* 790 * Walk through any events that fired since the last 791 * shutdown. 792 */ 793 mfi_parse_entries(sc, log_state->shutdown_seq_num, 794 log_state->newest_seq_num); 795 seq = log_state->newest_seq_num; 796 } else 797 seq = seq_start; 798 mfi_aen_register(sc, seq, class_locale.word); 799 free(log_state, M_MFIBUF); 800 801 return 0; 802 } 803 804 static int 805 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) 806 { 807 808 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 809 cm->cm_complete = NULL; 810 811 812 /* 813 * MegaCli can issue a DCMD of 0. In this case do nothing 814 * and return 0 to it as status 815 */ 816 if (cm->cm_frame->dcmd.opcode == 0) { 817 cm->cm_frame->header.cmd_status = MFI_STAT_OK; 818 cm->cm_error = 0; 819 return (cm->cm_error); 820 } 821 mfi_enqueue_ready(cm); 822 mfi_startio(sc); 823 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) 824 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); 825 return (cm->cm_error); 826 } 827 828 void 829 mfi_free(struct mfi_softc *sc) 830 { 831 struct mfi_command *cm; 832 int i; 833 834 callout_drain(&sc->mfi_watchdog_callout); 835 836 if (sc->mfi_cdev != NULL) 837 destroy_dev(sc->mfi_cdev); 838 839 if (sc->mfi_total_cmds != 0) { 840 for (i = 0; i < sc->mfi_total_cmds; i++) { 841 cm = &sc->mfi_commands[i]; 842 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); 843 } 844 free(sc->mfi_commands, M_MFIBUF); 845 } 846 847 if (sc->mfi_intr) 848 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); 849 if (sc->mfi_irq != NULL) 850 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, 851 sc->mfi_irq); 852 853 if (sc->mfi_sense_busaddr != 0) 854 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); 855 if (sc->mfi_sense != NULL) 856 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, 857 sc->mfi_sense_dmamap); 858 if (sc->mfi_sense_dmat != NULL) 859 bus_dma_tag_destroy(sc->mfi_sense_dmat); 860 861 if (sc->mfi_frames_busaddr != 0) 862 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); 863 if (sc->mfi_frames != NULL) 864 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, 865 sc->mfi_frames_dmamap); 866 if (sc->mfi_frames_dmat != NULL) 867 bus_dma_tag_destroy(sc->mfi_frames_dmat); 868 869 if (sc->mfi_comms_busaddr != 0) 870 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); 871 if (sc->mfi_comms != NULL) 872 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, 873 sc->mfi_comms_dmamap); 874 if (sc->mfi_comms_dmat != NULL) 875 bus_dma_tag_destroy(sc->mfi_comms_dmat); 876 877 if (sc->mfi_buffer_dmat != NULL) 878 bus_dma_tag_destroy(sc->mfi_buffer_dmat); 879 if (sc->mfi_parent_dmat != NULL) 880 bus_dma_tag_destroy(sc->mfi_parent_dmat); 881 882 if (mtx_initialized(&sc->mfi_io_lock)) { 883 mtx_destroy(&sc->mfi_io_lock); 884 sx_destroy(&sc->mfi_config_lock); 885 } 886 887 return; 888 } 889 890 static void 891 mfi_startup(void *arg) 892 { 893 struct mfi_softc *sc; 894 895 sc = (struct mfi_softc *)arg; 896 897 config_intrhook_disestablish(&sc->mfi_ich); 898 899 sc->mfi_enable_intr(sc); 900 sx_xlock(&sc->mfi_config_lock); 901 mtx_lock(&sc->mfi_io_lock); 902 mfi_ldprobe(sc); 903 mtx_unlock(&sc->mfi_io_lock); 904 sx_xunlock(&sc->mfi_config_lock); 905 } 906 907 static void 908 mfi_intr(void *arg) 909 { 910 struct mfi_softc *sc; 911 struct mfi_command *cm; 912 uint32_t pi, ci, context; 913 914 sc = (struct mfi_softc *)arg; 915 916 if (sc->mfi_check_clear_intr(sc)) 917 return; 918 919 pi = sc->mfi_comms->hw_pi; 920 ci = sc->mfi_comms->hw_ci; 921 mtx_lock(&sc->mfi_io_lock); 922 while (ci != pi) { 923 context = sc->mfi_comms->hw_reply_q[ci]; 924 if (context < sc->mfi_max_fw_cmds) { 925 cm = &sc->mfi_commands[context]; 926 mfi_remove_busy(cm); 927 cm->cm_error = 0; 928 mfi_complete(sc, cm); 929 } 930 if (++ci == (sc->mfi_max_fw_cmds + 1)) { 931 ci = 0; 932 } 933 } 934 935 sc->mfi_comms->hw_ci = ci; 936 937 /* Give defered I/O a chance to run */ 938 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 939 sc->mfi_flags &= ~MFI_FLAGS_QFRZN; 940 mfi_startio(sc); 941 mtx_unlock(&sc->mfi_io_lock); 942 943 return; 944 } 945 946 int 947 mfi_shutdown(struct mfi_softc *sc) 948 { 949 struct mfi_dcmd_frame *dcmd; 950 struct mfi_command *cm; 951 int error; 952 953 mtx_lock(&sc->mfi_io_lock); 954 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); 955 if (error) { 956 mtx_unlock(&sc->mfi_io_lock); 957 return (error); 958 } 959 960 if (sc->mfi_aen_cm != NULL) 961 mfi_abort(sc, sc->mfi_aen_cm); 962 963 dcmd = &cm->cm_frame->dcmd; 964 dcmd->header.flags = MFI_FRAME_DIR_NONE; 965 cm->cm_flags = MFI_CMD_POLLED; 966 cm->cm_data = NULL; 967 968 if ((error = mfi_mapcmd(sc, cm)) != 0) { 969 device_printf(sc->mfi_dev, "Failed to shutdown controller\n"); 970 } 971 972 mfi_release_command(cm); 973 mtx_unlock(&sc->mfi_io_lock); 974 return (error); 975 } 976 977 static void 978 mfi_ldprobe(struct mfi_softc *sc) 979 { 980 struct mfi_frame_header *hdr; 981 struct mfi_command *cm = NULL; 982 struct mfi_ld_list *list = NULL; 983 struct mfi_disk *ld; 984 int error, i; 985 986 sx_assert(&sc->mfi_config_lock, SA_XLOCKED); 987 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 988 989 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, 990 (void **)&list, sizeof(*list)); 991 if (error) 992 goto out; 993 994 cm->cm_flags = MFI_CMD_DATAIN; 995 if (mfi_wait_command(sc, cm) != 0) { 996 device_printf(sc->mfi_dev, "Failed to get device listing\n"); 997 goto out; 998 } 999 1000 hdr = &cm->cm_frame->header; 1001 if (hdr->cmd_status != MFI_STAT_OK) { 1002 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", 1003 hdr->cmd_status); 1004 goto out; 1005 } 1006 1007 for (i = 0; i < list->ld_count; i++) { 1008 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1009 if (ld->ld_id == list->ld_list[i].ld.v.target_id) 1010 goto skip_add; 1011 } 1012 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); 1013 skip_add:; 1014 } 1015 out: 1016 if (list) 1017 free(list, M_MFIBUF); 1018 if (cm) 1019 mfi_release_command(cm); 1020 1021 return; 1022 } 1023 1024 /* 1025 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If 1026 * the bits in 24-31 are all set, then it is the number of seconds since 1027 * boot. 1028 */ 1029 static const char * 1030 format_timestamp(uint32_t timestamp) 1031 { 1032 static char buffer[32]; 1033 1034 if ((timestamp & 0xff000000) == 0xff000000) 1035 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 1036 0x00ffffff); 1037 else 1038 snprintf(buffer, sizeof(buffer), "%us", timestamp); 1039 return (buffer); 1040 } 1041 1042 static const char * 1043 format_class(int8_t class) 1044 { 1045 static char buffer[6]; 1046 1047 switch (class) { 1048 case MFI_EVT_CLASS_DEBUG: 1049 return ("debug"); 1050 case MFI_EVT_CLASS_PROGRESS: 1051 return ("progress"); 1052 case MFI_EVT_CLASS_INFO: 1053 return ("info"); 1054 case MFI_EVT_CLASS_WARNING: 1055 return ("WARN"); 1056 case MFI_EVT_CLASS_CRITICAL: 1057 return ("CRIT"); 1058 case MFI_EVT_CLASS_FATAL: 1059 return ("FATAL"); 1060 case MFI_EVT_CLASS_DEAD: 1061 return ("DEAD"); 1062 default: 1063 snprintf(buffer, sizeof(buffer), "%d", class); 1064 return (buffer); 1065 } 1066 } 1067 1068 static void 1069 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) 1070 { 1071 1072 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, 1073 format_timestamp(detail->time), detail->class.members.locale, 1074 format_class(detail->class.members.class), detail->description); 1075 } 1076 1077 static int 1078 mfi_aen_register(struct mfi_softc *sc, int seq, int locale) 1079 { 1080 struct mfi_command *cm; 1081 struct mfi_dcmd_frame *dcmd; 1082 union mfi_evt current_aen, prior_aen; 1083 struct mfi_evt_detail *ed = NULL; 1084 int error = 0; 1085 1086 current_aen.word = locale; 1087 if (sc->mfi_aen_cm != NULL) { 1088 prior_aen.word = 1089 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; 1090 if (prior_aen.members.class <= current_aen.members.class && 1091 !((prior_aen.members.locale & current_aen.members.locale) 1092 ^current_aen.members.locale)) { 1093 return (0); 1094 } else { 1095 prior_aen.members.locale |= current_aen.members.locale; 1096 if (prior_aen.members.class 1097 < current_aen.members.class) 1098 current_aen.members.class = 1099 prior_aen.members.class; 1100 mfi_abort(sc, sc->mfi_aen_cm); 1101 } 1102 } 1103 1104 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, 1105 (void **)&ed, sizeof(*ed)); 1106 if (error) { 1107 goto out; 1108 } 1109 1110 dcmd = &cm->cm_frame->dcmd; 1111 ((uint32_t *)&dcmd->mbox)[0] = seq; 1112 ((uint32_t *)&dcmd->mbox)[1] = locale; 1113 cm->cm_flags = MFI_CMD_DATAIN; 1114 cm->cm_complete = mfi_aen_complete; 1115 1116 sc->mfi_aen_cm = cm; 1117 1118 mfi_enqueue_ready(cm); 1119 mfi_startio(sc); 1120 1121 out: 1122 return (error); 1123 } 1124 1125 static void 1126 mfi_aen_complete(struct mfi_command *cm) 1127 { 1128 struct mfi_frame_header *hdr; 1129 struct mfi_softc *sc; 1130 struct mfi_evt_detail *detail; 1131 struct mfi_aen *mfi_aen_entry, *tmp; 1132 int seq = 0, aborted = 0; 1133 1134 sc = cm->cm_sc; 1135 hdr = &cm->cm_frame->header; 1136 1137 if (sc->mfi_aen_cm == NULL) 1138 return; 1139 1140 if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) { 1141 sc->mfi_aen_cm->cm_aen_abort = 0; 1142 aborted = 1; 1143 } else { 1144 sc->mfi_aen_triggered = 1; 1145 if (sc->mfi_poll_waiting) { 1146 sc->mfi_poll_waiting = 0; 1147 selwakeup(&sc->mfi_select); 1148 } 1149 detail = cm->cm_data; 1150 /* 1151 * XXX If this function is too expensive or is recursive, then 1152 * events should be put onto a queue and processed later. 1153 */ 1154 mfi_decode_evt(sc, detail); 1155 seq = detail->seq + 1; 1156 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1157 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1158 aen_link); 1159 PROC_LOCK(mfi_aen_entry->p); 1160 psignal(mfi_aen_entry->p, SIGIO); 1161 PROC_UNLOCK(mfi_aen_entry->p); 1162 free(mfi_aen_entry, M_MFIBUF); 1163 } 1164 } 1165 1166 free(cm->cm_data, M_MFIBUF); 1167 sc->mfi_aen_cm = NULL; 1168 wakeup(&sc->mfi_aen_cm); 1169 mfi_release_command(cm); 1170 1171 /* set it up again so the driver can catch more events */ 1172 if (!aborted) { 1173 mfi_aen_setup(sc, seq); 1174 } 1175 } 1176 1177 #define MAX_EVENTS 15 1178 1179 static int 1180 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) 1181 { 1182 struct mfi_command *cm; 1183 struct mfi_dcmd_frame *dcmd; 1184 struct mfi_evt_list *el; 1185 union mfi_evt class_locale; 1186 int error, i, seq, size; 1187 1188 class_locale.members.reserved = 0; 1189 class_locale.members.locale = mfi_event_locale; 1190 class_locale.members.class = mfi_event_class; 1191 1192 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) 1193 * (MAX_EVENTS - 1); 1194 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); 1195 if (el == NULL) 1196 return (ENOMEM); 1197 1198 for (seq = start_seq;;) { 1199 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1200 free(el, M_MFIBUF); 1201 return (EBUSY); 1202 } 1203 1204 dcmd = &cm->cm_frame->dcmd; 1205 bzero(dcmd->mbox, MFI_MBOX_SIZE); 1206 dcmd->header.cmd = MFI_CMD_DCMD; 1207 dcmd->header.timeout = 0; 1208 dcmd->header.data_len = size; 1209 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; 1210 ((uint32_t *)&dcmd->mbox)[0] = seq; 1211 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; 1212 cm->cm_sg = &dcmd->sgl; 1213 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1214 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1215 cm->cm_data = el; 1216 cm->cm_len = size; 1217 1218 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1219 device_printf(sc->mfi_dev, 1220 "Failed to get controller entries\n"); 1221 mfi_release_command(cm); 1222 break; 1223 } 1224 1225 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1226 BUS_DMASYNC_POSTREAD); 1227 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1228 1229 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { 1230 mfi_release_command(cm); 1231 break; 1232 } 1233 if (dcmd->header.cmd_status != MFI_STAT_OK) { 1234 device_printf(sc->mfi_dev, 1235 "Error %d fetching controller entries\n", 1236 dcmd->header.cmd_status); 1237 mfi_release_command(cm); 1238 break; 1239 } 1240 mfi_release_command(cm); 1241 1242 for (i = 0; i < el->count; i++) { 1243 /* 1244 * If this event is newer than 'stop_seq' then 1245 * break out of the loop. Note that the log 1246 * is a circular buffer so we have to handle 1247 * the case that our stop point is earlier in 1248 * the buffer than our start point. 1249 */ 1250 if (el->event[i].seq >= stop_seq) { 1251 if (start_seq <= stop_seq) 1252 break; 1253 else if (el->event[i].seq < start_seq) 1254 break; 1255 } 1256 mfi_decode_evt(sc, &el->event[i]); 1257 } 1258 seq = el->event[el->count - 1].seq + 1; 1259 } 1260 1261 free(el, M_MFIBUF); 1262 return (0); 1263 } 1264 1265 static int 1266 mfi_add_ld(struct mfi_softc *sc, int id) 1267 { 1268 struct mfi_command *cm; 1269 struct mfi_dcmd_frame *dcmd = NULL; 1270 struct mfi_ld_info *ld_info = NULL; 1271 int error; 1272 1273 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1274 1275 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, 1276 (void **)&ld_info, sizeof(*ld_info)); 1277 if (error) { 1278 device_printf(sc->mfi_dev, 1279 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); 1280 if (ld_info) 1281 free(ld_info, M_MFIBUF); 1282 return (error); 1283 } 1284 cm->cm_flags = MFI_CMD_DATAIN; 1285 dcmd = &cm->cm_frame->dcmd; 1286 dcmd->mbox[0] = id; 1287 if (mfi_wait_command(sc, cm) != 0) { 1288 device_printf(sc->mfi_dev, 1289 "Failed to get logical drive: %d\n", id); 1290 free(ld_info, M_MFIBUF); 1291 return (0); 1292 } 1293 1294 mfi_add_ld_complete(cm); 1295 return (0); 1296 } 1297 1298 static void 1299 mfi_add_ld_complete(struct mfi_command *cm) 1300 { 1301 struct mfi_frame_header *hdr; 1302 struct mfi_ld_info *ld_info; 1303 struct mfi_softc *sc; 1304 device_t child; 1305 1306 sc = cm->cm_sc; 1307 hdr = &cm->cm_frame->header; 1308 ld_info = cm->cm_private; 1309 1310 if (hdr->cmd_status != MFI_STAT_OK) { 1311 free(ld_info, M_MFIBUF); 1312 mfi_release_command(cm); 1313 return; 1314 } 1315 mfi_release_command(cm); 1316 1317 mtx_unlock(&sc->mfi_io_lock); 1318 mtx_lock(&Giant); 1319 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { 1320 device_printf(sc->mfi_dev, "Failed to add logical disk\n"); 1321 free(ld_info, M_MFIBUF); 1322 mtx_unlock(&Giant); 1323 mtx_lock(&sc->mfi_io_lock); 1324 return; 1325 } 1326 1327 device_set_ivars(child, ld_info); 1328 device_set_desc(child, "MFI Logical Disk"); 1329 bus_generic_attach(sc->mfi_dev); 1330 mtx_unlock(&Giant); 1331 mtx_lock(&sc->mfi_io_lock); 1332 } 1333 1334 static struct mfi_command * 1335 mfi_bio_command(struct mfi_softc *sc) 1336 { 1337 struct mfi_io_frame *io; 1338 struct mfi_command *cm; 1339 struct bio *bio; 1340 int flags, blkcount; 1341 1342 if ((cm = mfi_dequeue_free(sc)) == NULL) 1343 return (NULL); 1344 1345 if ((bio = mfi_dequeue_bio(sc)) == NULL) { 1346 mfi_release_command(cm); 1347 return (NULL); 1348 } 1349 1350 io = &cm->cm_frame->io; 1351 switch (bio->bio_cmd & 0x03) { 1352 case BIO_READ: 1353 io->header.cmd = MFI_CMD_LD_READ; 1354 flags = MFI_CMD_DATAIN; 1355 break; 1356 case BIO_WRITE: 1357 io->header.cmd = MFI_CMD_LD_WRITE; 1358 flags = MFI_CMD_DATAOUT; 1359 break; 1360 default: 1361 panic("Invalid bio command"); 1362 } 1363 1364 /* Cheat with the sector length to avoid a non-constant division */ 1365 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1366 io->header.target_id = (uintptr_t)bio->bio_driver1; 1367 io->header.timeout = 0; 1368 io->header.flags = 0; 1369 io->header.sense_len = MFI_SENSE_LEN; 1370 io->header.data_len = blkcount; 1371 io->sense_addr_lo = cm->cm_sense_busaddr; 1372 io->sense_addr_hi = 0; 1373 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 1374 io->lba_lo = bio->bio_pblkno & 0xffffffff; 1375 cm->cm_complete = mfi_bio_complete; 1376 cm->cm_private = bio; 1377 cm->cm_data = bio->bio_data; 1378 cm->cm_len = bio->bio_bcount; 1379 cm->cm_sg = &io->sgl; 1380 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1381 cm->cm_flags = flags; 1382 return (cm); 1383 } 1384 1385 static void 1386 mfi_bio_complete(struct mfi_command *cm) 1387 { 1388 struct bio *bio; 1389 struct mfi_frame_header *hdr; 1390 struct mfi_softc *sc; 1391 1392 bio = cm->cm_private; 1393 hdr = &cm->cm_frame->header; 1394 sc = cm->cm_sc; 1395 1396 if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) { 1397 bio->bio_flags |= BIO_ERROR; 1398 bio->bio_error = EIO; 1399 device_printf(sc->mfi_dev, "I/O error, status= %d " 1400 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); 1401 mfi_print_sense(cm->cm_sc, cm->cm_sense); 1402 } 1403 1404 mfi_release_command(cm); 1405 mfi_disk_complete(bio); 1406 } 1407 1408 void 1409 mfi_startio(struct mfi_softc *sc) 1410 { 1411 struct mfi_command *cm; 1412 struct ccb_hdr *ccbh; 1413 1414 for (;;) { 1415 /* Don't bother if we're short on resources */ 1416 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 1417 break; 1418 1419 /* Try a command that has already been prepared */ 1420 cm = mfi_dequeue_ready(sc); 1421 1422 if (cm == NULL) { 1423 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 1424 cm = sc->mfi_cam_start(ccbh); 1425 } 1426 1427 /* Nope, so look for work on the bioq */ 1428 if (cm == NULL) 1429 cm = mfi_bio_command(sc); 1430 1431 /* No work available, so exit */ 1432 if (cm == NULL) 1433 break; 1434 1435 /* Send the command to the controller */ 1436 if (mfi_mapcmd(sc, cm) != 0) { 1437 mfi_requeue_ready(cm); 1438 break; 1439 } 1440 } 1441 } 1442 1443 static int 1444 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 1445 { 1446 int error, polled; 1447 1448 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1449 1450 if (cm->cm_data != NULL) { 1451 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0; 1452 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, 1453 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); 1454 if (error == EINPROGRESS) { 1455 sc->mfi_flags |= MFI_FLAGS_QFRZN; 1456 return (0); 1457 } 1458 } else { 1459 error = mfi_send_frame(sc, cm); 1460 } 1461 1462 return (error); 1463 } 1464 1465 static void 1466 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1467 { 1468 struct mfi_frame_header *hdr; 1469 struct mfi_command *cm; 1470 union mfi_sgl *sgl; 1471 struct mfi_softc *sc; 1472 int i, dir; 1473 1474 cm = (struct mfi_command *)arg; 1475 sc = cm->cm_sc; 1476 hdr = &cm->cm_frame->header; 1477 sgl = cm->cm_sg; 1478 1479 if (error) { 1480 printf("error %d in callback\n", error); 1481 cm->cm_error = error; 1482 mfi_complete(sc, cm); 1483 return; 1484 } 1485 1486 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 1487 for (i = 0; i < nsegs; i++) { 1488 sgl->sg32[i].addr = segs[i].ds_addr; 1489 sgl->sg32[i].len = segs[i].ds_len; 1490 } 1491 } else { 1492 for (i = 0; i < nsegs; i++) { 1493 sgl->sg64[i].addr = segs[i].ds_addr; 1494 sgl->sg64[i].len = segs[i].ds_len; 1495 } 1496 hdr->flags |= MFI_FRAME_SGL64; 1497 } 1498 hdr->sg_count = nsegs; 1499 1500 dir = 0; 1501 if (cm->cm_flags & MFI_CMD_DATAIN) { 1502 dir |= BUS_DMASYNC_PREREAD; 1503 hdr->flags |= MFI_FRAME_DIR_READ; 1504 } 1505 if (cm->cm_flags & MFI_CMD_DATAOUT) { 1506 dir |= BUS_DMASYNC_PREWRITE; 1507 hdr->flags |= MFI_FRAME_DIR_WRITE; 1508 } 1509 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1510 cm->cm_flags |= MFI_CMD_MAPPED; 1511 1512 /* 1513 * Instead of calculating the total number of frames in the 1514 * compound frame, it's already assumed that there will be at 1515 * least 1 frame, so don't compensate for the modulo of the 1516 * following division. 1517 */ 1518 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); 1519 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 1520 1521 mfi_send_frame(sc, cm); 1522 1523 return; 1524 } 1525 1526 static int 1527 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 1528 { 1529 struct mfi_frame_header *hdr; 1530 int tm = MFI_POLL_TIMEOUT_SECS * 1000; 1531 1532 hdr = &cm->cm_frame->header; 1533 1534 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { 1535 cm->cm_timestamp = time_uptime; 1536 mfi_enqueue_busy(cm); 1537 } else { 1538 hdr->cmd_status = 0xff; 1539 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 1540 } 1541 1542 /* 1543 * The bus address of the command is aligned on a 64 byte boundary, 1544 * leaving the least 6 bits as zero. For whatever reason, the 1545 * hardware wants the address shifted right by three, leaving just 1546 * 3 zero bits. These three bits are then used as a prefetching 1547 * hint for the hardware to predict how many frames need to be 1548 * fetched across the bus. If a command has more than 8 frames 1549 * then the 3 bits are set to 0x7 and the firmware uses other 1550 * information in the command to determine the total amount to fetch. 1551 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames 1552 * is enough for both 32bit and 64bit systems. 1553 */ 1554 if (cm->cm_extra_frames > 7) 1555 cm->cm_extra_frames = 7; 1556 1557 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames); 1558 1559 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) 1560 return (0); 1561 1562 /* This is a polled command, so busy-wait for it to complete. */ 1563 while (hdr->cmd_status == 0xff) { 1564 DELAY(1000); 1565 tm -= 1; 1566 if (tm <= 0) 1567 break; 1568 } 1569 1570 if (hdr->cmd_status == 0xff) { 1571 device_printf(sc->mfi_dev, "Frame %p timed out " 1572 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); 1573 return (ETIMEDOUT); 1574 } 1575 1576 return (0); 1577 } 1578 1579 static void 1580 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) 1581 { 1582 int dir; 1583 1584 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { 1585 dir = 0; 1586 if (cm->cm_flags & MFI_CMD_DATAIN) 1587 dir |= BUS_DMASYNC_POSTREAD; 1588 if (cm->cm_flags & MFI_CMD_DATAOUT) 1589 dir |= BUS_DMASYNC_POSTWRITE; 1590 1591 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1592 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1593 cm->cm_flags &= ~MFI_CMD_MAPPED; 1594 } 1595 1596 cm->cm_flags |= MFI_CMD_COMPLETED; 1597 1598 if (cm->cm_complete != NULL) 1599 cm->cm_complete(cm); 1600 else 1601 wakeup(cm); 1602 } 1603 1604 static int 1605 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort) 1606 { 1607 struct mfi_command *cm; 1608 struct mfi_abort_frame *abort; 1609 int i = 0; 1610 1611 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1612 1613 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1614 return (EBUSY); 1615 } 1616 1617 abort = &cm->cm_frame->abort; 1618 abort->header.cmd = MFI_CMD_ABORT; 1619 abort->header.flags = 0; 1620 abort->abort_context = cm_abort->cm_frame->header.context; 1621 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr; 1622 abort->abort_mfi_addr_hi = 0; 1623 cm->cm_data = NULL; 1624 cm->cm_flags = MFI_CMD_POLLED; 1625 1626 sc->mfi_aen_cm->cm_aen_abort = 1; 1627 mfi_mapcmd(sc, cm); 1628 mfi_release_command(cm); 1629 1630 while (i < 5 && sc->mfi_aen_cm != NULL) { 1631 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz); 1632 i++; 1633 } 1634 1635 return (0); 1636 } 1637 1638 int 1639 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) 1640 { 1641 struct mfi_command *cm; 1642 struct mfi_io_frame *io; 1643 int error; 1644 1645 if ((cm = mfi_dequeue_free(sc)) == NULL) 1646 return (EBUSY); 1647 1648 io = &cm->cm_frame->io; 1649 io->header.cmd = MFI_CMD_LD_WRITE; 1650 io->header.target_id = id; 1651 io->header.timeout = 0; 1652 io->header.flags = 0; 1653 io->header.sense_len = MFI_SENSE_LEN; 1654 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1655 io->sense_addr_lo = cm->cm_sense_busaddr; 1656 io->sense_addr_hi = 0; 1657 io->lba_hi = (lba & 0xffffffff00000000) >> 32; 1658 io->lba_lo = lba & 0xffffffff; 1659 cm->cm_data = virt; 1660 cm->cm_len = len; 1661 cm->cm_sg = &io->sgl; 1662 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1663 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 1664 1665 error = mfi_mapcmd(sc, cm); 1666 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1667 BUS_DMASYNC_POSTWRITE); 1668 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1669 mfi_release_command(cm); 1670 1671 return (error); 1672 } 1673 1674 static int 1675 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td) 1676 { 1677 struct mfi_softc *sc; 1678 int error; 1679 1680 sc = dev->si_drv1; 1681 1682 mtx_lock(&sc->mfi_io_lock); 1683 if (sc->mfi_detaching) 1684 error = ENXIO; 1685 else { 1686 sc->mfi_flags |= MFI_FLAGS_OPEN; 1687 error = 0; 1688 } 1689 mtx_unlock(&sc->mfi_io_lock); 1690 1691 return (error); 1692 } 1693 1694 static int 1695 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td) 1696 { 1697 struct mfi_softc *sc; 1698 struct mfi_aen *mfi_aen_entry, *tmp; 1699 1700 sc = dev->si_drv1; 1701 1702 mtx_lock(&sc->mfi_io_lock); 1703 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 1704 1705 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1706 if (mfi_aen_entry->p == curproc) { 1707 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1708 aen_link); 1709 free(mfi_aen_entry, M_MFIBUF); 1710 } 1711 } 1712 mtx_unlock(&sc->mfi_io_lock); 1713 return (0); 1714 } 1715 1716 static int 1717 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 1718 { 1719 1720 switch (opcode) { 1721 case MFI_DCMD_LD_DELETE: 1722 case MFI_DCMD_CFG_ADD: 1723 case MFI_DCMD_CFG_CLEAR: 1724 sx_xlock(&sc->mfi_config_lock); 1725 return (1); 1726 default: 1727 return (0); 1728 } 1729 } 1730 1731 static void 1732 mfi_config_unlock(struct mfi_softc *sc, int locked) 1733 { 1734 1735 if (locked) 1736 sx_xunlock(&sc->mfi_config_lock); 1737 } 1738 1739 /* Perform pre-issue checks on commands from userland and possibly veto them. */ 1740 static int 1741 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) 1742 { 1743 struct mfi_disk *ld, *ld2; 1744 int error; 1745 1746 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1747 error = 0; 1748 switch (cm->cm_frame->dcmd.opcode) { 1749 case MFI_DCMD_LD_DELETE: 1750 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1751 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1752 break; 1753 } 1754 if (ld == NULL) 1755 error = ENOENT; 1756 else 1757 error = mfi_disk_disable(ld); 1758 break; 1759 case MFI_DCMD_CFG_CLEAR: 1760 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1761 error = mfi_disk_disable(ld); 1762 if (error) 1763 break; 1764 } 1765 if (error) { 1766 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { 1767 if (ld2 == ld) 1768 break; 1769 mfi_disk_enable(ld2); 1770 } 1771 } 1772 break; 1773 default: 1774 break; 1775 } 1776 return (error); 1777 } 1778 1779 /* Perform post-issue checks on commands from userland. */ 1780 static void 1781 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm) 1782 { 1783 struct mfi_disk *ld, *ldn; 1784 1785 switch (cm->cm_frame->dcmd.opcode) { 1786 case MFI_DCMD_LD_DELETE: 1787 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1788 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1789 break; 1790 } 1791 KASSERT(ld != NULL, ("volume dissappeared")); 1792 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1793 mtx_unlock(&sc->mfi_io_lock); 1794 mtx_lock(&Giant); 1795 device_delete_child(sc->mfi_dev, ld->ld_dev); 1796 mtx_unlock(&Giant); 1797 mtx_lock(&sc->mfi_io_lock); 1798 } else 1799 mfi_disk_enable(ld); 1800 break; 1801 case MFI_DCMD_CFG_CLEAR: 1802 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1803 mtx_unlock(&sc->mfi_io_lock); 1804 mtx_lock(&Giant); 1805 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) { 1806 device_delete_child(sc->mfi_dev, ld->ld_dev); 1807 } 1808 mtx_unlock(&Giant); 1809 mtx_lock(&sc->mfi_io_lock); 1810 } else { 1811 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) 1812 mfi_disk_enable(ld); 1813 } 1814 break; 1815 case MFI_DCMD_CFG_ADD: 1816 mfi_ldprobe(sc); 1817 break; 1818 } 1819 } 1820 1821 static int 1822 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) 1823 { 1824 struct mfi_command *cm; 1825 struct mfi_dcmd_frame *dcmd; 1826 void *ioc_buf = NULL; 1827 uint32_t context; 1828 int error = 0, locked; 1829 1830 1831 if (ioc->buf_size > 0) { 1832 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); 1833 if (ioc_buf == NULL) { 1834 return (ENOMEM); 1835 } 1836 error = copyin(ioc->buf, ioc_buf, ioc->buf_size); 1837 if (error) { 1838 device_printf(sc->mfi_dev, "failed to copyin\n"); 1839 free(ioc_buf, M_MFIBUF); 1840 return (error); 1841 } 1842 } 1843 1844 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); 1845 1846 mtx_lock(&sc->mfi_io_lock); 1847 while ((cm = mfi_dequeue_free(sc)) == NULL) 1848 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); 1849 1850 /* Save context for later */ 1851 context = cm->cm_frame->header.context; 1852 1853 dcmd = &cm->cm_frame->dcmd; 1854 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); 1855 1856 cm->cm_sg = &dcmd->sgl; 1857 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1858 cm->cm_data = ioc_buf; 1859 cm->cm_len = ioc->buf_size; 1860 1861 /* restore context */ 1862 cm->cm_frame->header.context = context; 1863 1864 /* Cheat since we don't know if we're writing or reading */ 1865 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 1866 1867 error = mfi_check_command_pre(sc, cm); 1868 if (error) 1869 goto out; 1870 1871 error = mfi_wait_command(sc, cm); 1872 if (error) { 1873 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 1874 goto out; 1875 } 1876 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 1877 mfi_check_command_post(sc, cm); 1878 out: 1879 mfi_release_command(cm); 1880 mtx_unlock(&sc->mfi_io_lock); 1881 mfi_config_unlock(sc, locked); 1882 if (ioc->buf_size > 0) 1883 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 1884 if (ioc_buf) 1885 free(ioc_buf, M_MFIBUF); 1886 return (error); 1887 } 1888 1889 #ifdef __amd64__ 1890 #define PTRIN(p) ((void *)(uintptr_t)(p)) 1891 #else 1892 #define PTRIN(p) (p) 1893 #endif 1894 1895 static int 1896 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) 1897 { 1898 struct mfi_softc *sc; 1899 union mfi_statrequest *ms; 1900 struct mfi_ioc_packet *ioc; 1901 #ifdef __amd64__ 1902 struct mfi_ioc_packet32 *ioc32; 1903 #endif 1904 struct mfi_ioc_aen *aen; 1905 struct mfi_command *cm = NULL; 1906 uint32_t context; 1907 uint8_t *sense_ptr; 1908 uint8_t *data = NULL, *temp; 1909 int i; 1910 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 1911 #ifdef __amd64__ 1912 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 1913 struct mfi_ioc_passthru iop_swab; 1914 #endif 1915 int error, locked; 1916 1917 sc = dev->si_drv1; 1918 error = 0; 1919 1920 switch (cmd) { 1921 case MFIIO_STATS: 1922 ms = (union mfi_statrequest *)arg; 1923 switch (ms->ms_item) { 1924 case MFIQ_FREE: 1925 case MFIQ_BIO: 1926 case MFIQ_READY: 1927 case MFIQ_BUSY: 1928 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 1929 sizeof(struct mfi_qstat)); 1930 break; 1931 default: 1932 error = ENOIOCTL; 1933 break; 1934 } 1935 break; 1936 case MFIIO_QUERY_DISK: 1937 { 1938 struct mfi_query_disk *qd; 1939 struct mfi_disk *ld; 1940 1941 qd = (struct mfi_query_disk *)arg; 1942 mtx_lock(&sc->mfi_io_lock); 1943 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1944 if (ld->ld_id == qd->array_id) 1945 break; 1946 } 1947 if (ld == NULL) { 1948 qd->present = 0; 1949 mtx_unlock(&sc->mfi_io_lock); 1950 return (0); 1951 } 1952 qd->present = 1; 1953 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 1954 qd->open = 1; 1955 bzero(qd->devname, SPECNAMELEN + 1); 1956 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 1957 mtx_unlock(&sc->mfi_io_lock); 1958 break; 1959 } 1960 case MFI_CMD: 1961 #ifdef __amd64__ 1962 case MFI_CMD32: 1963 #endif 1964 { 1965 devclass_t devclass; 1966 ioc = (struct mfi_ioc_packet *)arg; 1967 int adapter; 1968 1969 adapter = ioc->mfi_adapter_no; 1970 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 1971 devclass = devclass_find("mfi"); 1972 sc = devclass_get_softc(devclass, adapter); 1973 } 1974 mtx_lock(&sc->mfi_io_lock); 1975 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1976 mtx_unlock(&sc->mfi_io_lock); 1977 return (EBUSY); 1978 } 1979 mtx_unlock(&sc->mfi_io_lock); 1980 locked = 0; 1981 1982 /* 1983 * save off original context since copying from user 1984 * will clobber some data 1985 */ 1986 context = cm->cm_frame->header.context; 1987 1988 bcopy(ioc->mfi_frame.raw, cm->cm_frame, 1989 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 1990 cm->cm_total_frame_size = (sizeof(union mfi_sgl) * ioc->mfi_sge_count) + ioc->mfi_sgl_off; 1991 if (ioc->mfi_sge_count) { 1992 cm->cm_sg = 1993 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; 1994 } 1995 cm->cm_flags = 0; 1996 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 1997 cm->cm_flags |= MFI_CMD_DATAIN; 1998 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 1999 cm->cm_flags |= MFI_CMD_DATAOUT; 2000 /* Legacy app shim */ 2001 if (cm->cm_flags == 0) 2002 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 2003 cm->cm_len = cm->cm_frame->header.data_len; 2004 if (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT)) { 2005 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2006 M_WAITOK | M_ZERO); 2007 if (cm->cm_data == NULL) { 2008 device_printf(sc->mfi_dev, "Malloc failed\n"); 2009 goto out; 2010 } 2011 } else { 2012 cm->cm_data = 0; 2013 } 2014 2015 /* restore header context */ 2016 cm->cm_frame->header.context = context; 2017 2018 temp = data; 2019 if (cm->cm_flags & MFI_CMD_DATAOUT) { 2020 for (i = 0; i < ioc->mfi_sge_count; i++) { 2021 #ifdef __amd64__ 2022 if (cmd == MFI_CMD) { 2023 /* Native */ 2024 error = copyin(ioc->mfi_sgl[i].iov_base, 2025 temp, 2026 ioc->mfi_sgl[i].iov_len); 2027 } else { 2028 void *temp_convert; 2029 /* 32bit */ 2030 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2031 temp_convert = 2032 PTRIN(ioc32->mfi_sgl[i].iov_base); 2033 error = copyin(temp_convert, 2034 temp, 2035 ioc32->mfi_sgl[i].iov_len); 2036 } 2037 #else 2038 error = copyin(ioc->mfi_sgl[i].iov_base, 2039 temp, 2040 ioc->mfi_sgl[i].iov_len); 2041 #endif 2042 if (error != 0) { 2043 device_printf(sc->mfi_dev, 2044 "Copy in failed\n"); 2045 goto out; 2046 } 2047 temp = &temp[ioc->mfi_sgl[i].iov_len]; 2048 } 2049 } 2050 2051 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2052 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2053 2054 mtx_lock(&sc->mfi_io_lock); 2055 error = mfi_check_command_pre(sc, cm); 2056 if (error) { 2057 mtx_unlock(&sc->mfi_io_lock); 2058 goto out; 2059 } 2060 2061 if ((error = mfi_wait_command(sc, cm)) != 0) { 2062 device_printf(sc->mfi_dev, 2063 "Controller polled failed\n"); 2064 mtx_unlock(&sc->mfi_io_lock); 2065 goto out; 2066 } 2067 2068 mfi_check_command_post(sc, cm); 2069 mtx_unlock(&sc->mfi_io_lock); 2070 2071 temp = data; 2072 if (cm->cm_flags & MFI_CMD_DATAIN) { 2073 for (i = 0; i < ioc->mfi_sge_count; i++) { 2074 #ifdef __amd64__ 2075 if (cmd == MFI_CMD) { 2076 /* Native */ 2077 error = copyout(temp, 2078 ioc->mfi_sgl[i].iov_base, 2079 ioc->mfi_sgl[i].iov_len); 2080 } else { 2081 void *temp_convert; 2082 /* 32bit */ 2083 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2084 temp_convert = 2085 PTRIN(ioc32->mfi_sgl[i].iov_base); 2086 error = copyout(temp, 2087 temp_convert, 2088 ioc32->mfi_sgl[i].iov_len); 2089 } 2090 #else 2091 error = copyout(temp, 2092 ioc->mfi_sgl[i].iov_base, 2093 ioc->mfi_sgl[i].iov_len); 2094 #endif 2095 if (error != 0) { 2096 device_printf(sc->mfi_dev, 2097 "Copy out failed\n"); 2098 goto out; 2099 } 2100 temp = &temp[ioc->mfi_sgl[i].iov_len]; 2101 } 2102 } 2103 2104 if (ioc->mfi_sense_len) { 2105 /* copy out sense */ 2106 sense_ptr = &((struct mfi_ioc_packet*)arg) 2107 ->mfi_frame.raw[0]; 2108 error = copyout(cm->cm_sense, sense_ptr, 2109 ioc->mfi_sense_len); 2110 if (error != 0) { 2111 device_printf(sc->mfi_dev, 2112 "Copy out failed\n"); 2113 goto out; 2114 } 2115 } 2116 2117 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; 2118 out: 2119 mfi_config_unlock(sc, locked); 2120 if (data) 2121 free(data, M_MFIBUF); 2122 if (cm) { 2123 mtx_lock(&sc->mfi_io_lock); 2124 mfi_release_command(cm); 2125 mtx_unlock(&sc->mfi_io_lock); 2126 } 2127 2128 break; 2129 } 2130 case MFI_SET_AEN: 2131 aen = (struct mfi_ioc_aen *)arg; 2132 error = mfi_aen_register(sc, aen->aen_seq_num, 2133 aen->aen_class_locale); 2134 2135 break; 2136 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2137 { 2138 devclass_t devclass; 2139 struct mfi_linux_ioc_packet l_ioc; 2140 int adapter; 2141 2142 devclass = devclass_find("mfi"); 2143 if (devclass == NULL) 2144 return (ENOENT); 2145 2146 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2147 if (error) 2148 return (error); 2149 adapter = l_ioc.lioc_adapter_no; 2150 sc = devclass_get_softc(devclass, adapter); 2151 if (sc == NULL) 2152 return (ENOENT); 2153 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2154 cmd, arg, flag, td)); 2155 break; 2156 } 2157 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2158 { 2159 devclass_t devclass; 2160 struct mfi_linux_ioc_aen l_aen; 2161 int adapter; 2162 2163 devclass = devclass_find("mfi"); 2164 if (devclass == NULL) 2165 return (ENOENT); 2166 2167 error = copyin(arg, &l_aen, sizeof(l_aen)); 2168 if (error) 2169 return (error); 2170 adapter = l_aen.laen_adapter_no; 2171 sc = devclass_get_softc(devclass, adapter); 2172 if (sc == NULL) 2173 return (ENOENT); 2174 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2175 cmd, arg, flag, td)); 2176 break; 2177 } 2178 #ifdef __amd64__ 2179 case MFIIO_PASSTHRU32: 2180 iop_swab.ioc_frame = iop32->ioc_frame; 2181 iop_swab.buf_size = iop32->buf_size; 2182 iop_swab.buf = PTRIN(iop32->buf); 2183 iop = &iop_swab; 2184 /* FALLTHROUGH */ 2185 #endif 2186 case MFIIO_PASSTHRU: 2187 error = mfi_user_command(sc, iop); 2188 #ifdef __amd64__ 2189 if (cmd == MFIIO_PASSTHRU32) 2190 iop32->ioc_frame = iop_swab.ioc_frame; 2191 #endif 2192 break; 2193 default: 2194 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2195 error = ENOENT; 2196 break; 2197 } 2198 2199 return (error); 2200 } 2201 2202 static int 2203 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) 2204 { 2205 struct mfi_softc *sc; 2206 struct mfi_linux_ioc_packet l_ioc; 2207 struct mfi_linux_ioc_aen l_aen; 2208 struct mfi_command *cm = NULL; 2209 struct mfi_aen *mfi_aen_entry; 2210 uint8_t *sense_ptr; 2211 uint32_t context; 2212 uint8_t *data = NULL, *temp; 2213 int i; 2214 int error, locked; 2215 2216 sc = dev->si_drv1; 2217 error = 0; 2218 switch (cmd) { 2219 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2220 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2221 if (error != 0) 2222 return (error); 2223 2224 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 2225 return (EINVAL); 2226 } 2227 2228 mtx_lock(&sc->mfi_io_lock); 2229 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2230 mtx_unlock(&sc->mfi_io_lock); 2231 return (EBUSY); 2232 } 2233 mtx_unlock(&sc->mfi_io_lock); 2234 locked = 0; 2235 2236 /* 2237 * save off original context since copying from user 2238 * will clobber some data 2239 */ 2240 context = cm->cm_frame->header.context; 2241 2242 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 2243 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 2244 cm->cm_total_frame_size = (sizeof(union mfi_sgl) * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 2245 if (l_ioc.lioc_sge_count) 2246 cm->cm_sg = 2247 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 2248 cm->cm_flags = 0; 2249 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 2250 cm->cm_flags |= MFI_CMD_DATAIN; 2251 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 2252 cm->cm_flags |= MFI_CMD_DATAOUT; 2253 cm->cm_len = cm->cm_frame->header.data_len; 2254 if (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT)) { 2255 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2256 M_WAITOK | M_ZERO); 2257 if (cm->cm_data == NULL) { 2258 device_printf(sc->mfi_dev, "Malloc failed\n"); 2259 goto out; 2260 } 2261 } else { 2262 cm->cm_data = 0; 2263 } 2264 2265 /* restore header context */ 2266 cm->cm_frame->header.context = context; 2267 2268 temp = data; 2269 if (cm->cm_flags & MFI_CMD_DATAOUT) { 2270 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2271 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 2272 temp, 2273 l_ioc.lioc_sgl[i].iov_len); 2274 if (error != 0) { 2275 device_printf(sc->mfi_dev, 2276 "Copy in failed\n"); 2277 goto out; 2278 } 2279 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2280 } 2281 } 2282 2283 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2284 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2285 2286 mtx_lock(&sc->mfi_io_lock); 2287 error = mfi_check_command_pre(sc, cm); 2288 if (error) { 2289 mtx_unlock(&sc->mfi_io_lock); 2290 goto out; 2291 } 2292 2293 if ((error = mfi_wait_command(sc, cm)) != 0) { 2294 device_printf(sc->mfi_dev, 2295 "Controller polled failed\n"); 2296 mtx_unlock(&sc->mfi_io_lock); 2297 goto out; 2298 } 2299 2300 mfi_check_command_post(sc, cm); 2301 mtx_unlock(&sc->mfi_io_lock); 2302 2303 temp = data; 2304 if (cm->cm_flags & MFI_CMD_DATAIN) { 2305 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2306 error = copyout(temp, 2307 PTRIN(l_ioc.lioc_sgl[i].iov_base), 2308 l_ioc.lioc_sgl[i].iov_len); 2309 if (error != 0) { 2310 device_printf(sc->mfi_dev, 2311 "Copy out failed\n"); 2312 goto out; 2313 } 2314 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2315 } 2316 } 2317 2318 if (l_ioc.lioc_sense_len) { 2319 /* copy out sense */ 2320 sense_ptr = &((struct mfi_linux_ioc_packet*)arg) 2321 ->lioc_frame.raw[0]; 2322 error = copyout(cm->cm_sense, sense_ptr, 2323 l_ioc.lioc_sense_len); 2324 if (error != 0) { 2325 device_printf(sc->mfi_dev, 2326 "Copy out failed\n"); 2327 goto out; 2328 } 2329 } 2330 2331 error = copyout(&cm->cm_frame->header.cmd_status, 2332 &((struct mfi_linux_ioc_packet*)arg) 2333 ->lioc_frame.hdr.cmd_status, 2334 1); 2335 if (error != 0) { 2336 device_printf(sc->mfi_dev, 2337 "Copy out failed\n"); 2338 goto out; 2339 } 2340 2341 out: 2342 mfi_config_unlock(sc, locked); 2343 if (data) 2344 free(data, M_MFIBUF); 2345 if (cm) { 2346 mtx_lock(&sc->mfi_io_lock); 2347 mfi_release_command(cm); 2348 mtx_unlock(&sc->mfi_io_lock); 2349 } 2350 2351 return (error); 2352 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2353 error = copyin(arg, &l_aen, sizeof(l_aen)); 2354 if (error != 0) 2355 return (error); 2356 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); 2357 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, 2358 M_WAITOK); 2359 mtx_lock(&sc->mfi_io_lock); 2360 if (mfi_aen_entry != NULL) { 2361 mfi_aen_entry->p = curproc; 2362 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, 2363 aen_link); 2364 } 2365 error = mfi_aen_register(sc, l_aen.laen_seq_num, 2366 l_aen.laen_class_locale); 2367 2368 if (error != 0) { 2369 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2370 aen_link); 2371 free(mfi_aen_entry, M_MFIBUF); 2372 } 2373 mtx_unlock(&sc->mfi_io_lock); 2374 2375 return (error); 2376 default: 2377 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2378 error = ENOENT; 2379 break; 2380 } 2381 2382 return (error); 2383 } 2384 2385 static int 2386 mfi_poll(struct cdev *dev, int poll_events, struct thread *td) 2387 { 2388 struct mfi_softc *sc; 2389 int revents = 0; 2390 2391 sc = dev->si_drv1; 2392 2393 if (poll_events & (POLLIN | POLLRDNORM)) { 2394 if (sc->mfi_aen_triggered != 0) { 2395 revents |= poll_events & (POLLIN | POLLRDNORM); 2396 sc->mfi_aen_triggered = 0; 2397 } 2398 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { 2399 revents |= POLLERR; 2400 } 2401 } 2402 2403 if (revents == 0) { 2404 if (poll_events & (POLLIN | POLLRDNORM)) { 2405 sc->mfi_poll_waiting = 1; 2406 selrecord(td, &sc->mfi_select); 2407 } 2408 } 2409 2410 return revents; 2411 } 2412 2413 2414 static void 2415 mfi_dump_all(void) 2416 { 2417 struct mfi_softc *sc; 2418 struct mfi_command *cm; 2419 devclass_t dc; 2420 time_t deadline; 2421 int timedout; 2422 int i; 2423 2424 dc = devclass_find("mfi"); 2425 if (dc == NULL) { 2426 printf("No mfi dev class\n"); 2427 return; 2428 } 2429 2430 for (i = 0; ; i++) { 2431 sc = devclass_get_softc(dc, i); 2432 if (sc == NULL) 2433 break; 2434 device_printf(sc->mfi_dev, "Dumping\n\n"); 2435 timedout = 0; 2436 deadline = time_uptime - MFI_CMD_TIMEOUT; 2437 mtx_lock(&sc->mfi_io_lock); 2438 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2439 if (cm->cm_timestamp < deadline) { 2440 device_printf(sc->mfi_dev, 2441 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2442 (int)(time_uptime - cm->cm_timestamp)); 2443 MFI_PRINT_CMD(cm); 2444 timedout++; 2445 } 2446 } 2447 2448 #if 0 2449 if (timedout) 2450 MFI_DUMP_CMDS(SC); 2451 #endif 2452 2453 mtx_unlock(&sc->mfi_io_lock); 2454 } 2455 2456 return; 2457 } 2458 2459 static void 2460 mfi_timeout(void *data) 2461 { 2462 struct mfi_softc *sc = (struct mfi_softc *)data; 2463 struct mfi_command *cm; 2464 time_t deadline; 2465 int timedout = 0; 2466 2467 deadline = time_uptime - MFI_CMD_TIMEOUT; 2468 mtx_lock(&sc->mfi_io_lock); 2469 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2470 if (sc->mfi_aen_cm == cm) 2471 continue; 2472 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) { 2473 device_printf(sc->mfi_dev, 2474 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2475 (int)(time_uptime - cm->cm_timestamp)); 2476 MFI_PRINT_CMD(cm); 2477 MFI_VALIDATE_CMD(sc, cm); 2478 timedout++; 2479 } 2480 } 2481 2482 #if 0 2483 if (timedout) 2484 MFI_DUMP_CMDS(SC); 2485 #endif 2486 2487 mtx_unlock(&sc->mfi_io_lock); 2488 2489 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 2490 mfi_timeout, sc); 2491 2492 if (0) 2493 mfi_dump_all(); 2494 return; 2495 } 2496