1 /*- 2 * Copyright (c) 2006 IronPort Systems 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /*- 27 * Copyright (c) 2007 LSI Corp. 28 * Copyright (c) 2007 Rajesh Prabhakaran. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include "opt_mfi.h" 57 58 #include <sys/param.h> 59 #include <sys/systm.h> 60 #include <sys/sysctl.h> 61 #include <sys/malloc.h> 62 #include <sys/kernel.h> 63 #include <sys/poll.h> 64 #include <sys/selinfo.h> 65 #include <sys/bus.h> 66 #include <sys/conf.h> 67 #include <sys/eventhandler.h> 68 #include <sys/rman.h> 69 #include <sys/bus_dma.h> 70 #include <sys/bio.h> 71 #include <sys/ioccom.h> 72 #include <sys/uio.h> 73 #include <sys/proc.h> 74 #include <sys/signalvar.h> 75 76 #include <machine/bus.h> 77 #include <machine/resource.h> 78 79 #include <dev/mfi/mfireg.h> 80 #include <dev/mfi/mfi_ioctl.h> 81 #include <dev/mfi/mfivar.h> 82 83 static int mfi_alloc_commands(struct mfi_softc *); 84 static int mfi_comms_init(struct mfi_softc *); 85 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *); 86 static int mfi_get_controller_info(struct mfi_softc *); 87 static int mfi_get_log_state(struct mfi_softc *, 88 struct mfi_evt_log_state **); 89 static int mfi_parse_entries(struct mfi_softc *, int, int); 90 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **, 91 uint32_t, void **, size_t); 92 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); 93 static void mfi_startup(void *arg); 94 static void mfi_intr(void *arg); 95 static void mfi_ldprobe(struct mfi_softc *sc); 96 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); 97 static void mfi_aen_complete(struct mfi_command *); 98 static int mfi_aen_setup(struct mfi_softc *, uint32_t); 99 static int mfi_add_ld(struct mfi_softc *sc, int); 100 static void mfi_add_ld_complete(struct mfi_command *); 101 static struct mfi_command * mfi_bio_command(struct mfi_softc *); 102 static void mfi_bio_complete(struct mfi_command *); 103 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *); 104 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); 105 static void mfi_complete(struct mfi_softc *, struct mfi_command *); 106 static int mfi_abort(struct mfi_softc *, struct mfi_command *); 107 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *); 108 static void mfi_timeout(void *); 109 static int mfi_user_command(struct mfi_softc *, 110 struct mfi_ioc_passthru *); 111 static void mfi_enable_intr_xscale(struct mfi_softc *sc); 112 static void mfi_enable_intr_ppc(struct mfi_softc *sc); 113 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); 114 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); 115 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); 116 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); 117 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 118 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 119 120 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); 121 static int mfi_event_locale = MFI_EVT_LOCALE_ALL; 122 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale); 123 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale, 124 0, "event message locale"); 125 126 static int mfi_event_class = MFI_EVT_CLASS_INFO; 127 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class); 128 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class, 129 0, "event message class"); 130 131 static int mfi_max_cmds = 128; 132 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds); 133 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds, 134 0, "Max commands"); 135 136 /* Management interface */ 137 static d_open_t mfi_open; 138 static d_close_t mfi_close; 139 static d_ioctl_t mfi_ioctl; 140 static d_poll_t mfi_poll; 141 142 static struct cdevsw mfi_cdevsw = { 143 .d_version = D_VERSION, 144 .d_flags = 0, 145 .d_open = mfi_open, 146 .d_close = mfi_close, 147 .d_ioctl = mfi_ioctl, 148 .d_poll = mfi_poll, 149 .d_name = "mfi", 150 }; 151 152 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); 153 154 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH 155 156 static void 157 mfi_enable_intr_xscale(struct mfi_softc *sc) 158 { 159 MFI_WRITE4(sc, MFI_OMSK, 0x01); 160 } 161 162 static void 163 mfi_enable_intr_ppc(struct mfi_softc *sc) 164 { 165 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); 166 if (sc->mfi_flags & MFI_FLAGS_1078) { 167 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); 168 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 169 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM); 170 } 171 } 172 173 static int32_t 174 mfi_read_fw_status_xscale(struct mfi_softc *sc) 175 { 176 return MFI_READ4(sc, MFI_OMSG0); 177 } 178 179 static int32_t 180 mfi_read_fw_status_ppc(struct mfi_softc *sc) 181 { 182 return MFI_READ4(sc, MFI_OSP0); 183 } 184 185 static int 186 mfi_check_clear_intr_xscale(struct mfi_softc *sc) 187 { 188 int32_t status; 189 190 status = MFI_READ4(sc, MFI_OSTS); 191 if ((status & MFI_OSTS_INTR_VALID) == 0) 192 return 1; 193 194 MFI_WRITE4(sc, MFI_OSTS, status); 195 return 0; 196 } 197 198 static int 199 mfi_check_clear_intr_ppc(struct mfi_softc *sc) 200 { 201 int32_t status; 202 203 status = MFI_READ4(sc, MFI_OSTS); 204 if (sc->mfi_flags & MFI_FLAGS_1078) { 205 if (!(status & MFI_1078_RM)) { 206 return 1; 207 } 208 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 209 if (!(status & MFI_GEN2_RM)) { 210 return 1; 211 } 212 } 213 214 MFI_WRITE4(sc, MFI_ODCR0, status); 215 return 0; 216 } 217 218 static void 219 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 220 { 221 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); 222 } 223 224 static void 225 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 226 { 227 MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 ); 228 } 229 230 static int 231 mfi_transition_firmware(struct mfi_softc *sc) 232 { 233 uint32_t fw_state, cur_state; 234 int max_wait, i; 235 236 fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK; 237 while (fw_state != MFI_FWSTATE_READY) { 238 if (bootverbose) 239 device_printf(sc->mfi_dev, "Waiting for firmware to " 240 "become ready\n"); 241 cur_state = fw_state; 242 switch (fw_state) { 243 case MFI_FWSTATE_FAULT: 244 device_printf(sc->mfi_dev, "Firmware fault\n"); 245 return (ENXIO); 246 case MFI_FWSTATE_WAIT_HANDSHAKE: 247 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); 248 max_wait = 2; 249 break; 250 case MFI_FWSTATE_OPERATIONAL: 251 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); 252 max_wait = 10; 253 break; 254 case MFI_FWSTATE_UNDEFINED: 255 case MFI_FWSTATE_BB_INIT: 256 max_wait = 2; 257 break; 258 case MFI_FWSTATE_FW_INIT: 259 case MFI_FWSTATE_DEVICE_SCAN: 260 case MFI_FWSTATE_FLUSH_CACHE: 261 max_wait = 20; 262 break; 263 case MFI_FWSTATE_BOOT_MESSAGE_PENDING: 264 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG); 265 max_wait = 10; 266 break; 267 default: 268 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n", 269 fw_state); 270 return (ENXIO); 271 } 272 for (i = 0; i < (max_wait * 10); i++) { 273 fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK; 274 if (fw_state == cur_state) 275 DELAY(100000); 276 else 277 break; 278 } 279 if (fw_state == cur_state) { 280 device_printf(sc->mfi_dev, "Firmware stuck in state " 281 "%#x\n", fw_state); 282 return (ENXIO); 283 } 284 } 285 return (0); 286 } 287 288 static void 289 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 290 { 291 uint32_t *addr; 292 293 addr = arg; 294 *addr = segs[0].ds_addr; 295 } 296 297 int 298 mfi_attach(struct mfi_softc *sc) 299 { 300 uint32_t status; 301 int error, commsz, framessz, sensesz; 302 int frames, unit, max_fw_sge; 303 304 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n"); 305 306 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); 307 sx_init(&sc->mfi_config_lock, "MFI config"); 308 TAILQ_INIT(&sc->mfi_ld_tqh); 309 TAILQ_INIT(&sc->mfi_aen_pids); 310 TAILQ_INIT(&sc->mfi_cam_ccbq); 311 312 mfi_initq_free(sc); 313 mfi_initq_ready(sc); 314 mfi_initq_busy(sc); 315 mfi_initq_bio(sc); 316 317 if (sc->mfi_flags & MFI_FLAGS_1064R) { 318 sc->mfi_enable_intr = mfi_enable_intr_xscale; 319 sc->mfi_read_fw_status = mfi_read_fw_status_xscale; 320 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; 321 sc->mfi_issue_cmd = mfi_issue_cmd_xscale; 322 } 323 else { 324 sc->mfi_enable_intr = mfi_enable_intr_ppc; 325 sc->mfi_read_fw_status = mfi_read_fw_status_ppc; 326 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; 327 sc->mfi_issue_cmd = mfi_issue_cmd_ppc; 328 } 329 330 331 /* Before we get too far, see if the firmware is working */ 332 if ((error = mfi_transition_firmware(sc)) != 0) { 333 device_printf(sc->mfi_dev, "Firmware not in READY state, " 334 "error %d\n", error); 335 return (ENXIO); 336 } 337 338 /* 339 * Get information needed for sizing the contiguous memory for the 340 * frame pool. Size down the sgl parameter since we know that 341 * we will never need more than what's required for MAXPHYS. 342 * It would be nice if these constants were available at runtime 343 * instead of compile time. 344 */ 345 status = sc->mfi_read_fw_status(sc); 346 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; 347 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; 348 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1)); 349 350 /* 351 * Create the dma tag for data buffers. Used both for block I/O 352 * and for various internal data queries. 353 */ 354 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 355 1, 0, /* algnmnt, boundary */ 356 BUS_SPACE_MAXADDR, /* lowaddr */ 357 BUS_SPACE_MAXADDR, /* highaddr */ 358 NULL, NULL, /* filter, filterarg */ 359 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 360 sc->mfi_max_sge, /* nsegments */ 361 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 362 BUS_DMA_ALLOCNOW, /* flags */ 363 busdma_lock_mutex, /* lockfunc */ 364 &sc->mfi_io_lock, /* lockfuncarg */ 365 &sc->mfi_buffer_dmat)) { 366 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); 367 return (ENOMEM); 368 } 369 370 /* 371 * Allocate DMA memory for the comms queues. Keep it under 4GB for 372 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue 373 * entry, so the calculated size here will be will be 1 more than 374 * mfi_max_fw_cmds. This is apparently a requirement of the hardware. 375 */ 376 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) + 377 sizeof(struct mfi_hwcomms); 378 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 379 1, 0, /* algnmnt, boundary */ 380 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 381 BUS_SPACE_MAXADDR, /* highaddr */ 382 NULL, NULL, /* filter, filterarg */ 383 commsz, /* maxsize */ 384 1, /* msegments */ 385 commsz, /* maxsegsize */ 386 0, /* flags */ 387 NULL, NULL, /* lockfunc, lockarg */ 388 &sc->mfi_comms_dmat)) { 389 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); 390 return (ENOMEM); 391 } 392 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms, 393 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) { 394 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 395 return (ENOMEM); 396 } 397 bzero(sc->mfi_comms, commsz); 398 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap, 399 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0); 400 401 /* 402 * Allocate DMA memory for the command frames. Keep them in the 403 * lower 4GB for efficiency. Calculate the size of the commands at 404 * the same time; each command is one 64 byte frame plus a set of 405 * additional frames for holding sg lists or other data. 406 * The assumption here is that the SG list will start at the second 407 * frame and not use the unused bytes in the first frame. While this 408 * isn't technically correct, it simplifies the calculation and allows 409 * for command frames that might be larger than an mfi_io_frame. 410 */ 411 if (sizeof(bus_addr_t) == 8) { 412 sc->mfi_sge_size = sizeof(struct mfi_sg64); 413 sc->mfi_flags |= MFI_FLAGS_SG64; 414 } else { 415 sc->mfi_sge_size = sizeof(struct mfi_sg32); 416 } 417 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; 418 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; 419 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; 420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 421 64, 0, /* algnmnt, boundary */ 422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 423 BUS_SPACE_MAXADDR, /* highaddr */ 424 NULL, NULL, /* filter, filterarg */ 425 framessz, /* maxsize */ 426 1, /* nsegments */ 427 framessz, /* maxsegsize */ 428 0, /* flags */ 429 NULL, NULL, /* lockfunc, lockarg */ 430 &sc->mfi_frames_dmat)) { 431 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); 432 return (ENOMEM); 433 } 434 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, 435 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { 436 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); 437 return (ENOMEM); 438 } 439 bzero(sc->mfi_frames, framessz); 440 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, 441 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0); 442 443 /* 444 * Allocate DMA memory for the frame sense data. Keep them in the 445 * lower 4GB for efficiency 446 */ 447 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; 448 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 449 4, 0, /* algnmnt, boundary */ 450 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 451 BUS_SPACE_MAXADDR, /* highaddr */ 452 NULL, NULL, /* filter, filterarg */ 453 sensesz, /* maxsize */ 454 1, /* nsegments */ 455 sensesz, /* maxsegsize */ 456 0, /* flags */ 457 NULL, NULL, /* lockfunc, lockarg */ 458 &sc->mfi_sense_dmat)) { 459 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); 460 return (ENOMEM); 461 } 462 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, 463 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { 464 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); 465 return (ENOMEM); 466 } 467 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, 468 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0); 469 470 if ((error = mfi_alloc_commands(sc)) != 0) 471 return (error); 472 473 if ((error = mfi_comms_init(sc)) != 0) 474 return (error); 475 476 if ((error = mfi_get_controller_info(sc)) != 0) 477 return (error); 478 479 mtx_lock(&sc->mfi_io_lock); 480 if ((error = mfi_aen_setup(sc, 0), 0) != 0) { 481 mtx_unlock(&sc->mfi_io_lock); 482 return (error); 483 } 484 mtx_unlock(&sc->mfi_io_lock); 485 486 /* 487 * Set up the interrupt handler. 488 */ 489 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, 490 NULL, mfi_intr, sc, &sc->mfi_intr)) { 491 device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); 492 return (EINVAL); 493 } 494 495 /* Register a config hook to probe the bus for arrays */ 496 sc->mfi_ich.ich_func = mfi_startup; 497 sc->mfi_ich.ich_arg = sc; 498 if (config_intrhook_establish(&sc->mfi_ich) != 0) { 499 device_printf(sc->mfi_dev, "Cannot establish configuration " 500 "hook\n"); 501 return (EINVAL); 502 } 503 504 /* 505 * Register a shutdown handler. 506 */ 507 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, 508 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { 509 device_printf(sc->mfi_dev, "Warning: shutdown event " 510 "registration failed\n"); 511 } 512 513 /* 514 * Create the control device for doing management 515 */ 516 unit = device_get_unit(sc->mfi_dev); 517 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 518 0640, "mfi%d", unit); 519 if (unit == 0) 520 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node"); 521 if (sc->mfi_cdev != NULL) 522 sc->mfi_cdev->si_drv1 = sc; 523 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 524 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 525 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, 526 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); 527 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 528 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 529 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, 530 &sc->mfi_keep_deleted_volumes, 0, 531 "Don't detach the mfid device for a busy volume that is deleted"); 532 533 device_add_child(sc->mfi_dev, "mfip", -1); 534 bus_generic_attach(sc->mfi_dev); 535 536 /* Start the timeout watchdog */ 537 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE); 538 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 539 mfi_timeout, sc); 540 541 return (0); 542 } 543 544 static int 545 mfi_alloc_commands(struct mfi_softc *sc) 546 { 547 struct mfi_command *cm; 548 int i, ncmds; 549 550 /* 551 * XXX Should we allocate all the commands up front, or allocate on 552 * demand later like 'aac' does? 553 */ 554 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds); 555 if (bootverbose) 556 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver " 557 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds); 558 559 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF, 560 M_WAITOK | M_ZERO); 561 562 for (i = 0; i < ncmds; i++) { 563 cm = &sc->mfi_commands[i]; 564 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + 565 sc->mfi_cmd_size * i); 566 cm->cm_frame_busaddr = sc->mfi_frames_busaddr + 567 sc->mfi_cmd_size * i; 568 cm->cm_frame->header.context = i; 569 cm->cm_sense = &sc->mfi_sense[i]; 570 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; 571 cm->cm_sc = sc; 572 cm->cm_index = i; 573 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, 574 &cm->cm_dmamap) == 0) 575 mfi_release_command(cm); 576 else 577 break; 578 sc->mfi_total_cmds++; 579 } 580 581 return (0); 582 } 583 584 void 585 mfi_release_command(struct mfi_command *cm) 586 { 587 struct mfi_frame_header *hdr; 588 uint32_t *hdr_data; 589 590 /* 591 * Zero out the important fields of the frame, but make sure the 592 * context field is preserved. For efficiency, handle the fields 593 * as 32 bit words. Clear out the first S/G entry too for safety. 594 */ 595 hdr = &cm->cm_frame->header; 596 if (cm->cm_data != NULL && hdr->sg_count) { 597 cm->cm_sg->sg32[0].len = 0; 598 cm->cm_sg->sg32[0].addr = 0; 599 } 600 601 hdr_data = (uint32_t *)cm->cm_frame; 602 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ 603 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ 604 hdr_data[4] = 0; /* flags, timeout */ 605 hdr_data[5] = 0; /* data_len */ 606 607 cm->cm_extra_frames = 0; 608 cm->cm_flags = 0; 609 cm->cm_complete = NULL; 610 cm->cm_private = NULL; 611 cm->cm_data = NULL; 612 cm->cm_sg = 0; 613 cm->cm_total_frame_size = 0; 614 615 mfi_enqueue_free(cm); 616 } 617 618 static int 619 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode, 620 void **bufp, size_t bufsize) 621 { 622 struct mfi_command *cm; 623 struct mfi_dcmd_frame *dcmd; 624 void *buf = NULL; 625 626 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 627 628 cm = mfi_dequeue_free(sc); 629 if (cm == NULL) 630 return (EBUSY); 631 632 if ((bufsize > 0) && (bufp != NULL)) { 633 if (*bufp == NULL) { 634 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); 635 if (buf == NULL) { 636 mfi_release_command(cm); 637 return (ENOMEM); 638 } 639 *bufp = buf; 640 } else { 641 buf = *bufp; 642 } 643 } 644 645 dcmd = &cm->cm_frame->dcmd; 646 bzero(dcmd->mbox, MFI_MBOX_SIZE); 647 dcmd->header.cmd = MFI_CMD_DCMD; 648 dcmd->header.timeout = 0; 649 dcmd->header.flags = 0; 650 dcmd->header.data_len = bufsize; 651 dcmd->opcode = opcode; 652 cm->cm_sg = &dcmd->sgl; 653 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 654 cm->cm_flags = 0; 655 cm->cm_data = buf; 656 cm->cm_private = buf; 657 cm->cm_len = bufsize; 658 659 *cmp = cm; 660 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) 661 *bufp = buf; 662 return (0); 663 } 664 665 static int 666 mfi_comms_init(struct mfi_softc *sc) 667 { 668 struct mfi_command *cm; 669 struct mfi_init_frame *init; 670 struct mfi_init_qinfo *qinfo; 671 int error; 672 673 mtx_lock(&sc->mfi_io_lock); 674 if ((cm = mfi_dequeue_free(sc)) == NULL) 675 return (EBUSY); 676 677 /* 678 * Abuse the SG list area of the frame to hold the init_qinfo 679 * object; 680 */ 681 init = &cm->cm_frame->init; 682 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); 683 684 bzero(qinfo, sizeof(struct mfi_init_qinfo)); 685 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; 686 qinfo->rq_addr_lo = sc->mfi_comms_busaddr + 687 offsetof(struct mfi_hwcomms, hw_reply_q); 688 qinfo->pi_addr_lo = sc->mfi_comms_busaddr + 689 offsetof(struct mfi_hwcomms, hw_pi); 690 qinfo->ci_addr_lo = sc->mfi_comms_busaddr + 691 offsetof(struct mfi_hwcomms, hw_ci); 692 693 init->header.cmd = MFI_CMD_INIT; 694 init->header.data_len = sizeof(struct mfi_init_qinfo); 695 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; 696 cm->cm_data = NULL; 697 cm->cm_flags = MFI_CMD_POLLED; 698 699 if ((error = mfi_mapcmd(sc, cm)) != 0) { 700 device_printf(sc->mfi_dev, "failed to send init command\n"); 701 mtx_unlock(&sc->mfi_io_lock); 702 return (error); 703 } 704 mfi_release_command(cm); 705 mtx_unlock(&sc->mfi_io_lock); 706 707 return (0); 708 } 709 710 static int 711 mfi_get_controller_info(struct mfi_softc *sc) 712 { 713 struct mfi_command *cm = NULL; 714 struct mfi_ctrl_info *ci = NULL; 715 uint32_t max_sectors_1, max_sectors_2; 716 int error; 717 718 mtx_lock(&sc->mfi_io_lock); 719 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, 720 (void **)&ci, sizeof(*ci)); 721 if (error) 722 goto out; 723 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 724 725 if ((error = mfi_mapcmd(sc, cm)) != 0) { 726 device_printf(sc->mfi_dev, "Failed to get controller info\n"); 727 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / 728 MFI_SECTOR_LEN; 729 error = 0; 730 goto out; 731 } 732 733 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 734 BUS_DMASYNC_POSTREAD); 735 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 736 737 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io; 738 max_sectors_2 = ci->max_request_size; 739 sc->mfi_max_io = min(max_sectors_1, max_sectors_2); 740 741 out: 742 if (ci) 743 free(ci, M_MFIBUF); 744 if (cm) 745 mfi_release_command(cm); 746 mtx_unlock(&sc->mfi_io_lock); 747 return (error); 748 } 749 750 static int 751 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) 752 { 753 struct mfi_command *cm = NULL; 754 int error; 755 756 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, 757 (void **)log_state, sizeof(**log_state)); 758 if (error) 759 goto out; 760 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 761 762 if ((error = mfi_mapcmd(sc, cm)) != 0) { 763 device_printf(sc->mfi_dev, "Failed to get log state\n"); 764 goto out; 765 } 766 767 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 768 BUS_DMASYNC_POSTREAD); 769 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 770 771 out: 772 if (cm) 773 mfi_release_command(cm); 774 775 return (error); 776 } 777 778 static int 779 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) 780 { 781 struct mfi_evt_log_state *log_state = NULL; 782 union mfi_evt class_locale; 783 int error = 0; 784 uint32_t seq; 785 786 class_locale.members.reserved = 0; 787 class_locale.members.locale = mfi_event_locale; 788 class_locale.members.evt_class = mfi_event_class; 789 790 if (seq_start == 0) { 791 error = mfi_get_log_state(sc, &log_state); 792 if (error) { 793 if (log_state) 794 free(log_state, M_MFIBUF); 795 return (error); 796 } 797 798 /* 799 * Walk through any events that fired since the last 800 * shutdown. 801 */ 802 mfi_parse_entries(sc, log_state->shutdown_seq_num, 803 log_state->newest_seq_num); 804 seq = log_state->newest_seq_num; 805 } else 806 seq = seq_start; 807 mfi_aen_register(sc, seq, class_locale.word); 808 free(log_state, M_MFIBUF); 809 810 return 0; 811 } 812 813 static int 814 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) 815 { 816 817 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 818 cm->cm_complete = NULL; 819 820 821 /* 822 * MegaCli can issue a DCMD of 0. In this case do nothing 823 * and return 0 to it as status 824 */ 825 if (cm->cm_frame->dcmd.opcode == 0) { 826 cm->cm_frame->header.cmd_status = MFI_STAT_OK; 827 cm->cm_error = 0; 828 return (cm->cm_error); 829 } 830 mfi_enqueue_ready(cm); 831 mfi_startio(sc); 832 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) 833 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); 834 return (cm->cm_error); 835 } 836 837 void 838 mfi_free(struct mfi_softc *sc) 839 { 840 struct mfi_command *cm; 841 int i; 842 843 callout_drain(&sc->mfi_watchdog_callout); 844 845 if (sc->mfi_cdev != NULL) 846 destroy_dev(sc->mfi_cdev); 847 848 if (sc->mfi_total_cmds != 0) { 849 for (i = 0; i < sc->mfi_total_cmds; i++) { 850 cm = &sc->mfi_commands[i]; 851 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); 852 } 853 free(sc->mfi_commands, M_MFIBUF); 854 } 855 856 if (sc->mfi_intr) 857 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); 858 if (sc->mfi_irq != NULL) 859 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, 860 sc->mfi_irq); 861 862 if (sc->mfi_sense_busaddr != 0) 863 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); 864 if (sc->mfi_sense != NULL) 865 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, 866 sc->mfi_sense_dmamap); 867 if (sc->mfi_sense_dmat != NULL) 868 bus_dma_tag_destroy(sc->mfi_sense_dmat); 869 870 if (sc->mfi_frames_busaddr != 0) 871 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); 872 if (sc->mfi_frames != NULL) 873 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, 874 sc->mfi_frames_dmamap); 875 if (sc->mfi_frames_dmat != NULL) 876 bus_dma_tag_destroy(sc->mfi_frames_dmat); 877 878 if (sc->mfi_comms_busaddr != 0) 879 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); 880 if (sc->mfi_comms != NULL) 881 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, 882 sc->mfi_comms_dmamap); 883 if (sc->mfi_comms_dmat != NULL) 884 bus_dma_tag_destroy(sc->mfi_comms_dmat); 885 886 if (sc->mfi_buffer_dmat != NULL) 887 bus_dma_tag_destroy(sc->mfi_buffer_dmat); 888 if (sc->mfi_parent_dmat != NULL) 889 bus_dma_tag_destroy(sc->mfi_parent_dmat); 890 891 if (mtx_initialized(&sc->mfi_io_lock)) { 892 mtx_destroy(&sc->mfi_io_lock); 893 sx_destroy(&sc->mfi_config_lock); 894 } 895 896 return; 897 } 898 899 static void 900 mfi_startup(void *arg) 901 { 902 struct mfi_softc *sc; 903 904 sc = (struct mfi_softc *)arg; 905 906 config_intrhook_disestablish(&sc->mfi_ich); 907 908 sc->mfi_enable_intr(sc); 909 sx_xlock(&sc->mfi_config_lock); 910 mtx_lock(&sc->mfi_io_lock); 911 mfi_ldprobe(sc); 912 mtx_unlock(&sc->mfi_io_lock); 913 sx_xunlock(&sc->mfi_config_lock); 914 } 915 916 static void 917 mfi_intr(void *arg) 918 { 919 struct mfi_softc *sc; 920 struct mfi_command *cm; 921 uint32_t pi, ci, context; 922 923 sc = (struct mfi_softc *)arg; 924 925 if (sc->mfi_check_clear_intr(sc)) 926 return; 927 928 /* 929 * Do a dummy read to flush the interrupt ACK that we just performed, 930 * ensuring that everything is really, truly consistent. 931 */ 932 (void)sc->mfi_read_fw_status(sc); 933 934 pi = sc->mfi_comms->hw_pi; 935 ci = sc->mfi_comms->hw_ci; 936 mtx_lock(&sc->mfi_io_lock); 937 while (ci != pi) { 938 context = sc->mfi_comms->hw_reply_q[ci]; 939 if (context < sc->mfi_max_fw_cmds) { 940 cm = &sc->mfi_commands[context]; 941 mfi_remove_busy(cm); 942 cm->cm_error = 0; 943 mfi_complete(sc, cm); 944 } 945 if (++ci == (sc->mfi_max_fw_cmds + 1)) { 946 ci = 0; 947 } 948 } 949 950 sc->mfi_comms->hw_ci = ci; 951 952 /* Give defered I/O a chance to run */ 953 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 954 sc->mfi_flags &= ~MFI_FLAGS_QFRZN; 955 mfi_startio(sc); 956 mtx_unlock(&sc->mfi_io_lock); 957 958 return; 959 } 960 961 int 962 mfi_shutdown(struct mfi_softc *sc) 963 { 964 struct mfi_dcmd_frame *dcmd; 965 struct mfi_command *cm; 966 int error; 967 968 mtx_lock(&sc->mfi_io_lock); 969 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); 970 if (error) { 971 mtx_unlock(&sc->mfi_io_lock); 972 return (error); 973 } 974 975 if (sc->mfi_aen_cm != NULL) 976 mfi_abort(sc, sc->mfi_aen_cm); 977 978 dcmd = &cm->cm_frame->dcmd; 979 dcmd->header.flags = MFI_FRAME_DIR_NONE; 980 cm->cm_flags = MFI_CMD_POLLED; 981 cm->cm_data = NULL; 982 983 if ((error = mfi_mapcmd(sc, cm)) != 0) { 984 device_printf(sc->mfi_dev, "Failed to shutdown controller\n"); 985 } 986 987 mfi_release_command(cm); 988 mtx_unlock(&sc->mfi_io_lock); 989 return (error); 990 } 991 992 static void 993 mfi_ldprobe(struct mfi_softc *sc) 994 { 995 struct mfi_frame_header *hdr; 996 struct mfi_command *cm = NULL; 997 struct mfi_ld_list *list = NULL; 998 struct mfi_disk *ld; 999 int error, i; 1000 1001 sx_assert(&sc->mfi_config_lock, SA_XLOCKED); 1002 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1003 1004 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, 1005 (void **)&list, sizeof(*list)); 1006 if (error) 1007 goto out; 1008 1009 cm->cm_flags = MFI_CMD_DATAIN; 1010 if (mfi_wait_command(sc, cm) != 0) { 1011 device_printf(sc->mfi_dev, "Failed to get device listing\n"); 1012 goto out; 1013 } 1014 1015 hdr = &cm->cm_frame->header; 1016 if (hdr->cmd_status != MFI_STAT_OK) { 1017 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", 1018 hdr->cmd_status); 1019 goto out; 1020 } 1021 1022 for (i = 0; i < list->ld_count; i++) { 1023 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1024 if (ld->ld_id == list->ld_list[i].ld.v.target_id) 1025 goto skip_add; 1026 } 1027 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); 1028 skip_add:; 1029 } 1030 out: 1031 if (list) 1032 free(list, M_MFIBUF); 1033 if (cm) 1034 mfi_release_command(cm); 1035 1036 return; 1037 } 1038 1039 /* 1040 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If 1041 * the bits in 24-31 are all set, then it is the number of seconds since 1042 * boot. 1043 */ 1044 static const char * 1045 format_timestamp(uint32_t timestamp) 1046 { 1047 static char buffer[32]; 1048 1049 if ((timestamp & 0xff000000) == 0xff000000) 1050 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 1051 0x00ffffff); 1052 else 1053 snprintf(buffer, sizeof(buffer), "%us", timestamp); 1054 return (buffer); 1055 } 1056 1057 static const char * 1058 format_class(int8_t class) 1059 { 1060 static char buffer[6]; 1061 1062 switch (class) { 1063 case MFI_EVT_CLASS_DEBUG: 1064 return ("debug"); 1065 case MFI_EVT_CLASS_PROGRESS: 1066 return ("progress"); 1067 case MFI_EVT_CLASS_INFO: 1068 return ("info"); 1069 case MFI_EVT_CLASS_WARNING: 1070 return ("WARN"); 1071 case MFI_EVT_CLASS_CRITICAL: 1072 return ("CRIT"); 1073 case MFI_EVT_CLASS_FATAL: 1074 return ("FATAL"); 1075 case MFI_EVT_CLASS_DEAD: 1076 return ("DEAD"); 1077 default: 1078 snprintf(buffer, sizeof(buffer), "%d", class); 1079 return (buffer); 1080 } 1081 } 1082 1083 static void 1084 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) 1085 { 1086 1087 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, 1088 format_timestamp(detail->time), detail->evt_class.members.locale, 1089 format_class(detail->evt_class.members.evt_class), detail->description); 1090 } 1091 1092 static int 1093 mfi_aen_register(struct mfi_softc *sc, int seq, int locale) 1094 { 1095 struct mfi_command *cm; 1096 struct mfi_dcmd_frame *dcmd; 1097 union mfi_evt current_aen, prior_aen; 1098 struct mfi_evt_detail *ed = NULL; 1099 int error = 0; 1100 1101 current_aen.word = locale; 1102 if (sc->mfi_aen_cm != NULL) { 1103 prior_aen.word = 1104 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; 1105 if (prior_aen.members.evt_class <= current_aen.members.evt_class && 1106 !((prior_aen.members.locale & current_aen.members.locale) 1107 ^current_aen.members.locale)) { 1108 return (0); 1109 } else { 1110 prior_aen.members.locale |= current_aen.members.locale; 1111 if (prior_aen.members.evt_class 1112 < current_aen.members.evt_class) 1113 current_aen.members.evt_class = 1114 prior_aen.members.evt_class; 1115 mfi_abort(sc, sc->mfi_aen_cm); 1116 } 1117 } 1118 1119 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, 1120 (void **)&ed, sizeof(*ed)); 1121 if (error) { 1122 goto out; 1123 } 1124 1125 dcmd = &cm->cm_frame->dcmd; 1126 ((uint32_t *)&dcmd->mbox)[0] = seq; 1127 ((uint32_t *)&dcmd->mbox)[1] = locale; 1128 cm->cm_flags = MFI_CMD_DATAIN; 1129 cm->cm_complete = mfi_aen_complete; 1130 1131 sc->mfi_aen_cm = cm; 1132 1133 mfi_enqueue_ready(cm); 1134 mfi_startio(sc); 1135 1136 out: 1137 return (error); 1138 } 1139 1140 static void 1141 mfi_aen_complete(struct mfi_command *cm) 1142 { 1143 struct mfi_frame_header *hdr; 1144 struct mfi_softc *sc; 1145 struct mfi_evt_detail *detail; 1146 struct mfi_aen *mfi_aen_entry, *tmp; 1147 int seq = 0, aborted = 0; 1148 1149 sc = cm->cm_sc; 1150 hdr = &cm->cm_frame->header; 1151 1152 if (sc->mfi_aen_cm == NULL) 1153 return; 1154 1155 if (sc->mfi_aen_cm->cm_aen_abort || 1156 hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1157 sc->mfi_aen_cm->cm_aen_abort = 0; 1158 aborted = 1; 1159 } else { 1160 sc->mfi_aen_triggered = 1; 1161 if (sc->mfi_poll_waiting) { 1162 sc->mfi_poll_waiting = 0; 1163 selwakeup(&sc->mfi_select); 1164 } 1165 detail = cm->cm_data; 1166 /* 1167 * XXX If this function is too expensive or is recursive, then 1168 * events should be put onto a queue and processed later. 1169 */ 1170 mfi_decode_evt(sc, detail); 1171 seq = detail->seq + 1; 1172 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1173 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1174 aen_link); 1175 PROC_LOCK(mfi_aen_entry->p); 1176 kern_psignal(mfi_aen_entry->p, SIGIO); 1177 PROC_UNLOCK(mfi_aen_entry->p); 1178 free(mfi_aen_entry, M_MFIBUF); 1179 } 1180 } 1181 1182 free(cm->cm_data, M_MFIBUF); 1183 sc->mfi_aen_cm = NULL; 1184 wakeup(&sc->mfi_aen_cm); 1185 mfi_release_command(cm); 1186 1187 /* set it up again so the driver can catch more events */ 1188 if (!aborted) { 1189 mfi_aen_setup(sc, seq); 1190 } 1191 } 1192 1193 #define MAX_EVENTS 15 1194 1195 static int 1196 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) 1197 { 1198 struct mfi_command *cm; 1199 struct mfi_dcmd_frame *dcmd; 1200 struct mfi_evt_list *el; 1201 union mfi_evt class_locale; 1202 int error, i, seq, size; 1203 1204 class_locale.members.reserved = 0; 1205 class_locale.members.locale = mfi_event_locale; 1206 class_locale.members.evt_class = mfi_event_class; 1207 1208 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) 1209 * (MAX_EVENTS - 1); 1210 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); 1211 if (el == NULL) 1212 return (ENOMEM); 1213 1214 for (seq = start_seq;;) { 1215 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1216 free(el, M_MFIBUF); 1217 return (EBUSY); 1218 } 1219 1220 dcmd = &cm->cm_frame->dcmd; 1221 bzero(dcmd->mbox, MFI_MBOX_SIZE); 1222 dcmd->header.cmd = MFI_CMD_DCMD; 1223 dcmd->header.timeout = 0; 1224 dcmd->header.data_len = size; 1225 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; 1226 ((uint32_t *)&dcmd->mbox)[0] = seq; 1227 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; 1228 cm->cm_sg = &dcmd->sgl; 1229 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1230 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1231 cm->cm_data = el; 1232 cm->cm_len = size; 1233 1234 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1235 device_printf(sc->mfi_dev, 1236 "Failed to get controller entries\n"); 1237 mfi_release_command(cm); 1238 break; 1239 } 1240 1241 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1242 BUS_DMASYNC_POSTREAD); 1243 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1244 1245 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { 1246 mfi_release_command(cm); 1247 break; 1248 } 1249 if (dcmd->header.cmd_status != MFI_STAT_OK) { 1250 device_printf(sc->mfi_dev, 1251 "Error %d fetching controller entries\n", 1252 dcmd->header.cmd_status); 1253 mfi_release_command(cm); 1254 break; 1255 } 1256 mfi_release_command(cm); 1257 1258 for (i = 0; i < el->count; i++) { 1259 /* 1260 * If this event is newer than 'stop_seq' then 1261 * break out of the loop. Note that the log 1262 * is a circular buffer so we have to handle 1263 * the case that our stop point is earlier in 1264 * the buffer than our start point. 1265 */ 1266 if (el->event[i].seq >= stop_seq) { 1267 if (start_seq <= stop_seq) 1268 break; 1269 else if (el->event[i].seq < start_seq) 1270 break; 1271 } 1272 mfi_decode_evt(sc, &el->event[i]); 1273 } 1274 seq = el->event[el->count - 1].seq + 1; 1275 } 1276 1277 free(el, M_MFIBUF); 1278 return (0); 1279 } 1280 1281 static int 1282 mfi_add_ld(struct mfi_softc *sc, int id) 1283 { 1284 struct mfi_command *cm; 1285 struct mfi_dcmd_frame *dcmd = NULL; 1286 struct mfi_ld_info *ld_info = NULL; 1287 int error; 1288 1289 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1290 1291 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, 1292 (void **)&ld_info, sizeof(*ld_info)); 1293 if (error) { 1294 device_printf(sc->mfi_dev, 1295 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); 1296 if (ld_info) 1297 free(ld_info, M_MFIBUF); 1298 return (error); 1299 } 1300 cm->cm_flags = MFI_CMD_DATAIN; 1301 dcmd = &cm->cm_frame->dcmd; 1302 dcmd->mbox[0] = id; 1303 if (mfi_wait_command(sc, cm) != 0) { 1304 device_printf(sc->mfi_dev, 1305 "Failed to get logical drive: %d\n", id); 1306 free(ld_info, M_MFIBUF); 1307 return (0); 1308 } 1309 1310 mfi_add_ld_complete(cm); 1311 return (0); 1312 } 1313 1314 static void 1315 mfi_add_ld_complete(struct mfi_command *cm) 1316 { 1317 struct mfi_frame_header *hdr; 1318 struct mfi_ld_info *ld_info; 1319 struct mfi_softc *sc; 1320 device_t child; 1321 1322 sc = cm->cm_sc; 1323 hdr = &cm->cm_frame->header; 1324 ld_info = cm->cm_private; 1325 1326 if (hdr->cmd_status != MFI_STAT_OK) { 1327 free(ld_info, M_MFIBUF); 1328 mfi_release_command(cm); 1329 return; 1330 } 1331 mfi_release_command(cm); 1332 1333 mtx_unlock(&sc->mfi_io_lock); 1334 mtx_lock(&Giant); 1335 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { 1336 device_printf(sc->mfi_dev, "Failed to add logical disk\n"); 1337 free(ld_info, M_MFIBUF); 1338 mtx_unlock(&Giant); 1339 mtx_lock(&sc->mfi_io_lock); 1340 return; 1341 } 1342 1343 device_set_ivars(child, ld_info); 1344 device_set_desc(child, "MFI Logical Disk"); 1345 bus_generic_attach(sc->mfi_dev); 1346 mtx_unlock(&Giant); 1347 mtx_lock(&sc->mfi_io_lock); 1348 } 1349 1350 static struct mfi_command * 1351 mfi_bio_command(struct mfi_softc *sc) 1352 { 1353 struct mfi_io_frame *io; 1354 struct mfi_command *cm; 1355 struct bio *bio; 1356 int flags, blkcount; 1357 1358 if ((cm = mfi_dequeue_free(sc)) == NULL) 1359 return (NULL); 1360 1361 if ((bio = mfi_dequeue_bio(sc)) == NULL) { 1362 mfi_release_command(cm); 1363 return (NULL); 1364 } 1365 1366 io = &cm->cm_frame->io; 1367 switch (bio->bio_cmd & 0x03) { 1368 case BIO_READ: 1369 io->header.cmd = MFI_CMD_LD_READ; 1370 flags = MFI_CMD_DATAIN; 1371 break; 1372 case BIO_WRITE: 1373 io->header.cmd = MFI_CMD_LD_WRITE; 1374 flags = MFI_CMD_DATAOUT; 1375 break; 1376 default: 1377 panic("Invalid bio command"); 1378 } 1379 1380 /* Cheat with the sector length to avoid a non-constant division */ 1381 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1382 io->header.target_id = (uintptr_t)bio->bio_driver1; 1383 io->header.timeout = 0; 1384 io->header.flags = 0; 1385 io->header.sense_len = MFI_SENSE_LEN; 1386 io->header.data_len = blkcount; 1387 io->sense_addr_lo = cm->cm_sense_busaddr; 1388 io->sense_addr_hi = 0; 1389 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 1390 io->lba_lo = bio->bio_pblkno & 0xffffffff; 1391 cm->cm_complete = mfi_bio_complete; 1392 cm->cm_private = bio; 1393 cm->cm_data = bio->bio_data; 1394 cm->cm_len = bio->bio_bcount; 1395 cm->cm_sg = &io->sgl; 1396 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1397 cm->cm_flags = flags; 1398 return (cm); 1399 } 1400 1401 static void 1402 mfi_bio_complete(struct mfi_command *cm) 1403 { 1404 struct bio *bio; 1405 struct mfi_frame_header *hdr; 1406 struct mfi_softc *sc; 1407 1408 bio = cm->cm_private; 1409 hdr = &cm->cm_frame->header; 1410 sc = cm->cm_sc; 1411 1412 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { 1413 bio->bio_flags |= BIO_ERROR; 1414 bio->bio_error = EIO; 1415 device_printf(sc->mfi_dev, "I/O error, status= %d " 1416 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); 1417 mfi_print_sense(cm->cm_sc, cm->cm_sense); 1418 } else if (cm->cm_error != 0) { 1419 bio->bio_flags |= BIO_ERROR; 1420 } 1421 1422 mfi_release_command(cm); 1423 mfi_disk_complete(bio); 1424 } 1425 1426 void 1427 mfi_startio(struct mfi_softc *sc) 1428 { 1429 struct mfi_command *cm; 1430 struct ccb_hdr *ccbh; 1431 1432 for (;;) { 1433 /* Don't bother if we're short on resources */ 1434 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 1435 break; 1436 1437 /* Try a command that has already been prepared */ 1438 cm = mfi_dequeue_ready(sc); 1439 1440 if (cm == NULL) { 1441 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 1442 cm = sc->mfi_cam_start(ccbh); 1443 } 1444 1445 /* Nope, so look for work on the bioq */ 1446 if (cm == NULL) 1447 cm = mfi_bio_command(sc); 1448 1449 /* No work available, so exit */ 1450 if (cm == NULL) 1451 break; 1452 1453 /* Send the command to the controller */ 1454 if (mfi_mapcmd(sc, cm) != 0) { 1455 mfi_requeue_ready(cm); 1456 break; 1457 } 1458 } 1459 } 1460 1461 static int 1462 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 1463 { 1464 int error, polled; 1465 1466 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1467 1468 if (cm->cm_data != NULL) { 1469 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0; 1470 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, 1471 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); 1472 if (error == EINPROGRESS) { 1473 sc->mfi_flags |= MFI_FLAGS_QFRZN; 1474 return (0); 1475 } 1476 } else { 1477 error = mfi_send_frame(sc, cm); 1478 } 1479 1480 return (error); 1481 } 1482 1483 static void 1484 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1485 { 1486 struct mfi_frame_header *hdr; 1487 struct mfi_command *cm; 1488 union mfi_sgl *sgl; 1489 struct mfi_softc *sc; 1490 int i, j, first, dir; 1491 1492 cm = (struct mfi_command *)arg; 1493 sc = cm->cm_sc; 1494 hdr = &cm->cm_frame->header; 1495 sgl = cm->cm_sg; 1496 1497 if (error) { 1498 printf("error %d in callback\n", error); 1499 cm->cm_error = error; 1500 mfi_complete(sc, cm); 1501 return; 1502 } 1503 1504 j = 0; 1505 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 1506 first = cm->cm_stp_len; 1507 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 1508 sgl->sg32[j].addr = segs[0].ds_addr; 1509 sgl->sg32[j++].len = first; 1510 } else { 1511 sgl->sg64[j].addr = segs[0].ds_addr; 1512 sgl->sg64[j++].len = first; 1513 } 1514 } else 1515 first = 0; 1516 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 1517 for (i = 0; i < nsegs; i++) { 1518 sgl->sg32[j].addr = segs[i].ds_addr + first; 1519 sgl->sg32[j++].len = segs[i].ds_len - first; 1520 first = 0; 1521 } 1522 } else { 1523 for (i = 0; i < nsegs; i++) { 1524 sgl->sg64[j].addr = segs[i].ds_addr + first; 1525 sgl->sg64[j++].len = segs[i].ds_len - first; 1526 first = 0; 1527 } 1528 hdr->flags |= MFI_FRAME_SGL64; 1529 } 1530 hdr->sg_count = j; 1531 1532 dir = 0; 1533 if (cm->cm_flags & MFI_CMD_DATAIN) { 1534 dir |= BUS_DMASYNC_PREREAD; 1535 hdr->flags |= MFI_FRAME_DIR_READ; 1536 } 1537 if (cm->cm_flags & MFI_CMD_DATAOUT) { 1538 dir |= BUS_DMASYNC_PREWRITE; 1539 hdr->flags |= MFI_FRAME_DIR_WRITE; 1540 } 1541 if (cm->cm_frame->header.cmd == MFI_CMD_STP) 1542 dir |= BUS_DMASYNC_PREWRITE; 1543 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1544 cm->cm_flags |= MFI_CMD_MAPPED; 1545 1546 /* 1547 * Instead of calculating the total number of frames in the 1548 * compound frame, it's already assumed that there will be at 1549 * least 1 frame, so don't compensate for the modulo of the 1550 * following division. 1551 */ 1552 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); 1553 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 1554 1555 mfi_send_frame(sc, cm); 1556 1557 return; 1558 } 1559 1560 static int 1561 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 1562 { 1563 struct mfi_frame_header *hdr; 1564 int tm = MFI_POLL_TIMEOUT_SECS * 1000; 1565 1566 hdr = &cm->cm_frame->header; 1567 1568 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { 1569 cm->cm_timestamp = time_uptime; 1570 mfi_enqueue_busy(cm); 1571 } else { 1572 hdr->cmd_status = MFI_STAT_INVALID_STATUS; 1573 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 1574 } 1575 1576 /* 1577 * The bus address of the command is aligned on a 64 byte boundary, 1578 * leaving the least 6 bits as zero. For whatever reason, the 1579 * hardware wants the address shifted right by three, leaving just 1580 * 3 zero bits. These three bits are then used as a prefetching 1581 * hint for the hardware to predict how many frames need to be 1582 * fetched across the bus. If a command has more than 8 frames 1583 * then the 3 bits are set to 0x7 and the firmware uses other 1584 * information in the command to determine the total amount to fetch. 1585 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames 1586 * is enough for both 32bit and 64bit systems. 1587 */ 1588 if (cm->cm_extra_frames > 7) 1589 cm->cm_extra_frames = 7; 1590 1591 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames); 1592 1593 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) 1594 return (0); 1595 1596 /* This is a polled command, so busy-wait for it to complete. */ 1597 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1598 DELAY(1000); 1599 tm -= 1; 1600 if (tm <= 0) 1601 break; 1602 } 1603 1604 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1605 device_printf(sc->mfi_dev, "Frame %p timed out " 1606 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); 1607 return (ETIMEDOUT); 1608 } 1609 1610 return (0); 1611 } 1612 1613 static void 1614 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) 1615 { 1616 int dir; 1617 1618 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { 1619 dir = 0; 1620 if ((cm->cm_flags & MFI_CMD_DATAIN) || 1621 (cm->cm_frame->header.cmd == MFI_CMD_STP)) 1622 dir |= BUS_DMASYNC_POSTREAD; 1623 if (cm->cm_flags & MFI_CMD_DATAOUT) 1624 dir |= BUS_DMASYNC_POSTWRITE; 1625 1626 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1627 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1628 cm->cm_flags &= ~MFI_CMD_MAPPED; 1629 } 1630 1631 cm->cm_flags |= MFI_CMD_COMPLETED; 1632 1633 if (cm->cm_complete != NULL) 1634 cm->cm_complete(cm); 1635 else 1636 wakeup(cm); 1637 } 1638 1639 static int 1640 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort) 1641 { 1642 struct mfi_command *cm; 1643 struct mfi_abort_frame *abort; 1644 int i = 0; 1645 1646 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1647 1648 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1649 return (EBUSY); 1650 } 1651 1652 abort = &cm->cm_frame->abort; 1653 abort->header.cmd = MFI_CMD_ABORT; 1654 abort->header.flags = 0; 1655 abort->abort_context = cm_abort->cm_frame->header.context; 1656 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr; 1657 abort->abort_mfi_addr_hi = 0; 1658 cm->cm_data = NULL; 1659 cm->cm_flags = MFI_CMD_POLLED; 1660 1661 sc->mfi_aen_cm->cm_aen_abort = 1; 1662 mfi_mapcmd(sc, cm); 1663 mfi_release_command(cm); 1664 1665 while (i < 5 && sc->mfi_aen_cm != NULL) { 1666 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz); 1667 i++; 1668 } 1669 1670 return (0); 1671 } 1672 1673 int 1674 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) 1675 { 1676 struct mfi_command *cm; 1677 struct mfi_io_frame *io; 1678 int error; 1679 1680 if ((cm = mfi_dequeue_free(sc)) == NULL) 1681 return (EBUSY); 1682 1683 io = &cm->cm_frame->io; 1684 io->header.cmd = MFI_CMD_LD_WRITE; 1685 io->header.target_id = id; 1686 io->header.timeout = 0; 1687 io->header.flags = 0; 1688 io->header.sense_len = MFI_SENSE_LEN; 1689 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1690 io->sense_addr_lo = cm->cm_sense_busaddr; 1691 io->sense_addr_hi = 0; 1692 io->lba_hi = (lba & 0xffffffff00000000) >> 32; 1693 io->lba_lo = lba & 0xffffffff; 1694 cm->cm_data = virt; 1695 cm->cm_len = len; 1696 cm->cm_sg = &io->sgl; 1697 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1698 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 1699 1700 error = mfi_mapcmd(sc, cm); 1701 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1702 BUS_DMASYNC_POSTWRITE); 1703 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1704 mfi_release_command(cm); 1705 1706 return (error); 1707 } 1708 1709 static int 1710 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1711 { 1712 struct mfi_softc *sc; 1713 int error; 1714 1715 sc = dev->si_drv1; 1716 1717 mtx_lock(&sc->mfi_io_lock); 1718 if (sc->mfi_detaching) 1719 error = ENXIO; 1720 else { 1721 sc->mfi_flags |= MFI_FLAGS_OPEN; 1722 error = 0; 1723 } 1724 mtx_unlock(&sc->mfi_io_lock); 1725 1726 return (error); 1727 } 1728 1729 static int 1730 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1731 { 1732 struct mfi_softc *sc; 1733 struct mfi_aen *mfi_aen_entry, *tmp; 1734 1735 sc = dev->si_drv1; 1736 1737 mtx_lock(&sc->mfi_io_lock); 1738 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 1739 1740 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1741 if (mfi_aen_entry->p == curproc) { 1742 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1743 aen_link); 1744 free(mfi_aen_entry, M_MFIBUF); 1745 } 1746 } 1747 mtx_unlock(&sc->mfi_io_lock); 1748 return (0); 1749 } 1750 1751 static int 1752 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 1753 { 1754 1755 switch (opcode) { 1756 case MFI_DCMD_LD_DELETE: 1757 case MFI_DCMD_CFG_ADD: 1758 case MFI_DCMD_CFG_CLEAR: 1759 sx_xlock(&sc->mfi_config_lock); 1760 return (1); 1761 default: 1762 return (0); 1763 } 1764 } 1765 1766 static void 1767 mfi_config_unlock(struct mfi_softc *sc, int locked) 1768 { 1769 1770 if (locked) 1771 sx_xunlock(&sc->mfi_config_lock); 1772 } 1773 1774 /* Perform pre-issue checks on commands from userland and possibly veto them. */ 1775 static int 1776 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) 1777 { 1778 struct mfi_disk *ld, *ld2; 1779 int error; 1780 1781 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1782 error = 0; 1783 switch (cm->cm_frame->dcmd.opcode) { 1784 case MFI_DCMD_LD_DELETE: 1785 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1786 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1787 break; 1788 } 1789 if (ld == NULL) 1790 error = ENOENT; 1791 else 1792 error = mfi_disk_disable(ld); 1793 break; 1794 case MFI_DCMD_CFG_CLEAR: 1795 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1796 error = mfi_disk_disable(ld); 1797 if (error) 1798 break; 1799 } 1800 if (error) { 1801 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { 1802 if (ld2 == ld) 1803 break; 1804 mfi_disk_enable(ld2); 1805 } 1806 } 1807 break; 1808 default: 1809 break; 1810 } 1811 return (error); 1812 } 1813 1814 /* Perform post-issue checks on commands from userland. */ 1815 static void 1816 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm) 1817 { 1818 struct mfi_disk *ld, *ldn; 1819 1820 switch (cm->cm_frame->dcmd.opcode) { 1821 case MFI_DCMD_LD_DELETE: 1822 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1823 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1824 break; 1825 } 1826 KASSERT(ld != NULL, ("volume dissappeared")); 1827 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1828 mtx_unlock(&sc->mfi_io_lock); 1829 mtx_lock(&Giant); 1830 device_delete_child(sc->mfi_dev, ld->ld_dev); 1831 mtx_unlock(&Giant); 1832 mtx_lock(&sc->mfi_io_lock); 1833 } else 1834 mfi_disk_enable(ld); 1835 break; 1836 case MFI_DCMD_CFG_CLEAR: 1837 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1838 mtx_unlock(&sc->mfi_io_lock); 1839 mtx_lock(&Giant); 1840 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) { 1841 device_delete_child(sc->mfi_dev, ld->ld_dev); 1842 } 1843 mtx_unlock(&Giant); 1844 mtx_lock(&sc->mfi_io_lock); 1845 } else { 1846 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) 1847 mfi_disk_enable(ld); 1848 } 1849 break; 1850 case MFI_DCMD_CFG_ADD: 1851 mfi_ldprobe(sc); 1852 break; 1853 case MFI_DCMD_CFG_FOREIGN_IMPORT: 1854 mfi_ldprobe(sc); 1855 break; 1856 } 1857 } 1858 1859 static int 1860 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) 1861 { 1862 struct mfi_command *cm; 1863 struct mfi_dcmd_frame *dcmd; 1864 void *ioc_buf = NULL; 1865 uint32_t context; 1866 int error = 0, locked; 1867 1868 1869 if (ioc->buf_size > 0) { 1870 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); 1871 if (ioc_buf == NULL) { 1872 return (ENOMEM); 1873 } 1874 error = copyin(ioc->buf, ioc_buf, ioc->buf_size); 1875 if (error) { 1876 device_printf(sc->mfi_dev, "failed to copyin\n"); 1877 free(ioc_buf, M_MFIBUF); 1878 return (error); 1879 } 1880 } 1881 1882 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); 1883 1884 mtx_lock(&sc->mfi_io_lock); 1885 while ((cm = mfi_dequeue_free(sc)) == NULL) 1886 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); 1887 1888 /* Save context for later */ 1889 context = cm->cm_frame->header.context; 1890 1891 dcmd = &cm->cm_frame->dcmd; 1892 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); 1893 1894 cm->cm_sg = &dcmd->sgl; 1895 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1896 cm->cm_data = ioc_buf; 1897 cm->cm_len = ioc->buf_size; 1898 1899 /* restore context */ 1900 cm->cm_frame->header.context = context; 1901 1902 /* Cheat since we don't know if we're writing or reading */ 1903 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 1904 1905 error = mfi_check_command_pre(sc, cm); 1906 if (error) 1907 goto out; 1908 1909 error = mfi_wait_command(sc, cm); 1910 if (error) { 1911 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 1912 goto out; 1913 } 1914 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 1915 mfi_check_command_post(sc, cm); 1916 out: 1917 mfi_release_command(cm); 1918 mtx_unlock(&sc->mfi_io_lock); 1919 mfi_config_unlock(sc, locked); 1920 if (ioc->buf_size > 0) 1921 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 1922 if (ioc_buf) 1923 free(ioc_buf, M_MFIBUF); 1924 return (error); 1925 } 1926 1927 #ifdef __amd64__ 1928 #define PTRIN(p) ((void *)(uintptr_t)(p)) 1929 #else 1930 #define PTRIN(p) (p) 1931 #endif 1932 1933 static int 1934 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 1935 { 1936 struct mfi_softc *sc; 1937 union mfi_statrequest *ms; 1938 struct mfi_ioc_packet *ioc; 1939 #ifdef __amd64__ 1940 struct mfi_ioc_packet32 *ioc32; 1941 #endif 1942 struct mfi_ioc_aen *aen; 1943 struct mfi_command *cm = NULL; 1944 uint32_t context; 1945 union mfi_sense_ptr sense_ptr; 1946 uint8_t *data = NULL, *temp, *addr; 1947 size_t len; 1948 int i; 1949 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 1950 #ifdef __amd64__ 1951 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 1952 struct mfi_ioc_passthru iop_swab; 1953 #endif 1954 int error, locked; 1955 1956 sc = dev->si_drv1; 1957 error = 0; 1958 1959 switch (cmd) { 1960 case MFIIO_STATS: 1961 ms = (union mfi_statrequest *)arg; 1962 switch (ms->ms_item) { 1963 case MFIQ_FREE: 1964 case MFIQ_BIO: 1965 case MFIQ_READY: 1966 case MFIQ_BUSY: 1967 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 1968 sizeof(struct mfi_qstat)); 1969 break; 1970 default: 1971 error = ENOIOCTL; 1972 break; 1973 } 1974 break; 1975 case MFIIO_QUERY_DISK: 1976 { 1977 struct mfi_query_disk *qd; 1978 struct mfi_disk *ld; 1979 1980 qd = (struct mfi_query_disk *)arg; 1981 mtx_lock(&sc->mfi_io_lock); 1982 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1983 if (ld->ld_id == qd->array_id) 1984 break; 1985 } 1986 if (ld == NULL) { 1987 qd->present = 0; 1988 mtx_unlock(&sc->mfi_io_lock); 1989 return (0); 1990 } 1991 qd->present = 1; 1992 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 1993 qd->open = 1; 1994 bzero(qd->devname, SPECNAMELEN + 1); 1995 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 1996 mtx_unlock(&sc->mfi_io_lock); 1997 break; 1998 } 1999 case MFI_CMD: 2000 #ifdef __amd64__ 2001 case MFI_CMD32: 2002 #endif 2003 { 2004 devclass_t devclass; 2005 ioc = (struct mfi_ioc_packet *)arg; 2006 int adapter; 2007 2008 adapter = ioc->mfi_adapter_no; 2009 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 2010 devclass = devclass_find("mfi"); 2011 sc = devclass_get_softc(devclass, adapter); 2012 } 2013 mtx_lock(&sc->mfi_io_lock); 2014 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2015 mtx_unlock(&sc->mfi_io_lock); 2016 return (EBUSY); 2017 } 2018 mtx_unlock(&sc->mfi_io_lock); 2019 locked = 0; 2020 2021 /* 2022 * save off original context since copying from user 2023 * will clobber some data 2024 */ 2025 context = cm->cm_frame->header.context; 2026 2027 bcopy(ioc->mfi_frame.raw, cm->cm_frame, 2028 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 2029 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 2030 * ioc->mfi_sge_count) + ioc->mfi_sgl_off; 2031 if (ioc->mfi_sge_count) { 2032 cm->cm_sg = 2033 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; 2034 } 2035 cm->cm_flags = 0; 2036 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 2037 cm->cm_flags |= MFI_CMD_DATAIN; 2038 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 2039 cm->cm_flags |= MFI_CMD_DATAOUT; 2040 /* Legacy app shim */ 2041 if (cm->cm_flags == 0) 2042 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 2043 cm->cm_len = cm->cm_frame->header.data_len; 2044 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 2045 #ifdef __amd64__ 2046 if (cmd == MFI_CMD) { 2047 #endif 2048 /* Native */ 2049 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; 2050 #ifdef __amd64__ 2051 } else { 2052 /* 32bit on 64bit */ 2053 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2054 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; 2055 } 2056 #endif 2057 cm->cm_len += cm->cm_stp_len; 2058 } 2059 if (cm->cm_len && 2060 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 2061 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2062 M_WAITOK | M_ZERO); 2063 if (cm->cm_data == NULL) { 2064 device_printf(sc->mfi_dev, "Malloc failed\n"); 2065 goto out; 2066 } 2067 } else { 2068 cm->cm_data = 0; 2069 } 2070 2071 /* restore header context */ 2072 cm->cm_frame->header.context = context; 2073 2074 temp = data; 2075 if ((cm->cm_flags & MFI_CMD_DATAOUT) || 2076 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 2077 for (i = 0; i < ioc->mfi_sge_count; i++) { 2078 #ifdef __amd64__ 2079 if (cmd == MFI_CMD) { 2080 #endif 2081 /* Native */ 2082 addr = ioc->mfi_sgl[i].iov_base; 2083 len = ioc->mfi_sgl[i].iov_len; 2084 #ifdef __amd64__ 2085 } else { 2086 /* 32bit on 64bit */ 2087 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2088 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 2089 len = ioc32->mfi_sgl[i].iov_len; 2090 } 2091 #endif 2092 error = copyin(addr, temp, len); 2093 if (error != 0) { 2094 device_printf(sc->mfi_dev, 2095 "Copy in failed\n"); 2096 goto out; 2097 } 2098 temp = &temp[len]; 2099 } 2100 } 2101 2102 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2103 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2104 2105 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 2106 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr; 2107 cm->cm_frame->pass.sense_addr_hi = 0; 2108 } 2109 2110 mtx_lock(&sc->mfi_io_lock); 2111 error = mfi_check_command_pre(sc, cm); 2112 if (error) { 2113 mtx_unlock(&sc->mfi_io_lock); 2114 goto out; 2115 } 2116 2117 if ((error = mfi_wait_command(sc, cm)) != 0) { 2118 device_printf(sc->mfi_dev, 2119 "Controller polled failed\n"); 2120 mtx_unlock(&sc->mfi_io_lock); 2121 goto out; 2122 } 2123 2124 mfi_check_command_post(sc, cm); 2125 mtx_unlock(&sc->mfi_io_lock); 2126 2127 temp = data; 2128 if ((cm->cm_flags & MFI_CMD_DATAIN) || 2129 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 2130 for (i = 0; i < ioc->mfi_sge_count; i++) { 2131 #ifdef __amd64__ 2132 if (cmd == MFI_CMD) { 2133 #endif 2134 /* Native */ 2135 addr = ioc->mfi_sgl[i].iov_base; 2136 len = ioc->mfi_sgl[i].iov_len; 2137 #ifdef __amd64__ 2138 } else { 2139 /* 32bit on 64bit */ 2140 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2141 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 2142 len = ioc32->mfi_sgl[i].iov_len; 2143 } 2144 #endif 2145 error = copyout(temp, addr, len); 2146 if (error != 0) { 2147 device_printf(sc->mfi_dev, 2148 "Copy out failed\n"); 2149 goto out; 2150 } 2151 temp = &temp[len]; 2152 } 2153 } 2154 2155 if (ioc->mfi_sense_len) { 2156 /* get user-space sense ptr then copy out sense */ 2157 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], 2158 &sense_ptr.sense_ptr_data[0], 2159 sizeof(sense_ptr.sense_ptr_data)); 2160 #ifdef __amd64__ 2161 if (cmd != MFI_CMD) { 2162 /* 2163 * not 64bit native so zero out any address 2164 * over 32bit */ 2165 sense_ptr.addr.high = 0; 2166 } 2167 #endif 2168 error = copyout(cm->cm_sense, sense_ptr.user_space, 2169 ioc->mfi_sense_len); 2170 if (error != 0) { 2171 device_printf(sc->mfi_dev, 2172 "Copy out failed\n"); 2173 goto out; 2174 } 2175 } 2176 2177 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; 2178 out: 2179 mfi_config_unlock(sc, locked); 2180 if (data) 2181 free(data, M_MFIBUF); 2182 if (cm) { 2183 mtx_lock(&sc->mfi_io_lock); 2184 mfi_release_command(cm); 2185 mtx_unlock(&sc->mfi_io_lock); 2186 } 2187 2188 break; 2189 } 2190 case MFI_SET_AEN: 2191 aen = (struct mfi_ioc_aen *)arg; 2192 error = mfi_aen_register(sc, aen->aen_seq_num, 2193 aen->aen_class_locale); 2194 2195 break; 2196 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2197 { 2198 devclass_t devclass; 2199 struct mfi_linux_ioc_packet l_ioc; 2200 int adapter; 2201 2202 devclass = devclass_find("mfi"); 2203 if (devclass == NULL) 2204 return (ENOENT); 2205 2206 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2207 if (error) 2208 return (error); 2209 adapter = l_ioc.lioc_adapter_no; 2210 sc = devclass_get_softc(devclass, adapter); 2211 if (sc == NULL) 2212 return (ENOENT); 2213 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2214 cmd, arg, flag, td)); 2215 break; 2216 } 2217 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2218 { 2219 devclass_t devclass; 2220 struct mfi_linux_ioc_aen l_aen; 2221 int adapter; 2222 2223 devclass = devclass_find("mfi"); 2224 if (devclass == NULL) 2225 return (ENOENT); 2226 2227 error = copyin(arg, &l_aen, sizeof(l_aen)); 2228 if (error) 2229 return (error); 2230 adapter = l_aen.laen_adapter_no; 2231 sc = devclass_get_softc(devclass, adapter); 2232 if (sc == NULL) 2233 return (ENOENT); 2234 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2235 cmd, arg, flag, td)); 2236 break; 2237 } 2238 #ifdef __amd64__ 2239 case MFIIO_PASSTHRU32: 2240 iop_swab.ioc_frame = iop32->ioc_frame; 2241 iop_swab.buf_size = iop32->buf_size; 2242 iop_swab.buf = PTRIN(iop32->buf); 2243 iop = &iop_swab; 2244 /* FALLTHROUGH */ 2245 #endif 2246 case MFIIO_PASSTHRU: 2247 error = mfi_user_command(sc, iop); 2248 #ifdef __amd64__ 2249 if (cmd == MFIIO_PASSTHRU32) 2250 iop32->ioc_frame = iop_swab.ioc_frame; 2251 #endif 2252 break; 2253 default: 2254 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2255 error = ENOENT; 2256 break; 2257 } 2258 2259 return (error); 2260 } 2261 2262 static int 2263 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2264 { 2265 struct mfi_softc *sc; 2266 struct mfi_linux_ioc_packet l_ioc; 2267 struct mfi_linux_ioc_aen l_aen; 2268 struct mfi_command *cm = NULL; 2269 struct mfi_aen *mfi_aen_entry; 2270 union mfi_sense_ptr sense_ptr; 2271 uint32_t context; 2272 uint8_t *data = NULL, *temp; 2273 int i; 2274 int error, locked; 2275 2276 sc = dev->si_drv1; 2277 error = 0; 2278 switch (cmd) { 2279 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2280 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2281 if (error != 0) 2282 return (error); 2283 2284 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 2285 return (EINVAL); 2286 } 2287 2288 mtx_lock(&sc->mfi_io_lock); 2289 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2290 mtx_unlock(&sc->mfi_io_lock); 2291 return (EBUSY); 2292 } 2293 mtx_unlock(&sc->mfi_io_lock); 2294 locked = 0; 2295 2296 /* 2297 * save off original context since copying from user 2298 * will clobber some data 2299 */ 2300 context = cm->cm_frame->header.context; 2301 2302 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 2303 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 2304 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 2305 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 2306 if (l_ioc.lioc_sge_count) 2307 cm->cm_sg = 2308 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 2309 cm->cm_flags = 0; 2310 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 2311 cm->cm_flags |= MFI_CMD_DATAIN; 2312 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 2313 cm->cm_flags |= MFI_CMD_DATAOUT; 2314 cm->cm_len = cm->cm_frame->header.data_len; 2315 if (cm->cm_len && 2316 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 2317 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2318 M_WAITOK | M_ZERO); 2319 if (cm->cm_data == NULL) { 2320 device_printf(sc->mfi_dev, "Malloc failed\n"); 2321 goto out; 2322 } 2323 } else { 2324 cm->cm_data = 0; 2325 } 2326 2327 /* restore header context */ 2328 cm->cm_frame->header.context = context; 2329 2330 temp = data; 2331 if (cm->cm_flags & MFI_CMD_DATAOUT) { 2332 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2333 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 2334 temp, 2335 l_ioc.lioc_sgl[i].iov_len); 2336 if (error != 0) { 2337 device_printf(sc->mfi_dev, 2338 "Copy in failed\n"); 2339 goto out; 2340 } 2341 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2342 } 2343 } 2344 2345 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2346 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2347 2348 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 2349 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr; 2350 cm->cm_frame->pass.sense_addr_hi = 0; 2351 } 2352 2353 mtx_lock(&sc->mfi_io_lock); 2354 error = mfi_check_command_pre(sc, cm); 2355 if (error) { 2356 mtx_unlock(&sc->mfi_io_lock); 2357 goto out; 2358 } 2359 2360 if ((error = mfi_wait_command(sc, cm)) != 0) { 2361 device_printf(sc->mfi_dev, 2362 "Controller polled failed\n"); 2363 mtx_unlock(&sc->mfi_io_lock); 2364 goto out; 2365 } 2366 2367 mfi_check_command_post(sc, cm); 2368 mtx_unlock(&sc->mfi_io_lock); 2369 2370 temp = data; 2371 if (cm->cm_flags & MFI_CMD_DATAIN) { 2372 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2373 error = copyout(temp, 2374 PTRIN(l_ioc.lioc_sgl[i].iov_base), 2375 l_ioc.lioc_sgl[i].iov_len); 2376 if (error != 0) { 2377 device_printf(sc->mfi_dev, 2378 "Copy out failed\n"); 2379 goto out; 2380 } 2381 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2382 } 2383 } 2384 2385 if (l_ioc.lioc_sense_len) { 2386 /* get user-space sense ptr then copy out sense */ 2387 bcopy(&((struct mfi_linux_ioc_packet*)arg) 2388 ->lioc_frame.raw[l_ioc.lioc_sense_off], 2389 &sense_ptr.sense_ptr_data[0], 2390 sizeof(sense_ptr.sense_ptr_data)); 2391 #ifdef __amd64__ 2392 /* 2393 * only 32bit Linux support so zero out any 2394 * address over 32bit 2395 */ 2396 sense_ptr.addr.high = 0; 2397 #endif 2398 error = copyout(cm->cm_sense, sense_ptr.user_space, 2399 l_ioc.lioc_sense_len); 2400 if (error != 0) { 2401 device_printf(sc->mfi_dev, 2402 "Copy out failed\n"); 2403 goto out; 2404 } 2405 } 2406 2407 error = copyout(&cm->cm_frame->header.cmd_status, 2408 &((struct mfi_linux_ioc_packet*)arg) 2409 ->lioc_frame.hdr.cmd_status, 2410 1); 2411 if (error != 0) { 2412 device_printf(sc->mfi_dev, 2413 "Copy out failed\n"); 2414 goto out; 2415 } 2416 2417 out: 2418 mfi_config_unlock(sc, locked); 2419 if (data) 2420 free(data, M_MFIBUF); 2421 if (cm) { 2422 mtx_lock(&sc->mfi_io_lock); 2423 mfi_release_command(cm); 2424 mtx_unlock(&sc->mfi_io_lock); 2425 } 2426 2427 return (error); 2428 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2429 error = copyin(arg, &l_aen, sizeof(l_aen)); 2430 if (error != 0) 2431 return (error); 2432 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); 2433 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, 2434 M_WAITOK); 2435 mtx_lock(&sc->mfi_io_lock); 2436 if (mfi_aen_entry != NULL) { 2437 mfi_aen_entry->p = curproc; 2438 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, 2439 aen_link); 2440 } 2441 error = mfi_aen_register(sc, l_aen.laen_seq_num, 2442 l_aen.laen_class_locale); 2443 2444 if (error != 0) { 2445 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2446 aen_link); 2447 free(mfi_aen_entry, M_MFIBUF); 2448 } 2449 mtx_unlock(&sc->mfi_io_lock); 2450 2451 return (error); 2452 default: 2453 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2454 error = ENOENT; 2455 break; 2456 } 2457 2458 return (error); 2459 } 2460 2461 static int 2462 mfi_poll(struct cdev *dev, int poll_events, struct thread *td) 2463 { 2464 struct mfi_softc *sc; 2465 int revents = 0; 2466 2467 sc = dev->si_drv1; 2468 2469 if (poll_events & (POLLIN | POLLRDNORM)) { 2470 if (sc->mfi_aen_triggered != 0) { 2471 revents |= poll_events & (POLLIN | POLLRDNORM); 2472 sc->mfi_aen_triggered = 0; 2473 } 2474 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { 2475 revents |= POLLERR; 2476 } 2477 } 2478 2479 if (revents == 0) { 2480 if (poll_events & (POLLIN | POLLRDNORM)) { 2481 sc->mfi_poll_waiting = 1; 2482 selrecord(td, &sc->mfi_select); 2483 } 2484 } 2485 2486 return revents; 2487 } 2488 2489 2490 static void 2491 mfi_dump_all(void) 2492 { 2493 struct mfi_softc *sc; 2494 struct mfi_command *cm; 2495 devclass_t dc; 2496 time_t deadline; 2497 int timedout; 2498 int i; 2499 2500 dc = devclass_find("mfi"); 2501 if (dc == NULL) { 2502 printf("No mfi dev class\n"); 2503 return; 2504 } 2505 2506 for (i = 0; ; i++) { 2507 sc = devclass_get_softc(dc, i); 2508 if (sc == NULL) 2509 break; 2510 device_printf(sc->mfi_dev, "Dumping\n\n"); 2511 timedout = 0; 2512 deadline = time_uptime - MFI_CMD_TIMEOUT; 2513 mtx_lock(&sc->mfi_io_lock); 2514 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2515 if (cm->cm_timestamp < deadline) { 2516 device_printf(sc->mfi_dev, 2517 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2518 (int)(time_uptime - cm->cm_timestamp)); 2519 MFI_PRINT_CMD(cm); 2520 timedout++; 2521 } 2522 } 2523 2524 #if 0 2525 if (timedout) 2526 MFI_DUMP_CMDS(SC); 2527 #endif 2528 2529 mtx_unlock(&sc->mfi_io_lock); 2530 } 2531 2532 return; 2533 } 2534 2535 static void 2536 mfi_timeout(void *data) 2537 { 2538 struct mfi_softc *sc = (struct mfi_softc *)data; 2539 struct mfi_command *cm; 2540 time_t deadline; 2541 int timedout = 0; 2542 2543 deadline = time_uptime - MFI_CMD_TIMEOUT; 2544 mtx_lock(&sc->mfi_io_lock); 2545 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2546 if (sc->mfi_aen_cm == cm) 2547 continue; 2548 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) { 2549 device_printf(sc->mfi_dev, 2550 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2551 (int)(time_uptime - cm->cm_timestamp)); 2552 MFI_PRINT_CMD(cm); 2553 MFI_VALIDATE_CMD(sc, cm); 2554 timedout++; 2555 } 2556 } 2557 2558 #if 0 2559 if (timedout) 2560 MFI_DUMP_CMDS(SC); 2561 #endif 2562 2563 mtx_unlock(&sc->mfi_io_lock); 2564 2565 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 2566 mfi_timeout, sc); 2567 2568 if (0) 2569 mfi_dump_all(); 2570 return; 2571 } 2572