1 /*- 2 * Copyright (c) 2006 IronPort Systems 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /*- 27 * Copyright (c) 2007 LSI Corp. 28 * Copyright (c) 2007 Rajesh Prabhakaran. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include "opt_mfi.h" 57 58 #include <sys/param.h> 59 #include <sys/systm.h> 60 #include <sys/sysctl.h> 61 #include <sys/malloc.h> 62 #include <sys/kernel.h> 63 #include <sys/poll.h> 64 #include <sys/selinfo.h> 65 #include <sys/bus.h> 66 #include <sys/conf.h> 67 #include <sys/eventhandler.h> 68 #include <sys/rman.h> 69 #include <sys/bus_dma.h> 70 #include <sys/bio.h> 71 #include <sys/ioccom.h> 72 #include <sys/uio.h> 73 #include <sys/proc.h> 74 #include <sys/signalvar.h> 75 76 #include <machine/bus.h> 77 #include <machine/resource.h> 78 79 #include <dev/mfi/mfireg.h> 80 #include <dev/mfi/mfi_ioctl.h> 81 #include <dev/mfi/mfivar.h> 82 83 static int mfi_alloc_commands(struct mfi_softc *); 84 static int mfi_comms_init(struct mfi_softc *); 85 static int mfi_wait_command(struct mfi_softc *, struct mfi_command *); 86 static int mfi_get_controller_info(struct mfi_softc *); 87 static int mfi_get_log_state(struct mfi_softc *, 88 struct mfi_evt_log_state **); 89 static int mfi_parse_entries(struct mfi_softc *, int, int); 90 static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **, 91 uint32_t, void **, size_t); 92 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); 93 static void mfi_startup(void *arg); 94 static void mfi_intr(void *arg); 95 static void mfi_ldprobe(struct mfi_softc *sc); 96 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); 97 static void mfi_aen_complete(struct mfi_command *); 98 static int mfi_aen_setup(struct mfi_softc *, uint32_t); 99 static int mfi_add_ld(struct mfi_softc *sc, int); 100 static void mfi_add_ld_complete(struct mfi_command *); 101 static struct mfi_command * mfi_bio_command(struct mfi_softc *); 102 static void mfi_bio_complete(struct mfi_command *); 103 static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *); 104 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); 105 static void mfi_complete(struct mfi_softc *, struct mfi_command *); 106 static int mfi_abort(struct mfi_softc *, struct mfi_command *); 107 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *); 108 static void mfi_timeout(void *); 109 static int mfi_user_command(struct mfi_softc *, 110 struct mfi_ioc_passthru *); 111 static void mfi_enable_intr_xscale(struct mfi_softc *sc); 112 static void mfi_enable_intr_ppc(struct mfi_softc *sc); 113 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); 114 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); 115 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); 116 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); 117 static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 118 static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt); 119 120 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); 121 static int mfi_event_locale = MFI_EVT_LOCALE_ALL; 122 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale); 123 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale, 124 0, "event message locale"); 125 126 static int mfi_event_class = MFI_EVT_CLASS_INFO; 127 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class); 128 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class, 129 0, "event message class"); 130 131 static int mfi_max_cmds = 128; 132 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds); 133 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds, 134 0, "Max commands"); 135 136 /* Management interface */ 137 static d_open_t mfi_open; 138 static d_close_t mfi_close; 139 static d_ioctl_t mfi_ioctl; 140 static d_poll_t mfi_poll; 141 142 static struct cdevsw mfi_cdevsw = { 143 .d_version = D_VERSION, 144 .d_flags = 0, 145 .d_open = mfi_open, 146 .d_close = mfi_close, 147 .d_ioctl = mfi_ioctl, 148 .d_poll = mfi_poll, 149 .d_name = "mfi", 150 }; 151 152 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); 153 154 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH 155 156 static void 157 mfi_enable_intr_xscale(struct mfi_softc *sc) 158 { 159 MFI_WRITE4(sc, MFI_OMSK, 0x01); 160 } 161 162 static void 163 mfi_enable_intr_ppc(struct mfi_softc *sc) 164 { 165 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); 166 if (sc->mfi_flags & MFI_FLAGS_1078) { 167 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); 168 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 169 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM); 170 } 171 } 172 173 static int32_t 174 mfi_read_fw_status_xscale(struct mfi_softc *sc) 175 { 176 return MFI_READ4(sc, MFI_OMSG0); 177 } 178 179 static int32_t 180 mfi_read_fw_status_ppc(struct mfi_softc *sc) 181 { 182 return MFI_READ4(sc, MFI_OSP0); 183 } 184 185 static int 186 mfi_check_clear_intr_xscale(struct mfi_softc *sc) 187 { 188 int32_t status; 189 190 status = MFI_READ4(sc, MFI_OSTS); 191 if ((status & MFI_OSTS_INTR_VALID) == 0) 192 return 1; 193 194 MFI_WRITE4(sc, MFI_OSTS, status); 195 return 0; 196 } 197 198 static int 199 mfi_check_clear_intr_ppc(struct mfi_softc *sc) 200 { 201 int32_t status; 202 203 status = MFI_READ4(sc, MFI_OSTS); 204 if (sc->mfi_flags & MFI_FLAGS_1078) { 205 if (!(status & MFI_1078_RM)) { 206 return 1; 207 } 208 } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 209 if (!(status & MFI_GEN2_RM)) { 210 return 1; 211 } 212 } 213 214 MFI_WRITE4(sc, MFI_ODCR0, status); 215 return 0; 216 } 217 218 static void 219 mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 220 { 221 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); 222 } 223 224 static void 225 mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt) 226 { 227 MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 ); 228 } 229 230 static int 231 mfi_transition_firmware(struct mfi_softc *sc) 232 { 233 uint32_t fw_state, cur_state; 234 int max_wait, i; 235 236 fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK; 237 while (fw_state != MFI_FWSTATE_READY) { 238 if (bootverbose) 239 device_printf(sc->mfi_dev, "Waiting for firmware to " 240 "become ready\n"); 241 cur_state = fw_state; 242 switch (fw_state) { 243 case MFI_FWSTATE_FAULT: 244 device_printf(sc->mfi_dev, "Firmware fault\n"); 245 return (ENXIO); 246 case MFI_FWSTATE_WAIT_HANDSHAKE: 247 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); 248 max_wait = 2; 249 break; 250 case MFI_FWSTATE_OPERATIONAL: 251 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); 252 max_wait = 10; 253 break; 254 case MFI_FWSTATE_UNDEFINED: 255 case MFI_FWSTATE_BB_INIT: 256 max_wait = 2; 257 break; 258 case MFI_FWSTATE_FW_INIT: 259 case MFI_FWSTATE_DEVICE_SCAN: 260 case MFI_FWSTATE_FLUSH_CACHE: 261 max_wait = 20; 262 break; 263 case MFI_FWSTATE_BOOT_MESSAGE_PENDING: 264 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG); 265 max_wait = 10; 266 break; 267 default: 268 device_printf(sc->mfi_dev,"Unknown firmware state %#x\n", 269 fw_state); 270 return (ENXIO); 271 } 272 for (i = 0; i < (max_wait * 10); i++) { 273 fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK; 274 if (fw_state == cur_state) 275 DELAY(100000); 276 else 277 break; 278 } 279 if (fw_state == cur_state) { 280 device_printf(sc->mfi_dev, "Firmware stuck in state " 281 "%#x\n", fw_state); 282 return (ENXIO); 283 } 284 } 285 return (0); 286 } 287 288 static void 289 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 290 { 291 uint32_t *addr; 292 293 addr = arg; 294 *addr = segs[0].ds_addr; 295 } 296 297 int 298 mfi_attach(struct mfi_softc *sc) 299 { 300 uint32_t status; 301 int error, commsz, framessz, sensesz; 302 int frames, unit, max_fw_sge; 303 304 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n"); 305 306 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); 307 sx_init(&sc->mfi_config_lock, "MFI config"); 308 TAILQ_INIT(&sc->mfi_ld_tqh); 309 TAILQ_INIT(&sc->mfi_aen_pids); 310 TAILQ_INIT(&sc->mfi_cam_ccbq); 311 312 mfi_initq_free(sc); 313 mfi_initq_ready(sc); 314 mfi_initq_busy(sc); 315 mfi_initq_bio(sc); 316 317 if (sc->mfi_flags & MFI_FLAGS_1064R) { 318 sc->mfi_enable_intr = mfi_enable_intr_xscale; 319 sc->mfi_read_fw_status = mfi_read_fw_status_xscale; 320 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; 321 sc->mfi_issue_cmd = mfi_issue_cmd_xscale; 322 } 323 else { 324 sc->mfi_enable_intr = mfi_enable_intr_ppc; 325 sc->mfi_read_fw_status = mfi_read_fw_status_ppc; 326 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; 327 sc->mfi_issue_cmd = mfi_issue_cmd_ppc; 328 } 329 330 331 /* Before we get too far, see if the firmware is working */ 332 if ((error = mfi_transition_firmware(sc)) != 0) { 333 device_printf(sc->mfi_dev, "Firmware not in READY state, " 334 "error %d\n", error); 335 return (ENXIO); 336 } 337 338 /* 339 * Get information needed for sizing the contiguous memory for the 340 * frame pool. Size down the sgl parameter since we know that 341 * we will never need more than what's required for MAXPHYS. 342 * It would be nice if these constants were available at runtime 343 * instead of compile time. 344 */ 345 status = sc->mfi_read_fw_status(sc); 346 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; 347 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; 348 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1)); 349 350 /* 351 * Create the dma tag for data buffers. Used both for block I/O 352 * and for various internal data queries. 353 */ 354 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 355 1, 0, /* algnmnt, boundary */ 356 BUS_SPACE_MAXADDR, /* lowaddr */ 357 BUS_SPACE_MAXADDR, /* highaddr */ 358 NULL, NULL, /* filter, filterarg */ 359 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 360 sc->mfi_max_sge, /* nsegments */ 361 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 362 BUS_DMA_ALLOCNOW, /* flags */ 363 busdma_lock_mutex, /* lockfunc */ 364 &sc->mfi_io_lock, /* lockfuncarg */ 365 &sc->mfi_buffer_dmat)) { 366 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); 367 return (ENOMEM); 368 } 369 370 /* 371 * Allocate DMA memory for the comms queues. Keep it under 4GB for 372 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue 373 * entry, so the calculated size here will be will be 1 more than 374 * mfi_max_fw_cmds. This is apparently a requirement of the hardware. 375 */ 376 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) + 377 sizeof(struct mfi_hwcomms); 378 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 379 1, 0, /* algnmnt, boundary */ 380 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 381 BUS_SPACE_MAXADDR, /* highaddr */ 382 NULL, NULL, /* filter, filterarg */ 383 commsz, /* maxsize */ 384 1, /* msegments */ 385 commsz, /* maxsegsize */ 386 0, /* flags */ 387 NULL, NULL, /* lockfunc, lockarg */ 388 &sc->mfi_comms_dmat)) { 389 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); 390 return (ENOMEM); 391 } 392 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms, 393 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) { 394 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 395 return (ENOMEM); 396 } 397 bzero(sc->mfi_comms, commsz); 398 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap, 399 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0); 400 401 /* 402 * Allocate DMA memory for the command frames. Keep them in the 403 * lower 4GB for efficiency. Calculate the size of the commands at 404 * the same time; each command is one 64 byte frame plus a set of 405 * additional frames for holding sg lists or other data. 406 * The assumption here is that the SG list will start at the second 407 * frame and not use the unused bytes in the first frame. While this 408 * isn't technically correct, it simplifies the calculation and allows 409 * for command frames that might be larger than an mfi_io_frame. 410 */ 411 if (sizeof(bus_addr_t) == 8) { 412 sc->mfi_sge_size = sizeof(struct mfi_sg64); 413 sc->mfi_flags |= MFI_FLAGS_SG64; 414 } else { 415 sc->mfi_sge_size = sizeof(struct mfi_sg32); 416 } 417 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; 418 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; 419 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; 420 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 421 64, 0, /* algnmnt, boundary */ 422 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 423 BUS_SPACE_MAXADDR, /* highaddr */ 424 NULL, NULL, /* filter, filterarg */ 425 framessz, /* maxsize */ 426 1, /* nsegments */ 427 framessz, /* maxsegsize */ 428 0, /* flags */ 429 NULL, NULL, /* lockfunc, lockarg */ 430 &sc->mfi_frames_dmat)) { 431 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); 432 return (ENOMEM); 433 } 434 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, 435 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { 436 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); 437 return (ENOMEM); 438 } 439 bzero(sc->mfi_frames, framessz); 440 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, 441 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0); 442 443 /* 444 * Allocate DMA memory for the frame sense data. Keep them in the 445 * lower 4GB for efficiency 446 */ 447 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; 448 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 449 4, 0, /* algnmnt, boundary */ 450 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 451 BUS_SPACE_MAXADDR, /* highaddr */ 452 NULL, NULL, /* filter, filterarg */ 453 sensesz, /* maxsize */ 454 1, /* nsegments */ 455 sensesz, /* maxsegsize */ 456 0, /* flags */ 457 NULL, NULL, /* lockfunc, lockarg */ 458 &sc->mfi_sense_dmat)) { 459 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); 460 return (ENOMEM); 461 } 462 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, 463 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { 464 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); 465 return (ENOMEM); 466 } 467 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, 468 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0); 469 470 if ((error = mfi_alloc_commands(sc)) != 0) 471 return (error); 472 473 if ((error = mfi_comms_init(sc)) != 0) 474 return (error); 475 476 if ((error = mfi_get_controller_info(sc)) != 0) 477 return (error); 478 479 mtx_lock(&sc->mfi_io_lock); 480 if ((error = mfi_aen_setup(sc, 0), 0) != 0) { 481 mtx_unlock(&sc->mfi_io_lock); 482 return (error); 483 } 484 mtx_unlock(&sc->mfi_io_lock); 485 486 /* 487 * Set up the interrupt handler. XXX This should happen in 488 * mfi_pci.c 489 */ 490 sc->mfi_irq_rid = 0; 491 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ, 492 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { 493 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n"); 494 return (EINVAL); 495 } 496 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, 497 NULL, mfi_intr, sc, &sc->mfi_intr)) { 498 device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); 499 return (EINVAL); 500 } 501 502 /* Register a config hook to probe the bus for arrays */ 503 sc->mfi_ich.ich_func = mfi_startup; 504 sc->mfi_ich.ich_arg = sc; 505 if (config_intrhook_establish(&sc->mfi_ich) != 0) { 506 device_printf(sc->mfi_dev, "Cannot establish configuration " 507 "hook\n"); 508 return (EINVAL); 509 } 510 511 /* 512 * Register a shutdown handler. 513 */ 514 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, 515 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { 516 device_printf(sc->mfi_dev, "Warning: shutdown event " 517 "registration failed\n"); 518 } 519 520 /* 521 * Create the control device for doing management 522 */ 523 unit = device_get_unit(sc->mfi_dev); 524 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 525 0640, "mfi%d", unit); 526 if (unit == 0) 527 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node"); 528 if (sc->mfi_cdev != NULL) 529 sc->mfi_cdev->si_drv1 = sc; 530 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 531 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 532 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, 533 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); 534 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 535 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 536 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, 537 &sc->mfi_keep_deleted_volumes, 0, 538 "Don't detach the mfid device for a busy volume that is deleted"); 539 540 device_add_child(sc->mfi_dev, "mfip", -1); 541 bus_generic_attach(sc->mfi_dev); 542 543 /* Start the timeout watchdog */ 544 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE); 545 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 546 mfi_timeout, sc); 547 548 return (0); 549 } 550 551 static int 552 mfi_alloc_commands(struct mfi_softc *sc) 553 { 554 struct mfi_command *cm; 555 int i, ncmds; 556 557 /* 558 * XXX Should we allocate all the commands up front, or allocate on 559 * demand later like 'aac' does? 560 */ 561 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds); 562 if (bootverbose) 563 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver " 564 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds); 565 566 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF, 567 M_WAITOK | M_ZERO); 568 569 for (i = 0; i < ncmds; i++) { 570 cm = &sc->mfi_commands[i]; 571 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + 572 sc->mfi_cmd_size * i); 573 cm->cm_frame_busaddr = sc->mfi_frames_busaddr + 574 sc->mfi_cmd_size * i; 575 cm->cm_frame->header.context = i; 576 cm->cm_sense = &sc->mfi_sense[i]; 577 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; 578 cm->cm_sc = sc; 579 cm->cm_index = i; 580 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, 581 &cm->cm_dmamap) == 0) 582 mfi_release_command(cm); 583 else 584 break; 585 sc->mfi_total_cmds++; 586 } 587 588 return (0); 589 } 590 591 void 592 mfi_release_command(struct mfi_command *cm) 593 { 594 struct mfi_frame_header *hdr; 595 uint32_t *hdr_data; 596 597 /* 598 * Zero out the important fields of the frame, but make sure the 599 * context field is preserved. For efficiency, handle the fields 600 * as 32 bit words. Clear out the first S/G entry too for safety. 601 */ 602 hdr = &cm->cm_frame->header; 603 if (cm->cm_data != NULL && hdr->sg_count) { 604 cm->cm_sg->sg32[0].len = 0; 605 cm->cm_sg->sg32[0].addr = 0; 606 } 607 608 hdr_data = (uint32_t *)cm->cm_frame; 609 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ 610 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ 611 hdr_data[4] = 0; /* flags, timeout */ 612 hdr_data[5] = 0; /* data_len */ 613 614 cm->cm_extra_frames = 0; 615 cm->cm_flags = 0; 616 cm->cm_complete = NULL; 617 cm->cm_private = NULL; 618 cm->cm_data = NULL; 619 cm->cm_sg = 0; 620 cm->cm_total_frame_size = 0; 621 622 mfi_enqueue_free(cm); 623 } 624 625 static int 626 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode, 627 void **bufp, size_t bufsize) 628 { 629 struct mfi_command *cm; 630 struct mfi_dcmd_frame *dcmd; 631 void *buf = NULL; 632 633 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 634 635 cm = mfi_dequeue_free(sc); 636 if (cm == NULL) 637 return (EBUSY); 638 639 if ((bufsize > 0) && (bufp != NULL)) { 640 if (*bufp == NULL) { 641 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); 642 if (buf == NULL) { 643 mfi_release_command(cm); 644 return (ENOMEM); 645 } 646 *bufp = buf; 647 } else { 648 buf = *bufp; 649 } 650 } 651 652 dcmd = &cm->cm_frame->dcmd; 653 bzero(dcmd->mbox, MFI_MBOX_SIZE); 654 dcmd->header.cmd = MFI_CMD_DCMD; 655 dcmd->header.timeout = 0; 656 dcmd->header.flags = 0; 657 dcmd->header.data_len = bufsize; 658 dcmd->opcode = opcode; 659 cm->cm_sg = &dcmd->sgl; 660 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 661 cm->cm_flags = 0; 662 cm->cm_data = buf; 663 cm->cm_private = buf; 664 cm->cm_len = bufsize; 665 666 *cmp = cm; 667 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) 668 *bufp = buf; 669 return (0); 670 } 671 672 static int 673 mfi_comms_init(struct mfi_softc *sc) 674 { 675 struct mfi_command *cm; 676 struct mfi_init_frame *init; 677 struct mfi_init_qinfo *qinfo; 678 int error; 679 680 mtx_lock(&sc->mfi_io_lock); 681 if ((cm = mfi_dequeue_free(sc)) == NULL) 682 return (EBUSY); 683 684 /* 685 * Abuse the SG list area of the frame to hold the init_qinfo 686 * object; 687 */ 688 init = &cm->cm_frame->init; 689 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); 690 691 bzero(qinfo, sizeof(struct mfi_init_qinfo)); 692 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; 693 qinfo->rq_addr_lo = sc->mfi_comms_busaddr + 694 offsetof(struct mfi_hwcomms, hw_reply_q); 695 qinfo->pi_addr_lo = sc->mfi_comms_busaddr + 696 offsetof(struct mfi_hwcomms, hw_pi); 697 qinfo->ci_addr_lo = sc->mfi_comms_busaddr + 698 offsetof(struct mfi_hwcomms, hw_ci); 699 700 init->header.cmd = MFI_CMD_INIT; 701 init->header.data_len = sizeof(struct mfi_init_qinfo); 702 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; 703 cm->cm_data = NULL; 704 cm->cm_flags = MFI_CMD_POLLED; 705 706 if ((error = mfi_mapcmd(sc, cm)) != 0) { 707 device_printf(sc->mfi_dev, "failed to send init command\n"); 708 mtx_unlock(&sc->mfi_io_lock); 709 return (error); 710 } 711 mfi_release_command(cm); 712 mtx_unlock(&sc->mfi_io_lock); 713 714 return (0); 715 } 716 717 static int 718 mfi_get_controller_info(struct mfi_softc *sc) 719 { 720 struct mfi_command *cm = NULL; 721 struct mfi_ctrl_info *ci = NULL; 722 uint32_t max_sectors_1, max_sectors_2; 723 int error; 724 725 mtx_lock(&sc->mfi_io_lock); 726 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, 727 (void **)&ci, sizeof(*ci)); 728 if (error) 729 goto out; 730 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 731 732 if ((error = mfi_mapcmd(sc, cm)) != 0) { 733 device_printf(sc->mfi_dev, "Failed to get controller info\n"); 734 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / 735 MFI_SECTOR_LEN; 736 error = 0; 737 goto out; 738 } 739 740 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 741 BUS_DMASYNC_POSTREAD); 742 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 743 744 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io; 745 max_sectors_2 = ci->max_request_size; 746 sc->mfi_max_io = min(max_sectors_1, max_sectors_2); 747 748 out: 749 if (ci) 750 free(ci, M_MFIBUF); 751 if (cm) 752 mfi_release_command(cm); 753 mtx_unlock(&sc->mfi_io_lock); 754 return (error); 755 } 756 757 static int 758 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) 759 { 760 struct mfi_command *cm = NULL; 761 int error; 762 763 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, 764 (void **)log_state, sizeof(**log_state)); 765 if (error) 766 goto out; 767 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 768 769 if ((error = mfi_mapcmd(sc, cm)) != 0) { 770 device_printf(sc->mfi_dev, "Failed to get log state\n"); 771 goto out; 772 } 773 774 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 775 BUS_DMASYNC_POSTREAD); 776 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 777 778 out: 779 if (cm) 780 mfi_release_command(cm); 781 782 return (error); 783 } 784 785 static int 786 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) 787 { 788 struct mfi_evt_log_state *log_state = NULL; 789 union mfi_evt class_locale; 790 int error = 0; 791 uint32_t seq; 792 793 class_locale.members.reserved = 0; 794 class_locale.members.locale = mfi_event_locale; 795 class_locale.members.evt_class = mfi_event_class; 796 797 if (seq_start == 0) { 798 error = mfi_get_log_state(sc, &log_state); 799 if (error) { 800 if (log_state) 801 free(log_state, M_MFIBUF); 802 return (error); 803 } 804 805 /* 806 * Walk through any events that fired since the last 807 * shutdown. 808 */ 809 mfi_parse_entries(sc, log_state->shutdown_seq_num, 810 log_state->newest_seq_num); 811 seq = log_state->newest_seq_num; 812 } else 813 seq = seq_start; 814 mfi_aen_register(sc, seq, class_locale.word); 815 free(log_state, M_MFIBUF); 816 817 return 0; 818 } 819 820 static int 821 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) 822 { 823 824 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 825 cm->cm_complete = NULL; 826 827 828 /* 829 * MegaCli can issue a DCMD of 0. In this case do nothing 830 * and return 0 to it as status 831 */ 832 if (cm->cm_frame->dcmd.opcode == 0) { 833 cm->cm_frame->header.cmd_status = MFI_STAT_OK; 834 cm->cm_error = 0; 835 return (cm->cm_error); 836 } 837 mfi_enqueue_ready(cm); 838 mfi_startio(sc); 839 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) 840 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); 841 return (cm->cm_error); 842 } 843 844 void 845 mfi_free(struct mfi_softc *sc) 846 { 847 struct mfi_command *cm; 848 int i; 849 850 callout_drain(&sc->mfi_watchdog_callout); 851 852 if (sc->mfi_cdev != NULL) 853 destroy_dev(sc->mfi_cdev); 854 855 if (sc->mfi_total_cmds != 0) { 856 for (i = 0; i < sc->mfi_total_cmds; i++) { 857 cm = &sc->mfi_commands[i]; 858 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); 859 } 860 free(sc->mfi_commands, M_MFIBUF); 861 } 862 863 if (sc->mfi_intr) 864 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); 865 if (sc->mfi_irq != NULL) 866 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, 867 sc->mfi_irq); 868 869 if (sc->mfi_sense_busaddr != 0) 870 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); 871 if (sc->mfi_sense != NULL) 872 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, 873 sc->mfi_sense_dmamap); 874 if (sc->mfi_sense_dmat != NULL) 875 bus_dma_tag_destroy(sc->mfi_sense_dmat); 876 877 if (sc->mfi_frames_busaddr != 0) 878 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); 879 if (sc->mfi_frames != NULL) 880 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, 881 sc->mfi_frames_dmamap); 882 if (sc->mfi_frames_dmat != NULL) 883 bus_dma_tag_destroy(sc->mfi_frames_dmat); 884 885 if (sc->mfi_comms_busaddr != 0) 886 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); 887 if (sc->mfi_comms != NULL) 888 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, 889 sc->mfi_comms_dmamap); 890 if (sc->mfi_comms_dmat != NULL) 891 bus_dma_tag_destroy(sc->mfi_comms_dmat); 892 893 if (sc->mfi_buffer_dmat != NULL) 894 bus_dma_tag_destroy(sc->mfi_buffer_dmat); 895 if (sc->mfi_parent_dmat != NULL) 896 bus_dma_tag_destroy(sc->mfi_parent_dmat); 897 898 if (mtx_initialized(&sc->mfi_io_lock)) { 899 mtx_destroy(&sc->mfi_io_lock); 900 sx_destroy(&sc->mfi_config_lock); 901 } 902 903 return; 904 } 905 906 static void 907 mfi_startup(void *arg) 908 { 909 struct mfi_softc *sc; 910 911 sc = (struct mfi_softc *)arg; 912 913 config_intrhook_disestablish(&sc->mfi_ich); 914 915 sc->mfi_enable_intr(sc); 916 sx_xlock(&sc->mfi_config_lock); 917 mtx_lock(&sc->mfi_io_lock); 918 mfi_ldprobe(sc); 919 mtx_unlock(&sc->mfi_io_lock); 920 sx_xunlock(&sc->mfi_config_lock); 921 } 922 923 static void 924 mfi_intr(void *arg) 925 { 926 struct mfi_softc *sc; 927 struct mfi_command *cm; 928 uint32_t pi, ci, context; 929 930 sc = (struct mfi_softc *)arg; 931 932 if (sc->mfi_check_clear_intr(sc)) 933 return; 934 935 pi = sc->mfi_comms->hw_pi; 936 ci = sc->mfi_comms->hw_ci; 937 mtx_lock(&sc->mfi_io_lock); 938 while (ci != pi) { 939 context = sc->mfi_comms->hw_reply_q[ci]; 940 if (context < sc->mfi_max_fw_cmds) { 941 cm = &sc->mfi_commands[context]; 942 mfi_remove_busy(cm); 943 cm->cm_error = 0; 944 mfi_complete(sc, cm); 945 } 946 if (++ci == (sc->mfi_max_fw_cmds + 1)) { 947 ci = 0; 948 } 949 } 950 951 sc->mfi_comms->hw_ci = ci; 952 953 /* Give defered I/O a chance to run */ 954 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 955 sc->mfi_flags &= ~MFI_FLAGS_QFRZN; 956 mfi_startio(sc); 957 mtx_unlock(&sc->mfi_io_lock); 958 959 return; 960 } 961 962 int 963 mfi_shutdown(struct mfi_softc *sc) 964 { 965 struct mfi_dcmd_frame *dcmd; 966 struct mfi_command *cm; 967 int error; 968 969 mtx_lock(&sc->mfi_io_lock); 970 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); 971 if (error) { 972 mtx_unlock(&sc->mfi_io_lock); 973 return (error); 974 } 975 976 if (sc->mfi_aen_cm != NULL) 977 mfi_abort(sc, sc->mfi_aen_cm); 978 979 dcmd = &cm->cm_frame->dcmd; 980 dcmd->header.flags = MFI_FRAME_DIR_NONE; 981 cm->cm_flags = MFI_CMD_POLLED; 982 cm->cm_data = NULL; 983 984 if ((error = mfi_mapcmd(sc, cm)) != 0) { 985 device_printf(sc->mfi_dev, "Failed to shutdown controller\n"); 986 } 987 988 mfi_release_command(cm); 989 mtx_unlock(&sc->mfi_io_lock); 990 return (error); 991 } 992 993 static void 994 mfi_ldprobe(struct mfi_softc *sc) 995 { 996 struct mfi_frame_header *hdr; 997 struct mfi_command *cm = NULL; 998 struct mfi_ld_list *list = NULL; 999 struct mfi_disk *ld; 1000 int error, i; 1001 1002 sx_assert(&sc->mfi_config_lock, SA_XLOCKED); 1003 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1004 1005 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, 1006 (void **)&list, sizeof(*list)); 1007 if (error) 1008 goto out; 1009 1010 cm->cm_flags = MFI_CMD_DATAIN; 1011 if (mfi_wait_command(sc, cm) != 0) { 1012 device_printf(sc->mfi_dev, "Failed to get device listing\n"); 1013 goto out; 1014 } 1015 1016 hdr = &cm->cm_frame->header; 1017 if (hdr->cmd_status != MFI_STAT_OK) { 1018 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", 1019 hdr->cmd_status); 1020 goto out; 1021 } 1022 1023 for (i = 0; i < list->ld_count; i++) { 1024 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1025 if (ld->ld_id == list->ld_list[i].ld.v.target_id) 1026 goto skip_add; 1027 } 1028 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); 1029 skip_add:; 1030 } 1031 out: 1032 if (list) 1033 free(list, M_MFIBUF); 1034 if (cm) 1035 mfi_release_command(cm); 1036 1037 return; 1038 } 1039 1040 /* 1041 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If 1042 * the bits in 24-31 are all set, then it is the number of seconds since 1043 * boot. 1044 */ 1045 static const char * 1046 format_timestamp(uint32_t timestamp) 1047 { 1048 static char buffer[32]; 1049 1050 if ((timestamp & 0xff000000) == 0xff000000) 1051 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 1052 0x00ffffff); 1053 else 1054 snprintf(buffer, sizeof(buffer), "%us", timestamp); 1055 return (buffer); 1056 } 1057 1058 static const char * 1059 format_class(int8_t class) 1060 { 1061 static char buffer[6]; 1062 1063 switch (class) { 1064 case MFI_EVT_CLASS_DEBUG: 1065 return ("debug"); 1066 case MFI_EVT_CLASS_PROGRESS: 1067 return ("progress"); 1068 case MFI_EVT_CLASS_INFO: 1069 return ("info"); 1070 case MFI_EVT_CLASS_WARNING: 1071 return ("WARN"); 1072 case MFI_EVT_CLASS_CRITICAL: 1073 return ("CRIT"); 1074 case MFI_EVT_CLASS_FATAL: 1075 return ("FATAL"); 1076 case MFI_EVT_CLASS_DEAD: 1077 return ("DEAD"); 1078 default: 1079 snprintf(buffer, sizeof(buffer), "%d", class); 1080 return (buffer); 1081 } 1082 } 1083 1084 static void 1085 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) 1086 { 1087 1088 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, 1089 format_timestamp(detail->time), detail->evt_class.members.locale, 1090 format_class(detail->evt_class.members.evt_class), detail->description); 1091 } 1092 1093 static int 1094 mfi_aen_register(struct mfi_softc *sc, int seq, int locale) 1095 { 1096 struct mfi_command *cm; 1097 struct mfi_dcmd_frame *dcmd; 1098 union mfi_evt current_aen, prior_aen; 1099 struct mfi_evt_detail *ed = NULL; 1100 int error = 0; 1101 1102 current_aen.word = locale; 1103 if (sc->mfi_aen_cm != NULL) { 1104 prior_aen.word = 1105 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; 1106 if (prior_aen.members.evt_class <= current_aen.members.evt_class && 1107 !((prior_aen.members.locale & current_aen.members.locale) 1108 ^current_aen.members.locale)) { 1109 return (0); 1110 } else { 1111 prior_aen.members.locale |= current_aen.members.locale; 1112 if (prior_aen.members.evt_class 1113 < current_aen.members.evt_class) 1114 current_aen.members.evt_class = 1115 prior_aen.members.evt_class; 1116 mfi_abort(sc, sc->mfi_aen_cm); 1117 } 1118 } 1119 1120 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, 1121 (void **)&ed, sizeof(*ed)); 1122 if (error) { 1123 goto out; 1124 } 1125 1126 dcmd = &cm->cm_frame->dcmd; 1127 ((uint32_t *)&dcmd->mbox)[0] = seq; 1128 ((uint32_t *)&dcmd->mbox)[1] = locale; 1129 cm->cm_flags = MFI_CMD_DATAIN; 1130 cm->cm_complete = mfi_aen_complete; 1131 1132 sc->mfi_aen_cm = cm; 1133 1134 mfi_enqueue_ready(cm); 1135 mfi_startio(sc); 1136 1137 out: 1138 return (error); 1139 } 1140 1141 static void 1142 mfi_aen_complete(struct mfi_command *cm) 1143 { 1144 struct mfi_frame_header *hdr; 1145 struct mfi_softc *sc; 1146 struct mfi_evt_detail *detail; 1147 struct mfi_aen *mfi_aen_entry, *tmp; 1148 int seq = 0, aborted = 0; 1149 1150 sc = cm->cm_sc; 1151 hdr = &cm->cm_frame->header; 1152 1153 if (sc->mfi_aen_cm == NULL) 1154 return; 1155 1156 if (sc->mfi_aen_cm->cm_aen_abort || 1157 hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1158 sc->mfi_aen_cm->cm_aen_abort = 0; 1159 aborted = 1; 1160 } else { 1161 sc->mfi_aen_triggered = 1; 1162 if (sc->mfi_poll_waiting) { 1163 sc->mfi_poll_waiting = 0; 1164 selwakeup(&sc->mfi_select); 1165 } 1166 detail = cm->cm_data; 1167 /* 1168 * XXX If this function is too expensive or is recursive, then 1169 * events should be put onto a queue and processed later. 1170 */ 1171 mfi_decode_evt(sc, detail); 1172 seq = detail->seq + 1; 1173 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1174 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1175 aen_link); 1176 PROC_LOCK(mfi_aen_entry->p); 1177 kern_psignal(mfi_aen_entry->p, SIGIO); 1178 PROC_UNLOCK(mfi_aen_entry->p); 1179 free(mfi_aen_entry, M_MFIBUF); 1180 } 1181 } 1182 1183 free(cm->cm_data, M_MFIBUF); 1184 sc->mfi_aen_cm = NULL; 1185 wakeup(&sc->mfi_aen_cm); 1186 mfi_release_command(cm); 1187 1188 /* set it up again so the driver can catch more events */ 1189 if (!aborted) { 1190 mfi_aen_setup(sc, seq); 1191 } 1192 } 1193 1194 #define MAX_EVENTS 15 1195 1196 static int 1197 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) 1198 { 1199 struct mfi_command *cm; 1200 struct mfi_dcmd_frame *dcmd; 1201 struct mfi_evt_list *el; 1202 union mfi_evt class_locale; 1203 int error, i, seq, size; 1204 1205 class_locale.members.reserved = 0; 1206 class_locale.members.locale = mfi_event_locale; 1207 class_locale.members.evt_class = mfi_event_class; 1208 1209 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) 1210 * (MAX_EVENTS - 1); 1211 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); 1212 if (el == NULL) 1213 return (ENOMEM); 1214 1215 for (seq = start_seq;;) { 1216 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1217 free(el, M_MFIBUF); 1218 return (EBUSY); 1219 } 1220 1221 dcmd = &cm->cm_frame->dcmd; 1222 bzero(dcmd->mbox, MFI_MBOX_SIZE); 1223 dcmd->header.cmd = MFI_CMD_DCMD; 1224 dcmd->header.timeout = 0; 1225 dcmd->header.data_len = size; 1226 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; 1227 ((uint32_t *)&dcmd->mbox)[0] = seq; 1228 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; 1229 cm->cm_sg = &dcmd->sgl; 1230 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1231 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1232 cm->cm_data = el; 1233 cm->cm_len = size; 1234 1235 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1236 device_printf(sc->mfi_dev, 1237 "Failed to get controller entries\n"); 1238 mfi_release_command(cm); 1239 break; 1240 } 1241 1242 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1243 BUS_DMASYNC_POSTREAD); 1244 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1245 1246 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { 1247 mfi_release_command(cm); 1248 break; 1249 } 1250 if (dcmd->header.cmd_status != MFI_STAT_OK) { 1251 device_printf(sc->mfi_dev, 1252 "Error %d fetching controller entries\n", 1253 dcmd->header.cmd_status); 1254 mfi_release_command(cm); 1255 break; 1256 } 1257 mfi_release_command(cm); 1258 1259 for (i = 0; i < el->count; i++) { 1260 /* 1261 * If this event is newer than 'stop_seq' then 1262 * break out of the loop. Note that the log 1263 * is a circular buffer so we have to handle 1264 * the case that our stop point is earlier in 1265 * the buffer than our start point. 1266 */ 1267 if (el->event[i].seq >= stop_seq) { 1268 if (start_seq <= stop_seq) 1269 break; 1270 else if (el->event[i].seq < start_seq) 1271 break; 1272 } 1273 mfi_decode_evt(sc, &el->event[i]); 1274 } 1275 seq = el->event[el->count - 1].seq + 1; 1276 } 1277 1278 free(el, M_MFIBUF); 1279 return (0); 1280 } 1281 1282 static int 1283 mfi_add_ld(struct mfi_softc *sc, int id) 1284 { 1285 struct mfi_command *cm; 1286 struct mfi_dcmd_frame *dcmd = NULL; 1287 struct mfi_ld_info *ld_info = NULL; 1288 int error; 1289 1290 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1291 1292 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, 1293 (void **)&ld_info, sizeof(*ld_info)); 1294 if (error) { 1295 device_printf(sc->mfi_dev, 1296 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); 1297 if (ld_info) 1298 free(ld_info, M_MFIBUF); 1299 return (error); 1300 } 1301 cm->cm_flags = MFI_CMD_DATAIN; 1302 dcmd = &cm->cm_frame->dcmd; 1303 dcmd->mbox[0] = id; 1304 if (mfi_wait_command(sc, cm) != 0) { 1305 device_printf(sc->mfi_dev, 1306 "Failed to get logical drive: %d\n", id); 1307 free(ld_info, M_MFIBUF); 1308 return (0); 1309 } 1310 1311 mfi_add_ld_complete(cm); 1312 return (0); 1313 } 1314 1315 static void 1316 mfi_add_ld_complete(struct mfi_command *cm) 1317 { 1318 struct mfi_frame_header *hdr; 1319 struct mfi_ld_info *ld_info; 1320 struct mfi_softc *sc; 1321 device_t child; 1322 1323 sc = cm->cm_sc; 1324 hdr = &cm->cm_frame->header; 1325 ld_info = cm->cm_private; 1326 1327 if (hdr->cmd_status != MFI_STAT_OK) { 1328 free(ld_info, M_MFIBUF); 1329 mfi_release_command(cm); 1330 return; 1331 } 1332 mfi_release_command(cm); 1333 1334 mtx_unlock(&sc->mfi_io_lock); 1335 mtx_lock(&Giant); 1336 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { 1337 device_printf(sc->mfi_dev, "Failed to add logical disk\n"); 1338 free(ld_info, M_MFIBUF); 1339 mtx_unlock(&Giant); 1340 mtx_lock(&sc->mfi_io_lock); 1341 return; 1342 } 1343 1344 device_set_ivars(child, ld_info); 1345 device_set_desc(child, "MFI Logical Disk"); 1346 bus_generic_attach(sc->mfi_dev); 1347 mtx_unlock(&Giant); 1348 mtx_lock(&sc->mfi_io_lock); 1349 } 1350 1351 static struct mfi_command * 1352 mfi_bio_command(struct mfi_softc *sc) 1353 { 1354 struct mfi_io_frame *io; 1355 struct mfi_command *cm; 1356 struct bio *bio; 1357 int flags, blkcount; 1358 1359 if ((cm = mfi_dequeue_free(sc)) == NULL) 1360 return (NULL); 1361 1362 if ((bio = mfi_dequeue_bio(sc)) == NULL) { 1363 mfi_release_command(cm); 1364 return (NULL); 1365 } 1366 1367 io = &cm->cm_frame->io; 1368 switch (bio->bio_cmd & 0x03) { 1369 case BIO_READ: 1370 io->header.cmd = MFI_CMD_LD_READ; 1371 flags = MFI_CMD_DATAIN; 1372 break; 1373 case BIO_WRITE: 1374 io->header.cmd = MFI_CMD_LD_WRITE; 1375 flags = MFI_CMD_DATAOUT; 1376 break; 1377 default: 1378 panic("Invalid bio command"); 1379 } 1380 1381 /* Cheat with the sector length to avoid a non-constant division */ 1382 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1383 io->header.target_id = (uintptr_t)bio->bio_driver1; 1384 io->header.timeout = 0; 1385 io->header.flags = 0; 1386 io->header.sense_len = MFI_SENSE_LEN; 1387 io->header.data_len = blkcount; 1388 io->sense_addr_lo = cm->cm_sense_busaddr; 1389 io->sense_addr_hi = 0; 1390 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 1391 io->lba_lo = bio->bio_pblkno & 0xffffffff; 1392 cm->cm_complete = mfi_bio_complete; 1393 cm->cm_private = bio; 1394 cm->cm_data = bio->bio_data; 1395 cm->cm_len = bio->bio_bcount; 1396 cm->cm_sg = &io->sgl; 1397 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1398 cm->cm_flags = flags; 1399 return (cm); 1400 } 1401 1402 static void 1403 mfi_bio_complete(struct mfi_command *cm) 1404 { 1405 struct bio *bio; 1406 struct mfi_frame_header *hdr; 1407 struct mfi_softc *sc; 1408 1409 bio = cm->cm_private; 1410 hdr = &cm->cm_frame->header; 1411 sc = cm->cm_sc; 1412 1413 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { 1414 bio->bio_flags |= BIO_ERROR; 1415 bio->bio_error = EIO; 1416 device_printf(sc->mfi_dev, "I/O error, status= %d " 1417 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); 1418 mfi_print_sense(cm->cm_sc, cm->cm_sense); 1419 } else if (cm->cm_error != 0) { 1420 bio->bio_flags |= BIO_ERROR; 1421 } 1422 1423 mfi_release_command(cm); 1424 mfi_disk_complete(bio); 1425 } 1426 1427 void 1428 mfi_startio(struct mfi_softc *sc) 1429 { 1430 struct mfi_command *cm; 1431 struct ccb_hdr *ccbh; 1432 1433 for (;;) { 1434 /* Don't bother if we're short on resources */ 1435 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 1436 break; 1437 1438 /* Try a command that has already been prepared */ 1439 cm = mfi_dequeue_ready(sc); 1440 1441 if (cm == NULL) { 1442 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 1443 cm = sc->mfi_cam_start(ccbh); 1444 } 1445 1446 /* Nope, so look for work on the bioq */ 1447 if (cm == NULL) 1448 cm = mfi_bio_command(sc); 1449 1450 /* No work available, so exit */ 1451 if (cm == NULL) 1452 break; 1453 1454 /* Send the command to the controller */ 1455 if (mfi_mapcmd(sc, cm) != 0) { 1456 mfi_requeue_ready(cm); 1457 break; 1458 } 1459 } 1460 } 1461 1462 static int 1463 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 1464 { 1465 int error, polled; 1466 1467 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1468 1469 if (cm->cm_data != NULL) { 1470 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0; 1471 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, 1472 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); 1473 if (error == EINPROGRESS) { 1474 sc->mfi_flags |= MFI_FLAGS_QFRZN; 1475 return (0); 1476 } 1477 } else { 1478 error = mfi_send_frame(sc, cm); 1479 } 1480 1481 return (error); 1482 } 1483 1484 static void 1485 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1486 { 1487 struct mfi_frame_header *hdr; 1488 struct mfi_command *cm; 1489 union mfi_sgl *sgl; 1490 struct mfi_softc *sc; 1491 int i, j, first, dir; 1492 1493 cm = (struct mfi_command *)arg; 1494 sc = cm->cm_sc; 1495 hdr = &cm->cm_frame->header; 1496 sgl = cm->cm_sg; 1497 1498 if (error) { 1499 printf("error %d in callback\n", error); 1500 cm->cm_error = error; 1501 mfi_complete(sc, cm); 1502 return; 1503 } 1504 1505 j = 0; 1506 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 1507 first = cm->cm_stp_len; 1508 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 1509 sgl->sg32[j].addr = segs[0].ds_addr; 1510 sgl->sg32[j++].len = first; 1511 } else { 1512 sgl->sg64[j].addr = segs[0].ds_addr; 1513 sgl->sg64[j++].len = first; 1514 } 1515 } else 1516 first = 0; 1517 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 1518 for (i = 0; i < nsegs; i++) { 1519 sgl->sg32[j].addr = segs[i].ds_addr + first; 1520 sgl->sg32[j++].len = segs[i].ds_len - first; 1521 first = 0; 1522 } 1523 } else { 1524 for (i = 0; i < nsegs; i++) { 1525 sgl->sg64[j].addr = segs[i].ds_addr + first; 1526 sgl->sg64[j++].len = segs[i].ds_len - first; 1527 first = 0; 1528 } 1529 hdr->flags |= MFI_FRAME_SGL64; 1530 } 1531 hdr->sg_count = j; 1532 1533 dir = 0; 1534 if (cm->cm_flags & MFI_CMD_DATAIN) { 1535 dir |= BUS_DMASYNC_PREREAD; 1536 hdr->flags |= MFI_FRAME_DIR_READ; 1537 } 1538 if (cm->cm_flags & MFI_CMD_DATAOUT) { 1539 dir |= BUS_DMASYNC_PREWRITE; 1540 hdr->flags |= MFI_FRAME_DIR_WRITE; 1541 } 1542 if (cm->cm_frame->header.cmd == MFI_CMD_STP) 1543 dir |= BUS_DMASYNC_PREWRITE; 1544 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1545 cm->cm_flags |= MFI_CMD_MAPPED; 1546 1547 /* 1548 * Instead of calculating the total number of frames in the 1549 * compound frame, it's already assumed that there will be at 1550 * least 1 frame, so don't compensate for the modulo of the 1551 * following division. 1552 */ 1553 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); 1554 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 1555 1556 mfi_send_frame(sc, cm); 1557 1558 return; 1559 } 1560 1561 static int 1562 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 1563 { 1564 struct mfi_frame_header *hdr; 1565 int tm = MFI_POLL_TIMEOUT_SECS * 1000; 1566 1567 hdr = &cm->cm_frame->header; 1568 1569 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { 1570 cm->cm_timestamp = time_uptime; 1571 mfi_enqueue_busy(cm); 1572 } else { 1573 hdr->cmd_status = MFI_STAT_INVALID_STATUS; 1574 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 1575 } 1576 1577 /* 1578 * The bus address of the command is aligned on a 64 byte boundary, 1579 * leaving the least 6 bits as zero. For whatever reason, the 1580 * hardware wants the address shifted right by three, leaving just 1581 * 3 zero bits. These three bits are then used as a prefetching 1582 * hint for the hardware to predict how many frames need to be 1583 * fetched across the bus. If a command has more than 8 frames 1584 * then the 3 bits are set to 0x7 and the firmware uses other 1585 * information in the command to determine the total amount to fetch. 1586 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames 1587 * is enough for both 32bit and 64bit systems. 1588 */ 1589 if (cm->cm_extra_frames > 7) 1590 cm->cm_extra_frames = 7; 1591 1592 sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames); 1593 1594 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) 1595 return (0); 1596 1597 /* This is a polled command, so busy-wait for it to complete. */ 1598 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1599 DELAY(1000); 1600 tm -= 1; 1601 if (tm <= 0) 1602 break; 1603 } 1604 1605 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1606 device_printf(sc->mfi_dev, "Frame %p timed out " 1607 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); 1608 return (ETIMEDOUT); 1609 } 1610 1611 return (0); 1612 } 1613 1614 static void 1615 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) 1616 { 1617 int dir; 1618 1619 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { 1620 dir = 0; 1621 if ((cm->cm_flags & MFI_CMD_DATAIN) || 1622 (cm->cm_frame->header.cmd == MFI_CMD_STP)) 1623 dir |= BUS_DMASYNC_POSTREAD; 1624 if (cm->cm_flags & MFI_CMD_DATAOUT) 1625 dir |= BUS_DMASYNC_POSTWRITE; 1626 1627 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 1628 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1629 cm->cm_flags &= ~MFI_CMD_MAPPED; 1630 } 1631 1632 cm->cm_flags |= MFI_CMD_COMPLETED; 1633 1634 if (cm->cm_complete != NULL) 1635 cm->cm_complete(cm); 1636 else 1637 wakeup(cm); 1638 } 1639 1640 static int 1641 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort) 1642 { 1643 struct mfi_command *cm; 1644 struct mfi_abort_frame *abort; 1645 int i = 0; 1646 1647 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1648 1649 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1650 return (EBUSY); 1651 } 1652 1653 abort = &cm->cm_frame->abort; 1654 abort->header.cmd = MFI_CMD_ABORT; 1655 abort->header.flags = 0; 1656 abort->abort_context = cm_abort->cm_frame->header.context; 1657 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr; 1658 abort->abort_mfi_addr_hi = 0; 1659 cm->cm_data = NULL; 1660 cm->cm_flags = MFI_CMD_POLLED; 1661 1662 sc->mfi_aen_cm->cm_aen_abort = 1; 1663 mfi_mapcmd(sc, cm); 1664 mfi_release_command(cm); 1665 1666 while (i < 5 && sc->mfi_aen_cm != NULL) { 1667 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz); 1668 i++; 1669 } 1670 1671 return (0); 1672 } 1673 1674 int 1675 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) 1676 { 1677 struct mfi_command *cm; 1678 struct mfi_io_frame *io; 1679 int error; 1680 1681 if ((cm = mfi_dequeue_free(sc)) == NULL) 1682 return (EBUSY); 1683 1684 io = &cm->cm_frame->io; 1685 io->header.cmd = MFI_CMD_LD_WRITE; 1686 io->header.target_id = id; 1687 io->header.timeout = 0; 1688 io->header.flags = 0; 1689 io->header.sense_len = MFI_SENSE_LEN; 1690 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 1691 io->sense_addr_lo = cm->cm_sense_busaddr; 1692 io->sense_addr_hi = 0; 1693 io->lba_hi = (lba & 0xffffffff00000000) >> 32; 1694 io->lba_lo = lba & 0xffffffff; 1695 cm->cm_data = virt; 1696 cm->cm_len = len; 1697 cm->cm_sg = &io->sgl; 1698 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 1699 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 1700 1701 error = mfi_mapcmd(sc, cm); 1702 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1703 BUS_DMASYNC_POSTWRITE); 1704 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1705 mfi_release_command(cm); 1706 1707 return (error); 1708 } 1709 1710 static int 1711 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1712 { 1713 struct mfi_softc *sc; 1714 int error; 1715 1716 sc = dev->si_drv1; 1717 1718 mtx_lock(&sc->mfi_io_lock); 1719 if (sc->mfi_detaching) 1720 error = ENXIO; 1721 else { 1722 sc->mfi_flags |= MFI_FLAGS_OPEN; 1723 error = 0; 1724 } 1725 mtx_unlock(&sc->mfi_io_lock); 1726 1727 return (error); 1728 } 1729 1730 static int 1731 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1732 { 1733 struct mfi_softc *sc; 1734 struct mfi_aen *mfi_aen_entry, *tmp; 1735 1736 sc = dev->si_drv1; 1737 1738 mtx_lock(&sc->mfi_io_lock); 1739 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 1740 1741 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 1742 if (mfi_aen_entry->p == curproc) { 1743 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1744 aen_link); 1745 free(mfi_aen_entry, M_MFIBUF); 1746 } 1747 } 1748 mtx_unlock(&sc->mfi_io_lock); 1749 return (0); 1750 } 1751 1752 static int 1753 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 1754 { 1755 1756 switch (opcode) { 1757 case MFI_DCMD_LD_DELETE: 1758 case MFI_DCMD_CFG_ADD: 1759 case MFI_DCMD_CFG_CLEAR: 1760 sx_xlock(&sc->mfi_config_lock); 1761 return (1); 1762 default: 1763 return (0); 1764 } 1765 } 1766 1767 static void 1768 mfi_config_unlock(struct mfi_softc *sc, int locked) 1769 { 1770 1771 if (locked) 1772 sx_xunlock(&sc->mfi_config_lock); 1773 } 1774 1775 /* Perform pre-issue checks on commands from userland and possibly veto them. */ 1776 static int 1777 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) 1778 { 1779 struct mfi_disk *ld, *ld2; 1780 int error; 1781 1782 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1783 error = 0; 1784 switch (cm->cm_frame->dcmd.opcode) { 1785 case MFI_DCMD_LD_DELETE: 1786 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1787 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1788 break; 1789 } 1790 if (ld == NULL) 1791 error = ENOENT; 1792 else 1793 error = mfi_disk_disable(ld); 1794 break; 1795 case MFI_DCMD_CFG_CLEAR: 1796 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1797 error = mfi_disk_disable(ld); 1798 if (error) 1799 break; 1800 } 1801 if (error) { 1802 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { 1803 if (ld2 == ld) 1804 break; 1805 mfi_disk_enable(ld2); 1806 } 1807 } 1808 break; 1809 default: 1810 break; 1811 } 1812 return (error); 1813 } 1814 1815 /* Perform post-issue checks on commands from userland. */ 1816 static void 1817 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm) 1818 { 1819 struct mfi_disk *ld, *ldn; 1820 1821 switch (cm->cm_frame->dcmd.opcode) { 1822 case MFI_DCMD_LD_DELETE: 1823 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1824 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 1825 break; 1826 } 1827 KASSERT(ld != NULL, ("volume dissappeared")); 1828 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1829 mtx_unlock(&sc->mfi_io_lock); 1830 mtx_lock(&Giant); 1831 device_delete_child(sc->mfi_dev, ld->ld_dev); 1832 mtx_unlock(&Giant); 1833 mtx_lock(&sc->mfi_io_lock); 1834 } else 1835 mfi_disk_enable(ld); 1836 break; 1837 case MFI_DCMD_CFG_CLEAR: 1838 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 1839 mtx_unlock(&sc->mfi_io_lock); 1840 mtx_lock(&Giant); 1841 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) { 1842 device_delete_child(sc->mfi_dev, ld->ld_dev); 1843 } 1844 mtx_unlock(&Giant); 1845 mtx_lock(&sc->mfi_io_lock); 1846 } else { 1847 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) 1848 mfi_disk_enable(ld); 1849 } 1850 break; 1851 case MFI_DCMD_CFG_ADD: 1852 mfi_ldprobe(sc); 1853 break; 1854 case MFI_DCMD_CFG_FOREIGN_IMPORT: 1855 mfi_ldprobe(sc); 1856 break; 1857 } 1858 } 1859 1860 static int 1861 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) 1862 { 1863 struct mfi_command *cm; 1864 struct mfi_dcmd_frame *dcmd; 1865 void *ioc_buf = NULL; 1866 uint32_t context; 1867 int error = 0, locked; 1868 1869 1870 if (ioc->buf_size > 0) { 1871 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); 1872 if (ioc_buf == NULL) { 1873 return (ENOMEM); 1874 } 1875 error = copyin(ioc->buf, ioc_buf, ioc->buf_size); 1876 if (error) { 1877 device_printf(sc->mfi_dev, "failed to copyin\n"); 1878 free(ioc_buf, M_MFIBUF); 1879 return (error); 1880 } 1881 } 1882 1883 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); 1884 1885 mtx_lock(&sc->mfi_io_lock); 1886 while ((cm = mfi_dequeue_free(sc)) == NULL) 1887 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); 1888 1889 /* Save context for later */ 1890 context = cm->cm_frame->header.context; 1891 1892 dcmd = &cm->cm_frame->dcmd; 1893 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); 1894 1895 cm->cm_sg = &dcmd->sgl; 1896 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1897 cm->cm_data = ioc_buf; 1898 cm->cm_len = ioc->buf_size; 1899 1900 /* restore context */ 1901 cm->cm_frame->header.context = context; 1902 1903 /* Cheat since we don't know if we're writing or reading */ 1904 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 1905 1906 error = mfi_check_command_pre(sc, cm); 1907 if (error) 1908 goto out; 1909 1910 error = mfi_wait_command(sc, cm); 1911 if (error) { 1912 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 1913 goto out; 1914 } 1915 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 1916 mfi_check_command_post(sc, cm); 1917 out: 1918 mfi_release_command(cm); 1919 mtx_unlock(&sc->mfi_io_lock); 1920 mfi_config_unlock(sc, locked); 1921 if (ioc->buf_size > 0) 1922 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 1923 if (ioc_buf) 1924 free(ioc_buf, M_MFIBUF); 1925 return (error); 1926 } 1927 1928 #ifdef __amd64__ 1929 #define PTRIN(p) ((void *)(uintptr_t)(p)) 1930 #else 1931 #define PTRIN(p) (p) 1932 #endif 1933 1934 static int 1935 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 1936 { 1937 struct mfi_softc *sc; 1938 union mfi_statrequest *ms; 1939 struct mfi_ioc_packet *ioc; 1940 #ifdef __amd64__ 1941 struct mfi_ioc_packet32 *ioc32; 1942 #endif 1943 struct mfi_ioc_aen *aen; 1944 struct mfi_command *cm = NULL; 1945 uint32_t context; 1946 union mfi_sense_ptr sense_ptr; 1947 uint8_t *data = NULL, *temp, *addr; 1948 size_t len; 1949 int i; 1950 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 1951 #ifdef __amd64__ 1952 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 1953 struct mfi_ioc_passthru iop_swab; 1954 #endif 1955 int error, locked; 1956 1957 sc = dev->si_drv1; 1958 error = 0; 1959 1960 switch (cmd) { 1961 case MFIIO_STATS: 1962 ms = (union mfi_statrequest *)arg; 1963 switch (ms->ms_item) { 1964 case MFIQ_FREE: 1965 case MFIQ_BIO: 1966 case MFIQ_READY: 1967 case MFIQ_BUSY: 1968 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 1969 sizeof(struct mfi_qstat)); 1970 break; 1971 default: 1972 error = ENOIOCTL; 1973 break; 1974 } 1975 break; 1976 case MFIIO_QUERY_DISK: 1977 { 1978 struct mfi_query_disk *qd; 1979 struct mfi_disk *ld; 1980 1981 qd = (struct mfi_query_disk *)arg; 1982 mtx_lock(&sc->mfi_io_lock); 1983 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1984 if (ld->ld_id == qd->array_id) 1985 break; 1986 } 1987 if (ld == NULL) { 1988 qd->present = 0; 1989 mtx_unlock(&sc->mfi_io_lock); 1990 return (0); 1991 } 1992 qd->present = 1; 1993 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 1994 qd->open = 1; 1995 bzero(qd->devname, SPECNAMELEN + 1); 1996 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 1997 mtx_unlock(&sc->mfi_io_lock); 1998 break; 1999 } 2000 case MFI_CMD: 2001 #ifdef __amd64__ 2002 case MFI_CMD32: 2003 #endif 2004 { 2005 devclass_t devclass; 2006 ioc = (struct mfi_ioc_packet *)arg; 2007 int adapter; 2008 2009 adapter = ioc->mfi_adapter_no; 2010 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 2011 devclass = devclass_find("mfi"); 2012 sc = devclass_get_softc(devclass, adapter); 2013 } 2014 mtx_lock(&sc->mfi_io_lock); 2015 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2016 mtx_unlock(&sc->mfi_io_lock); 2017 return (EBUSY); 2018 } 2019 mtx_unlock(&sc->mfi_io_lock); 2020 locked = 0; 2021 2022 /* 2023 * save off original context since copying from user 2024 * will clobber some data 2025 */ 2026 context = cm->cm_frame->header.context; 2027 2028 bcopy(ioc->mfi_frame.raw, cm->cm_frame, 2029 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 2030 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 2031 * ioc->mfi_sge_count) + ioc->mfi_sgl_off; 2032 if (ioc->mfi_sge_count) { 2033 cm->cm_sg = 2034 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; 2035 } 2036 cm->cm_flags = 0; 2037 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 2038 cm->cm_flags |= MFI_CMD_DATAIN; 2039 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 2040 cm->cm_flags |= MFI_CMD_DATAOUT; 2041 /* Legacy app shim */ 2042 if (cm->cm_flags == 0) 2043 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 2044 cm->cm_len = cm->cm_frame->header.data_len; 2045 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 2046 #ifdef __amd64__ 2047 if (cmd == MFI_CMD) { 2048 #endif 2049 /* Native */ 2050 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; 2051 #ifdef __amd64__ 2052 } else { 2053 /* 32bit on 64bit */ 2054 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2055 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; 2056 } 2057 #endif 2058 cm->cm_len += cm->cm_stp_len; 2059 } 2060 if (cm->cm_len && 2061 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 2062 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2063 M_WAITOK | M_ZERO); 2064 if (cm->cm_data == NULL) { 2065 device_printf(sc->mfi_dev, "Malloc failed\n"); 2066 goto out; 2067 } 2068 } else { 2069 cm->cm_data = 0; 2070 } 2071 2072 /* restore header context */ 2073 cm->cm_frame->header.context = context; 2074 2075 temp = data; 2076 if ((cm->cm_flags & MFI_CMD_DATAOUT) || 2077 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 2078 for (i = 0; i < ioc->mfi_sge_count; i++) { 2079 #ifdef __amd64__ 2080 if (cmd == MFI_CMD) { 2081 #endif 2082 /* Native */ 2083 addr = ioc->mfi_sgl[i].iov_base; 2084 len = ioc->mfi_sgl[i].iov_len; 2085 #ifdef __amd64__ 2086 } else { 2087 /* 32bit on 64bit */ 2088 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2089 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 2090 len = ioc32->mfi_sgl[i].iov_len; 2091 } 2092 #endif 2093 error = copyin(addr, temp, len); 2094 if (error != 0) { 2095 device_printf(sc->mfi_dev, 2096 "Copy in failed\n"); 2097 goto out; 2098 } 2099 temp = &temp[len]; 2100 } 2101 } 2102 2103 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2104 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2105 2106 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 2107 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr; 2108 cm->cm_frame->pass.sense_addr_hi = 0; 2109 } 2110 2111 mtx_lock(&sc->mfi_io_lock); 2112 error = mfi_check_command_pre(sc, cm); 2113 if (error) { 2114 mtx_unlock(&sc->mfi_io_lock); 2115 goto out; 2116 } 2117 2118 if ((error = mfi_wait_command(sc, cm)) != 0) { 2119 device_printf(sc->mfi_dev, 2120 "Controller polled failed\n"); 2121 mtx_unlock(&sc->mfi_io_lock); 2122 goto out; 2123 } 2124 2125 mfi_check_command_post(sc, cm); 2126 mtx_unlock(&sc->mfi_io_lock); 2127 2128 temp = data; 2129 if ((cm->cm_flags & MFI_CMD_DATAIN) || 2130 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 2131 for (i = 0; i < ioc->mfi_sge_count; i++) { 2132 #ifdef __amd64__ 2133 if (cmd == MFI_CMD) { 2134 #endif 2135 /* Native */ 2136 addr = ioc->mfi_sgl[i].iov_base; 2137 len = ioc->mfi_sgl[i].iov_len; 2138 #ifdef __amd64__ 2139 } else { 2140 /* 32bit on 64bit */ 2141 ioc32 = (struct mfi_ioc_packet32 *)ioc; 2142 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 2143 len = ioc32->mfi_sgl[i].iov_len; 2144 } 2145 #endif 2146 error = copyout(temp, addr, len); 2147 if (error != 0) { 2148 device_printf(sc->mfi_dev, 2149 "Copy out failed\n"); 2150 goto out; 2151 } 2152 temp = &temp[len]; 2153 } 2154 } 2155 2156 if (ioc->mfi_sense_len) { 2157 /* get user-space sense ptr then copy out sense */ 2158 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], 2159 &sense_ptr.sense_ptr_data[0], 2160 sizeof(sense_ptr.sense_ptr_data)); 2161 #ifdef __amd64__ 2162 if (cmd != MFI_CMD) { 2163 /* 2164 * not 64bit native so zero out any address 2165 * over 32bit */ 2166 sense_ptr.addr.high = 0; 2167 } 2168 #endif 2169 error = copyout(cm->cm_sense, sense_ptr.user_space, 2170 ioc->mfi_sense_len); 2171 if (error != 0) { 2172 device_printf(sc->mfi_dev, 2173 "Copy out failed\n"); 2174 goto out; 2175 } 2176 } 2177 2178 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; 2179 out: 2180 mfi_config_unlock(sc, locked); 2181 if (data) 2182 free(data, M_MFIBUF); 2183 if (cm) { 2184 mtx_lock(&sc->mfi_io_lock); 2185 mfi_release_command(cm); 2186 mtx_unlock(&sc->mfi_io_lock); 2187 } 2188 2189 break; 2190 } 2191 case MFI_SET_AEN: 2192 aen = (struct mfi_ioc_aen *)arg; 2193 error = mfi_aen_register(sc, aen->aen_seq_num, 2194 aen->aen_class_locale); 2195 2196 break; 2197 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2198 { 2199 devclass_t devclass; 2200 struct mfi_linux_ioc_packet l_ioc; 2201 int adapter; 2202 2203 devclass = devclass_find("mfi"); 2204 if (devclass == NULL) 2205 return (ENOENT); 2206 2207 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2208 if (error) 2209 return (error); 2210 adapter = l_ioc.lioc_adapter_no; 2211 sc = devclass_get_softc(devclass, adapter); 2212 if (sc == NULL) 2213 return (ENOENT); 2214 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2215 cmd, arg, flag, td)); 2216 break; 2217 } 2218 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2219 { 2220 devclass_t devclass; 2221 struct mfi_linux_ioc_aen l_aen; 2222 int adapter; 2223 2224 devclass = devclass_find("mfi"); 2225 if (devclass == NULL) 2226 return (ENOENT); 2227 2228 error = copyin(arg, &l_aen, sizeof(l_aen)); 2229 if (error) 2230 return (error); 2231 adapter = l_aen.laen_adapter_no; 2232 sc = devclass_get_softc(devclass, adapter); 2233 if (sc == NULL) 2234 return (ENOENT); 2235 return (mfi_linux_ioctl_int(sc->mfi_cdev, 2236 cmd, arg, flag, td)); 2237 break; 2238 } 2239 #ifdef __amd64__ 2240 case MFIIO_PASSTHRU32: 2241 iop_swab.ioc_frame = iop32->ioc_frame; 2242 iop_swab.buf_size = iop32->buf_size; 2243 iop_swab.buf = PTRIN(iop32->buf); 2244 iop = &iop_swab; 2245 /* FALLTHROUGH */ 2246 #endif 2247 case MFIIO_PASSTHRU: 2248 error = mfi_user_command(sc, iop); 2249 #ifdef __amd64__ 2250 if (cmd == MFIIO_PASSTHRU32) 2251 iop32->ioc_frame = iop_swab.ioc_frame; 2252 #endif 2253 break; 2254 default: 2255 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2256 error = ENOENT; 2257 break; 2258 } 2259 2260 return (error); 2261 } 2262 2263 static int 2264 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2265 { 2266 struct mfi_softc *sc; 2267 struct mfi_linux_ioc_packet l_ioc; 2268 struct mfi_linux_ioc_aen l_aen; 2269 struct mfi_command *cm = NULL; 2270 struct mfi_aen *mfi_aen_entry; 2271 union mfi_sense_ptr sense_ptr; 2272 uint32_t context; 2273 uint8_t *data = NULL, *temp; 2274 int i; 2275 int error, locked; 2276 2277 sc = dev->si_drv1; 2278 error = 0; 2279 switch (cmd) { 2280 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 2281 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 2282 if (error != 0) 2283 return (error); 2284 2285 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 2286 return (EINVAL); 2287 } 2288 2289 mtx_lock(&sc->mfi_io_lock); 2290 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2291 mtx_unlock(&sc->mfi_io_lock); 2292 return (EBUSY); 2293 } 2294 mtx_unlock(&sc->mfi_io_lock); 2295 locked = 0; 2296 2297 /* 2298 * save off original context since copying from user 2299 * will clobber some data 2300 */ 2301 context = cm->cm_frame->header.context; 2302 2303 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 2304 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 2305 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 2306 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 2307 if (l_ioc.lioc_sge_count) 2308 cm->cm_sg = 2309 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 2310 cm->cm_flags = 0; 2311 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 2312 cm->cm_flags |= MFI_CMD_DATAIN; 2313 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 2314 cm->cm_flags |= MFI_CMD_DATAOUT; 2315 cm->cm_len = cm->cm_frame->header.data_len; 2316 if (cm->cm_len && 2317 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 2318 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 2319 M_WAITOK | M_ZERO); 2320 if (cm->cm_data == NULL) { 2321 device_printf(sc->mfi_dev, "Malloc failed\n"); 2322 goto out; 2323 } 2324 } else { 2325 cm->cm_data = 0; 2326 } 2327 2328 /* restore header context */ 2329 cm->cm_frame->header.context = context; 2330 2331 temp = data; 2332 if (cm->cm_flags & MFI_CMD_DATAOUT) { 2333 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2334 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 2335 temp, 2336 l_ioc.lioc_sgl[i].iov_len); 2337 if (error != 0) { 2338 device_printf(sc->mfi_dev, 2339 "Copy in failed\n"); 2340 goto out; 2341 } 2342 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2343 } 2344 } 2345 2346 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 2347 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 2348 2349 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 2350 cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr; 2351 cm->cm_frame->pass.sense_addr_hi = 0; 2352 } 2353 2354 mtx_lock(&sc->mfi_io_lock); 2355 error = mfi_check_command_pre(sc, cm); 2356 if (error) { 2357 mtx_unlock(&sc->mfi_io_lock); 2358 goto out; 2359 } 2360 2361 if ((error = mfi_wait_command(sc, cm)) != 0) { 2362 device_printf(sc->mfi_dev, 2363 "Controller polled failed\n"); 2364 mtx_unlock(&sc->mfi_io_lock); 2365 goto out; 2366 } 2367 2368 mfi_check_command_post(sc, cm); 2369 mtx_unlock(&sc->mfi_io_lock); 2370 2371 temp = data; 2372 if (cm->cm_flags & MFI_CMD_DATAIN) { 2373 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 2374 error = copyout(temp, 2375 PTRIN(l_ioc.lioc_sgl[i].iov_base), 2376 l_ioc.lioc_sgl[i].iov_len); 2377 if (error != 0) { 2378 device_printf(sc->mfi_dev, 2379 "Copy out failed\n"); 2380 goto out; 2381 } 2382 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 2383 } 2384 } 2385 2386 if (l_ioc.lioc_sense_len) { 2387 /* get user-space sense ptr then copy out sense */ 2388 bcopy(&((struct mfi_linux_ioc_packet*)arg) 2389 ->lioc_frame.raw[l_ioc.lioc_sense_off], 2390 &sense_ptr.sense_ptr_data[0], 2391 sizeof(sense_ptr.sense_ptr_data)); 2392 #ifdef __amd64__ 2393 /* 2394 * only 32bit Linux support so zero out any 2395 * address over 32bit 2396 */ 2397 sense_ptr.addr.high = 0; 2398 #endif 2399 error = copyout(cm->cm_sense, sense_ptr.user_space, 2400 l_ioc.lioc_sense_len); 2401 if (error != 0) { 2402 device_printf(sc->mfi_dev, 2403 "Copy out failed\n"); 2404 goto out; 2405 } 2406 } 2407 2408 error = copyout(&cm->cm_frame->header.cmd_status, 2409 &((struct mfi_linux_ioc_packet*)arg) 2410 ->lioc_frame.hdr.cmd_status, 2411 1); 2412 if (error != 0) { 2413 device_printf(sc->mfi_dev, 2414 "Copy out failed\n"); 2415 goto out; 2416 } 2417 2418 out: 2419 mfi_config_unlock(sc, locked); 2420 if (data) 2421 free(data, M_MFIBUF); 2422 if (cm) { 2423 mtx_lock(&sc->mfi_io_lock); 2424 mfi_release_command(cm); 2425 mtx_unlock(&sc->mfi_io_lock); 2426 } 2427 2428 return (error); 2429 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 2430 error = copyin(arg, &l_aen, sizeof(l_aen)); 2431 if (error != 0) 2432 return (error); 2433 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); 2434 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, 2435 M_WAITOK); 2436 mtx_lock(&sc->mfi_io_lock); 2437 if (mfi_aen_entry != NULL) { 2438 mfi_aen_entry->p = curproc; 2439 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, 2440 aen_link); 2441 } 2442 error = mfi_aen_register(sc, l_aen.laen_seq_num, 2443 l_aen.laen_class_locale); 2444 2445 if (error != 0) { 2446 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2447 aen_link); 2448 free(mfi_aen_entry, M_MFIBUF); 2449 } 2450 mtx_unlock(&sc->mfi_io_lock); 2451 2452 return (error); 2453 default: 2454 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 2455 error = ENOENT; 2456 break; 2457 } 2458 2459 return (error); 2460 } 2461 2462 static int 2463 mfi_poll(struct cdev *dev, int poll_events, struct thread *td) 2464 { 2465 struct mfi_softc *sc; 2466 int revents = 0; 2467 2468 sc = dev->si_drv1; 2469 2470 if (poll_events & (POLLIN | POLLRDNORM)) { 2471 if (sc->mfi_aen_triggered != 0) { 2472 revents |= poll_events & (POLLIN | POLLRDNORM); 2473 sc->mfi_aen_triggered = 0; 2474 } 2475 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { 2476 revents |= POLLERR; 2477 } 2478 } 2479 2480 if (revents == 0) { 2481 if (poll_events & (POLLIN | POLLRDNORM)) { 2482 sc->mfi_poll_waiting = 1; 2483 selrecord(td, &sc->mfi_select); 2484 } 2485 } 2486 2487 return revents; 2488 } 2489 2490 2491 static void 2492 mfi_dump_all(void) 2493 { 2494 struct mfi_softc *sc; 2495 struct mfi_command *cm; 2496 devclass_t dc; 2497 time_t deadline; 2498 int timedout; 2499 int i; 2500 2501 dc = devclass_find("mfi"); 2502 if (dc == NULL) { 2503 printf("No mfi dev class\n"); 2504 return; 2505 } 2506 2507 for (i = 0; ; i++) { 2508 sc = devclass_get_softc(dc, i); 2509 if (sc == NULL) 2510 break; 2511 device_printf(sc->mfi_dev, "Dumping\n\n"); 2512 timedout = 0; 2513 deadline = time_uptime - MFI_CMD_TIMEOUT; 2514 mtx_lock(&sc->mfi_io_lock); 2515 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2516 if (cm->cm_timestamp < deadline) { 2517 device_printf(sc->mfi_dev, 2518 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2519 (int)(time_uptime - cm->cm_timestamp)); 2520 MFI_PRINT_CMD(cm); 2521 timedout++; 2522 } 2523 } 2524 2525 #if 0 2526 if (timedout) 2527 MFI_DUMP_CMDS(SC); 2528 #endif 2529 2530 mtx_unlock(&sc->mfi_io_lock); 2531 } 2532 2533 return; 2534 } 2535 2536 static void 2537 mfi_timeout(void *data) 2538 { 2539 struct mfi_softc *sc = (struct mfi_softc *)data; 2540 struct mfi_command *cm; 2541 time_t deadline; 2542 int timedout = 0; 2543 2544 deadline = time_uptime - MFI_CMD_TIMEOUT; 2545 mtx_lock(&sc->mfi_io_lock); 2546 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 2547 if (sc->mfi_aen_cm == cm) 2548 continue; 2549 if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) { 2550 device_printf(sc->mfi_dev, 2551 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, 2552 (int)(time_uptime - cm->cm_timestamp)); 2553 MFI_PRINT_CMD(cm); 2554 MFI_VALIDATE_CMD(sc, cm); 2555 timedout++; 2556 } 2557 } 2558 2559 #if 0 2560 if (timedout) 2561 MFI_DUMP_CMDS(SC); 2562 #endif 2563 2564 mtx_unlock(&sc->mfi_io_lock); 2565 2566 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 2567 mfi_timeout, sc); 2568 2569 if (0) 2570 mfi_dump_all(); 2571 return; 2572 } 2573