1 /*- 2 * Copyright (c) 2006 IronPort Systems 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /*- 27 * Copyright (c) 2007 LSI Corp. 28 * Copyright (c) 2007 Rajesh Prabhakaran. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include "opt_compat.h" 57 #include "opt_mfi.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/sysctl.h> 62 #include <sys/malloc.h> 63 #include <sys/kernel.h> 64 #include <sys/poll.h> 65 #include <sys/selinfo.h> 66 #include <sys/bus.h> 67 #include <sys/conf.h> 68 #include <sys/eventhandler.h> 69 #include <sys/rman.h> 70 #include <sys/bus_dma.h> 71 #include <sys/bio.h> 72 #include <sys/ioccom.h> 73 #include <sys/uio.h> 74 #include <sys/proc.h> 75 #include <sys/signalvar.h> 76 #include <sys/sysent.h> 77 #include <sys/taskqueue.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 82 #include <dev/mfi/mfireg.h> 83 #include <dev/mfi/mfi_ioctl.h> 84 #include <dev/mfi/mfivar.h> 85 #include <sys/interrupt.h> 86 #include <sys/priority.h> 87 88 static int mfi_alloc_commands(struct mfi_softc *); 89 static int mfi_comms_init(struct mfi_softc *); 90 static int mfi_get_controller_info(struct mfi_softc *); 91 static int mfi_get_log_state(struct mfi_softc *, 92 struct mfi_evt_log_state **); 93 static int mfi_parse_entries(struct mfi_softc *, int, int); 94 static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); 95 static void mfi_startup(void *arg); 96 static void mfi_intr(void *arg); 97 static void mfi_ldprobe(struct mfi_softc *sc); 98 static void mfi_syspdprobe(struct mfi_softc *sc); 99 static void mfi_handle_evt(void *context, int pending); 100 static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); 101 static void mfi_aen_complete(struct mfi_command *); 102 static int mfi_add_ld(struct mfi_softc *sc, int); 103 static void mfi_add_ld_complete(struct mfi_command *); 104 static int mfi_add_sys_pd(struct mfi_softc *sc, int); 105 static void mfi_add_sys_pd_complete(struct mfi_command *); 106 static struct mfi_command * mfi_bio_command(struct mfi_softc *); 107 static void mfi_bio_complete(struct mfi_command *); 108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*); 109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*); 110 static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); 111 static int mfi_abort(struct mfi_softc *, struct mfi_command *); 112 static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *); 113 static void mfi_timeout(void *); 114 static int mfi_user_command(struct mfi_softc *, 115 struct mfi_ioc_passthru *); 116 static void mfi_enable_intr_xscale(struct mfi_softc *sc); 117 static void mfi_enable_intr_ppc(struct mfi_softc *sc); 118 static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); 119 static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); 120 static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); 121 static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); 122 static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, 123 uint32_t frame_cnt); 124 static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, 125 uint32_t frame_cnt); 126 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode); 127 static void mfi_config_unlock(struct mfi_softc *sc, int locked); 128 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm); 129 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm); 130 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm); 131 132 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); 133 static int mfi_event_locale = MFI_EVT_LOCALE_ALL; 134 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale); 135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale, 136 0, "event message locale"); 137 138 static int mfi_event_class = MFI_EVT_CLASS_INFO; 139 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class); 140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class, 141 0, "event message class"); 142 143 static int mfi_max_cmds = 128; 144 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds); 145 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds, 146 0, "Max commands"); 147 148 static int mfi_detect_jbod_change = 1; 149 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change); 150 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW, 151 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD"); 152 153 /* Management interface */ 154 static d_open_t mfi_open; 155 static d_close_t mfi_close; 156 static d_ioctl_t mfi_ioctl; 157 static d_poll_t mfi_poll; 158 159 static struct cdevsw mfi_cdevsw = { 160 .d_version = D_VERSION, 161 .d_flags = 0, 162 .d_open = mfi_open, 163 .d_close = mfi_close, 164 .d_ioctl = mfi_ioctl, 165 .d_poll = mfi_poll, 166 .d_name = "mfi", 167 }; 168 169 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); 170 171 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH 172 struct mfi_skinny_dma_info mfi_skinny; 173 174 static void 175 mfi_enable_intr_xscale(struct mfi_softc *sc) 176 { 177 MFI_WRITE4(sc, MFI_OMSK, 0x01); 178 } 179 180 static void 181 mfi_enable_intr_ppc(struct mfi_softc *sc) 182 { 183 if (sc->mfi_flags & MFI_FLAGS_1078) { 184 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); 185 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); 186 } 187 else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 188 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); 189 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM); 190 } 191 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { 192 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001); 193 } 194 } 195 196 static int32_t 197 mfi_read_fw_status_xscale(struct mfi_softc *sc) 198 { 199 return MFI_READ4(sc, MFI_OMSG0); 200 } 201 202 static int32_t 203 mfi_read_fw_status_ppc(struct mfi_softc *sc) 204 { 205 return MFI_READ4(sc, MFI_OSP0); 206 } 207 208 static int 209 mfi_check_clear_intr_xscale(struct mfi_softc *sc) 210 { 211 int32_t status; 212 213 status = MFI_READ4(sc, MFI_OSTS); 214 if ((status & MFI_OSTS_INTR_VALID) == 0) 215 return 1; 216 217 MFI_WRITE4(sc, MFI_OSTS, status); 218 return 0; 219 } 220 221 static int 222 mfi_check_clear_intr_ppc(struct mfi_softc *sc) 223 { 224 int32_t status; 225 226 status = MFI_READ4(sc, MFI_OSTS); 227 if (sc->mfi_flags & MFI_FLAGS_1078) { 228 if (!(status & MFI_1078_RM)) { 229 return 1; 230 } 231 } 232 else if (sc->mfi_flags & MFI_FLAGS_GEN2) { 233 if (!(status & MFI_GEN2_RM)) { 234 return 1; 235 } 236 } 237 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { 238 if (!(status & MFI_SKINNY_RM)) { 239 return 1; 240 } 241 } 242 if (sc->mfi_flags & MFI_FLAGS_SKINNY) 243 MFI_WRITE4(sc, MFI_OSTS, status); 244 else 245 MFI_WRITE4(sc, MFI_ODCR0, status); 246 return 0; 247 } 248 249 static void 250 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) 251 { 252 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); 253 } 254 255 static void 256 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) 257 { 258 if (sc->mfi_flags & MFI_FLAGS_SKINNY) { 259 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 ); 260 MFI_WRITE4(sc, MFI_IQPH, 0x00000000); 261 } else { 262 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 ); 263 } 264 } 265 266 int 267 mfi_transition_firmware(struct mfi_softc *sc) 268 { 269 uint32_t fw_state, cur_state; 270 int max_wait, i; 271 uint32_t cur_abs_reg_val = 0; 272 uint32_t prev_abs_reg_val = 0; 273 274 cur_abs_reg_val = sc->mfi_read_fw_status(sc); 275 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; 276 while (fw_state != MFI_FWSTATE_READY) { 277 if (bootverbose) 278 device_printf(sc->mfi_dev, "Waiting for firmware to " 279 "become ready\n"); 280 cur_state = fw_state; 281 switch (fw_state) { 282 case MFI_FWSTATE_FAULT: 283 device_printf(sc->mfi_dev, "Firmware fault\n"); 284 return (ENXIO); 285 case MFI_FWSTATE_WAIT_HANDSHAKE: 286 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) 287 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); 288 else 289 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); 290 max_wait = MFI_RESET_WAIT_TIME; 291 break; 292 case MFI_FWSTATE_OPERATIONAL: 293 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) 294 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7); 295 else 296 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); 297 max_wait = MFI_RESET_WAIT_TIME; 298 break; 299 case MFI_FWSTATE_UNDEFINED: 300 case MFI_FWSTATE_BB_INIT: 301 max_wait = MFI_RESET_WAIT_TIME; 302 break; 303 case MFI_FWSTATE_FW_INIT_2: 304 max_wait = MFI_RESET_WAIT_TIME; 305 break; 306 case MFI_FWSTATE_FW_INIT: 307 case MFI_FWSTATE_FLUSH_CACHE: 308 max_wait = MFI_RESET_WAIT_TIME; 309 break; 310 case MFI_FWSTATE_DEVICE_SCAN: 311 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */ 312 prev_abs_reg_val = cur_abs_reg_val; 313 break; 314 case MFI_FWSTATE_BOOT_MESSAGE_PENDING: 315 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) 316 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG); 317 else 318 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG); 319 max_wait = MFI_RESET_WAIT_TIME; 320 break; 321 default: 322 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n", 323 fw_state); 324 return (ENXIO); 325 } 326 for (i = 0; i < (max_wait * 10); i++) { 327 cur_abs_reg_val = sc->mfi_read_fw_status(sc); 328 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; 329 if (fw_state == cur_state) 330 DELAY(100000); 331 else 332 break; 333 } 334 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) { 335 /* Check the device scanning progress */ 336 if (prev_abs_reg_val != cur_abs_reg_val) { 337 continue; 338 } 339 } 340 if (fw_state == cur_state) { 341 device_printf(sc->mfi_dev, "Firmware stuck in state " 342 "%#x\n", fw_state); 343 return (ENXIO); 344 } 345 } 346 return (0); 347 } 348 349 static void 350 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 351 { 352 bus_addr_t *addr; 353 354 addr = arg; 355 *addr = segs[0].ds_addr; 356 } 357 358 359 int 360 mfi_attach(struct mfi_softc *sc) 361 { 362 uint32_t status; 363 int error, commsz, framessz, sensesz; 364 int frames, unit, max_fw_sge; 365 uint32_t tb_mem_size = 0; 366 367 if (sc == NULL) 368 return EINVAL; 369 370 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n", 371 MEGASAS_VERSION); 372 373 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); 374 sx_init(&sc->mfi_config_lock, "MFI config"); 375 TAILQ_INIT(&sc->mfi_ld_tqh); 376 TAILQ_INIT(&sc->mfi_syspd_tqh); 377 TAILQ_INIT(&sc->mfi_evt_queue); 378 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc); 379 TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc); 380 TAILQ_INIT(&sc->mfi_aen_pids); 381 TAILQ_INIT(&sc->mfi_cam_ccbq); 382 383 mfi_initq_free(sc); 384 mfi_initq_ready(sc); 385 mfi_initq_busy(sc); 386 mfi_initq_bio(sc); 387 388 sc->adpreset = 0; 389 sc->last_seq_num = 0; 390 sc->disableOnlineCtrlReset = 1; 391 sc->issuepend_done = 1; 392 sc->hw_crit_error = 0; 393 394 if (sc->mfi_flags & MFI_FLAGS_1064R) { 395 sc->mfi_enable_intr = mfi_enable_intr_xscale; 396 sc->mfi_read_fw_status = mfi_read_fw_status_xscale; 397 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; 398 sc->mfi_issue_cmd = mfi_issue_cmd_xscale; 399 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) { 400 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc; 401 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc; 402 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc; 403 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc; 404 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc; 405 sc->mfi_adp_reset = mfi_tbolt_adp_reset; 406 sc->mfi_tbolt = 1; 407 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh); 408 } else { 409 sc->mfi_enable_intr = mfi_enable_intr_ppc; 410 sc->mfi_read_fw_status = mfi_read_fw_status_ppc; 411 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; 412 sc->mfi_issue_cmd = mfi_issue_cmd_ppc; 413 } 414 415 416 /* Before we get too far, see if the firmware is working */ 417 if ((error = mfi_transition_firmware(sc)) != 0) { 418 device_printf(sc->mfi_dev, "Firmware not in READY state, " 419 "error %d\n", error); 420 return (ENXIO); 421 } 422 423 /* Start: LSIP200113393 */ 424 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 425 1, 0, /* algnmnt, boundary */ 426 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 427 BUS_SPACE_MAXADDR, /* highaddr */ 428 NULL, NULL, /* filter, filterarg */ 429 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */ 430 1, /* msegments */ 431 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */ 432 0, /* flags */ 433 NULL, NULL, /* lockfunc, lockarg */ 434 &sc->verbuf_h_dmat)) { 435 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n"); 436 return (ENOMEM); 437 } 438 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf, 439 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) { 440 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n"); 441 return (ENOMEM); 442 } 443 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t)); 444 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap, 445 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t), 446 mfi_addr_cb, &sc->verbuf_h_busaddr, 0); 447 /* End: LSIP200113393 */ 448 449 /* 450 * Get information needed for sizing the contiguous memory for the 451 * frame pool. Size down the sgl parameter since we know that 452 * we will never need more than what's required for MAXPHYS. 453 * It would be nice if these constants were available at runtime 454 * instead of compile time. 455 */ 456 status = sc->mfi_read_fw_status(sc); 457 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; 458 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; 459 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1)); 460 461 /* ThunderBolt Support get the contiguous memory */ 462 463 if (sc->mfi_flags & MFI_FLAGS_TBOLT) { 464 mfi_tbolt_init_globals(sc); 465 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n", 466 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status); 467 tb_mem_size = mfi_tbolt_get_memory_requirement(sc); 468 469 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 470 1, 0, /* algnmnt, boundary */ 471 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 472 BUS_SPACE_MAXADDR, /* highaddr */ 473 NULL, NULL, /* filter, filterarg */ 474 tb_mem_size, /* maxsize */ 475 1, /* msegments */ 476 tb_mem_size, /* maxsegsize */ 477 0, /* flags */ 478 NULL, NULL, /* lockfunc, lockarg */ 479 &sc->mfi_tb_dmat)) { 480 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); 481 return (ENOMEM); 482 } 483 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool, 484 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) { 485 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 486 return (ENOMEM); 487 } 488 bzero(sc->request_message_pool, tb_mem_size); 489 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap, 490 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0); 491 492 /* For ThunderBolt memory init */ 493 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 494 0x100, 0, /* alignmnt, boundary */ 495 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 496 BUS_SPACE_MAXADDR, /* highaddr */ 497 NULL, NULL, /* filter, filterarg */ 498 MFI_FRAME_SIZE, /* maxsize */ 499 1, /* msegments */ 500 MFI_FRAME_SIZE, /* maxsegsize */ 501 0, /* flags */ 502 NULL, NULL, /* lockfunc, lockarg */ 503 &sc->mfi_tb_init_dmat)) { 504 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n"); 505 return (ENOMEM); 506 } 507 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init, 508 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) { 509 device_printf(sc->mfi_dev, "Cannot allocate init memory\n"); 510 return (ENOMEM); 511 } 512 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE); 513 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap, 514 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb, 515 &sc->mfi_tb_init_busaddr, 0); 516 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool, 517 tb_mem_size)) { 518 device_printf(sc->mfi_dev, 519 "Thunderbolt pool preparation error\n"); 520 return 0; 521 } 522 523 /* 524 Allocate DMA memory mapping for MPI2 IOC Init descriptor, 525 we are taking it diffrent from what we have allocated for Request 526 and reply descriptors to avoid confusion later 527 */ 528 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST); 529 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 530 1, 0, /* algnmnt, boundary */ 531 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 532 BUS_SPACE_MAXADDR, /* highaddr */ 533 NULL, NULL, /* filter, filterarg */ 534 tb_mem_size, /* maxsize */ 535 1, /* msegments */ 536 tb_mem_size, /* maxsegsize */ 537 0, /* flags */ 538 NULL, NULL, /* lockfunc, lockarg */ 539 &sc->mfi_tb_ioc_init_dmat)) { 540 device_printf(sc->mfi_dev, 541 "Cannot allocate comms DMA tag\n"); 542 return (ENOMEM); 543 } 544 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat, 545 (void **)&sc->mfi_tb_ioc_init_desc, 546 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) { 547 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 548 return (ENOMEM); 549 } 550 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size); 551 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap, 552 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb, 553 &sc->mfi_tb_ioc_init_busaddr, 0); 554 } 555 /* 556 * Create the dma tag for data buffers. Used both for block I/O 557 * and for various internal data queries. 558 */ 559 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 560 1, 0, /* algnmnt, boundary */ 561 BUS_SPACE_MAXADDR, /* lowaddr */ 562 BUS_SPACE_MAXADDR, /* highaddr */ 563 NULL, NULL, /* filter, filterarg */ 564 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 565 sc->mfi_max_sge, /* nsegments */ 566 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 567 BUS_DMA_ALLOCNOW, /* flags */ 568 busdma_lock_mutex, /* lockfunc */ 569 &sc->mfi_io_lock, /* lockfuncarg */ 570 &sc->mfi_buffer_dmat)) { 571 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); 572 return (ENOMEM); 573 } 574 575 /* 576 * Allocate DMA memory for the comms queues. Keep it under 4GB for 577 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue 578 * entry, so the calculated size here will be will be 1 more than 579 * mfi_max_fw_cmds. This is apparently a requirement of the hardware. 580 */ 581 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) + 582 sizeof(struct mfi_hwcomms); 583 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 584 1, 0, /* algnmnt, boundary */ 585 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 586 BUS_SPACE_MAXADDR, /* highaddr */ 587 NULL, NULL, /* filter, filterarg */ 588 commsz, /* maxsize */ 589 1, /* msegments */ 590 commsz, /* maxsegsize */ 591 0, /* flags */ 592 NULL, NULL, /* lockfunc, lockarg */ 593 &sc->mfi_comms_dmat)) { 594 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); 595 return (ENOMEM); 596 } 597 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms, 598 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) { 599 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); 600 return (ENOMEM); 601 } 602 bzero(sc->mfi_comms, commsz); 603 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap, 604 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0); 605 /* 606 * Allocate DMA memory for the command frames. Keep them in the 607 * lower 4GB for efficiency. Calculate the size of the commands at 608 * the same time; each command is one 64 byte frame plus a set of 609 * additional frames for holding sg lists or other data. 610 * The assumption here is that the SG list will start at the second 611 * frame and not use the unused bytes in the first frame. While this 612 * isn't technically correct, it simplifies the calculation and allows 613 * for command frames that might be larger than an mfi_io_frame. 614 */ 615 if (sizeof(bus_addr_t) == 8) { 616 sc->mfi_sge_size = sizeof(struct mfi_sg64); 617 sc->mfi_flags |= MFI_FLAGS_SG64; 618 } else { 619 sc->mfi_sge_size = sizeof(struct mfi_sg32); 620 } 621 if (sc->mfi_flags & MFI_FLAGS_SKINNY) 622 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny); 623 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; 624 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; 625 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; 626 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 627 64, 0, /* algnmnt, boundary */ 628 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 629 BUS_SPACE_MAXADDR, /* highaddr */ 630 NULL, NULL, /* filter, filterarg */ 631 framessz, /* maxsize */ 632 1, /* nsegments */ 633 framessz, /* maxsegsize */ 634 0, /* flags */ 635 NULL, NULL, /* lockfunc, lockarg */ 636 &sc->mfi_frames_dmat)) { 637 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); 638 return (ENOMEM); 639 } 640 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, 641 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { 642 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); 643 return (ENOMEM); 644 } 645 bzero(sc->mfi_frames, framessz); 646 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, 647 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0); 648 /* 649 * Allocate DMA memory for the frame sense data. Keep them in the 650 * lower 4GB for efficiency 651 */ 652 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; 653 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 654 4, 0, /* algnmnt, boundary */ 655 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 656 BUS_SPACE_MAXADDR, /* highaddr */ 657 NULL, NULL, /* filter, filterarg */ 658 sensesz, /* maxsize */ 659 1, /* nsegments */ 660 sensesz, /* maxsegsize */ 661 0, /* flags */ 662 NULL, NULL, /* lockfunc, lockarg */ 663 &sc->mfi_sense_dmat)) { 664 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); 665 return (ENOMEM); 666 } 667 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, 668 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { 669 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); 670 return (ENOMEM); 671 } 672 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, 673 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0); 674 if ((error = mfi_alloc_commands(sc)) != 0) 675 return (error); 676 677 /* Before moving the FW to operational state, check whether 678 * hostmemory is required by the FW or not 679 */ 680 681 /* ThunderBolt MFI_IOC2 INIT */ 682 if (sc->mfi_flags & MFI_FLAGS_TBOLT) { 683 sc->mfi_disable_intr(sc); 684 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) { 685 device_printf(sc->mfi_dev, 686 "TB Init has failed with error %d\n",error); 687 return error; 688 } 689 690 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0) 691 return error; 692 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, 693 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc, 694 &sc->mfi_intr)) { 695 device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); 696 return (EINVAL); 697 } 698 sc->mfi_enable_intr(sc); 699 } else { 700 if ((error = mfi_comms_init(sc)) != 0) 701 return (error); 702 703 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, 704 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) { 705 device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); 706 return (EINVAL); 707 } 708 sc->mfi_enable_intr(sc); 709 } 710 if ((error = mfi_get_controller_info(sc)) != 0) 711 return (error); 712 sc->disableOnlineCtrlReset = 0; 713 714 /* Register a config hook to probe the bus for arrays */ 715 sc->mfi_ich.ich_func = mfi_startup; 716 sc->mfi_ich.ich_arg = sc; 717 if (config_intrhook_establish(&sc->mfi_ich) != 0) { 718 device_printf(sc->mfi_dev, "Cannot establish configuration " 719 "hook\n"); 720 return (EINVAL); 721 } 722 if ((error = mfi_aen_setup(sc, 0), 0) != 0) { 723 mtx_unlock(&sc->mfi_io_lock); 724 return (error); 725 } 726 727 /* 728 * Register a shutdown handler. 729 */ 730 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, 731 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { 732 device_printf(sc->mfi_dev, "Warning: shutdown event " 733 "registration failed\n"); 734 } 735 736 /* 737 * Create the control device for doing management 738 */ 739 unit = device_get_unit(sc->mfi_dev); 740 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 741 0640, "mfi%d", unit); 742 if (unit == 0) 743 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node"); 744 if (sc->mfi_cdev != NULL) 745 sc->mfi_cdev->si_drv1 = sc; 746 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 747 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 748 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, 749 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); 750 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), 751 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), 752 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, 753 &sc->mfi_keep_deleted_volumes, 0, 754 "Don't detach the mfid device for a busy volume that is deleted"); 755 756 device_add_child(sc->mfi_dev, "mfip", -1); 757 bus_generic_attach(sc->mfi_dev); 758 759 /* Start the timeout watchdog */ 760 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE); 761 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 762 mfi_timeout, sc); 763 764 if (sc->mfi_flags & MFI_FLAGS_TBOLT) { 765 mfi_tbolt_sync_map_info(sc); 766 } 767 768 return (0); 769 } 770 771 static int 772 mfi_alloc_commands(struct mfi_softc *sc) 773 { 774 struct mfi_command *cm; 775 int i, ncmds; 776 777 /* 778 * XXX Should we allocate all the commands up front, or allocate on 779 * demand later like 'aac' does? 780 */ 781 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds); 782 if (bootverbose) 783 device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver " 784 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds); 785 786 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF, 787 M_WAITOK | M_ZERO); 788 789 for (i = 0; i < ncmds; i++) { 790 cm = &sc->mfi_commands[i]; 791 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + 792 sc->mfi_cmd_size * i); 793 cm->cm_frame_busaddr = sc->mfi_frames_busaddr + 794 sc->mfi_cmd_size * i; 795 cm->cm_frame->header.context = i; 796 cm->cm_sense = &sc->mfi_sense[i]; 797 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; 798 cm->cm_sc = sc; 799 cm->cm_index = i; 800 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, 801 &cm->cm_dmamap) == 0) { 802 mtx_lock(&sc->mfi_io_lock); 803 mfi_release_command(cm); 804 mtx_unlock(&sc->mfi_io_lock); 805 } 806 else 807 break; 808 sc->mfi_total_cmds++; 809 } 810 811 return (0); 812 } 813 814 void 815 mfi_release_command(struct mfi_command *cm) 816 { 817 struct mfi_frame_header *hdr; 818 uint32_t *hdr_data; 819 820 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED); 821 822 /* 823 * Zero out the important fields of the frame, but make sure the 824 * context field is preserved. For efficiency, handle the fields 825 * as 32 bit words. Clear out the first S/G entry too for safety. 826 */ 827 hdr = &cm->cm_frame->header; 828 if (cm->cm_data != NULL && hdr->sg_count) { 829 cm->cm_sg->sg32[0].len = 0; 830 cm->cm_sg->sg32[0].addr = 0; 831 } 832 833 hdr_data = (uint32_t *)cm->cm_frame; 834 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ 835 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ 836 hdr_data[4] = 0; /* flags, timeout */ 837 hdr_data[5] = 0; /* data_len */ 838 839 cm->cm_extra_frames = 0; 840 cm->cm_flags = 0; 841 cm->cm_complete = NULL; 842 cm->cm_private = NULL; 843 cm->cm_data = NULL; 844 cm->cm_sg = 0; 845 cm->cm_total_frame_size = 0; 846 cm->retry_for_fw_reset = 0; 847 848 mfi_enqueue_free(cm); 849 } 850 851 int 852 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, 853 uint32_t opcode, void **bufp, size_t bufsize) 854 { 855 struct mfi_command *cm; 856 struct mfi_dcmd_frame *dcmd; 857 void *buf = NULL; 858 uint32_t context = 0; 859 860 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 861 862 cm = mfi_dequeue_free(sc); 863 if (cm == NULL) 864 return (EBUSY); 865 866 /* Zero out the MFI frame */ 867 context = cm->cm_frame->header.context; 868 bzero(cm->cm_frame, sizeof(union mfi_frame)); 869 cm->cm_frame->header.context = context; 870 871 if ((bufsize > 0) && (bufp != NULL)) { 872 if (*bufp == NULL) { 873 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); 874 if (buf == NULL) { 875 mfi_release_command(cm); 876 return (ENOMEM); 877 } 878 *bufp = buf; 879 } else { 880 buf = *bufp; 881 } 882 } 883 884 dcmd = &cm->cm_frame->dcmd; 885 bzero(dcmd->mbox, MFI_MBOX_SIZE); 886 dcmd->header.cmd = MFI_CMD_DCMD; 887 dcmd->header.timeout = 0; 888 dcmd->header.flags = 0; 889 dcmd->header.data_len = bufsize; 890 dcmd->header.scsi_status = 0; 891 dcmd->opcode = opcode; 892 cm->cm_sg = &dcmd->sgl; 893 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 894 cm->cm_flags = 0; 895 cm->cm_data = buf; 896 cm->cm_private = buf; 897 cm->cm_len = bufsize; 898 899 *cmp = cm; 900 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) 901 *bufp = buf; 902 return (0); 903 } 904 905 static int 906 mfi_comms_init(struct mfi_softc *sc) 907 { 908 struct mfi_command *cm; 909 struct mfi_init_frame *init; 910 struct mfi_init_qinfo *qinfo; 911 int error; 912 uint32_t context = 0; 913 914 mtx_lock(&sc->mfi_io_lock); 915 if ((cm = mfi_dequeue_free(sc)) == NULL) 916 return (EBUSY); 917 918 /* Zero out the MFI frame */ 919 context = cm->cm_frame->header.context; 920 bzero(cm->cm_frame, sizeof(union mfi_frame)); 921 cm->cm_frame->header.context = context; 922 923 /* 924 * Abuse the SG list area of the frame to hold the init_qinfo 925 * object; 926 */ 927 init = &cm->cm_frame->init; 928 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); 929 930 bzero(qinfo, sizeof(struct mfi_init_qinfo)); 931 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; 932 qinfo->rq_addr_lo = sc->mfi_comms_busaddr + 933 offsetof(struct mfi_hwcomms, hw_reply_q); 934 qinfo->pi_addr_lo = sc->mfi_comms_busaddr + 935 offsetof(struct mfi_hwcomms, hw_pi); 936 qinfo->ci_addr_lo = sc->mfi_comms_busaddr + 937 offsetof(struct mfi_hwcomms, hw_ci); 938 939 init->header.cmd = MFI_CMD_INIT; 940 init->header.data_len = sizeof(struct mfi_init_qinfo); 941 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; 942 cm->cm_data = NULL; 943 cm->cm_flags = MFI_CMD_POLLED; 944 945 if ((error = mfi_mapcmd(sc, cm)) != 0) { 946 device_printf(sc->mfi_dev, "failed to send init command\n"); 947 mtx_unlock(&sc->mfi_io_lock); 948 return (error); 949 } 950 mfi_release_command(cm); 951 mtx_unlock(&sc->mfi_io_lock); 952 953 return (0); 954 } 955 956 static int 957 mfi_get_controller_info(struct mfi_softc *sc) 958 { 959 struct mfi_command *cm = NULL; 960 struct mfi_ctrl_info *ci = NULL; 961 uint32_t max_sectors_1, max_sectors_2; 962 int error; 963 964 mtx_lock(&sc->mfi_io_lock); 965 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, 966 (void **)&ci, sizeof(*ci)); 967 if (error) 968 goto out; 969 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 970 971 if ((error = mfi_mapcmd(sc, cm)) != 0) { 972 device_printf(sc->mfi_dev, "Failed to get controller info\n"); 973 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / 974 MFI_SECTOR_LEN; 975 error = 0; 976 goto out; 977 } 978 979 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 980 BUS_DMASYNC_POSTREAD); 981 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 982 983 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io; 984 max_sectors_2 = ci->max_request_size; 985 sc->mfi_max_io = min(max_sectors_1, max_sectors_2); 986 sc->disableOnlineCtrlReset = 987 ci->properties.OnOffProperties.disableOnlineCtrlReset; 988 989 out: 990 if (ci) 991 free(ci, M_MFIBUF); 992 if (cm) 993 mfi_release_command(cm); 994 mtx_unlock(&sc->mfi_io_lock); 995 return (error); 996 } 997 998 static int 999 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) 1000 { 1001 struct mfi_command *cm = NULL; 1002 int error; 1003 1004 mtx_lock(&sc->mfi_io_lock); 1005 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, 1006 (void **)log_state, sizeof(**log_state)); 1007 if (error) 1008 goto out; 1009 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1010 1011 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1012 device_printf(sc->mfi_dev, "Failed to get log state\n"); 1013 goto out; 1014 } 1015 1016 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1017 BUS_DMASYNC_POSTREAD); 1018 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1019 1020 out: 1021 if (cm) 1022 mfi_release_command(cm); 1023 mtx_unlock(&sc->mfi_io_lock); 1024 1025 return (error); 1026 } 1027 1028 int 1029 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) 1030 { 1031 struct mfi_evt_log_state *log_state = NULL; 1032 union mfi_evt class_locale; 1033 int error = 0; 1034 uint32_t seq; 1035 1036 class_locale.members.reserved = 0; 1037 class_locale.members.locale = mfi_event_locale; 1038 class_locale.members.evt_class = mfi_event_class; 1039 1040 if (seq_start == 0) { 1041 error = mfi_get_log_state(sc, &log_state); 1042 sc->mfi_boot_seq_num = log_state->boot_seq_num; 1043 if (error) { 1044 if (log_state) 1045 free(log_state, M_MFIBUF); 1046 return (error); 1047 } 1048 1049 /* 1050 * Walk through any events that fired since the last 1051 * shutdown. 1052 */ 1053 mfi_parse_entries(sc, log_state->shutdown_seq_num, 1054 log_state->newest_seq_num); 1055 seq = log_state->newest_seq_num; 1056 } else 1057 seq = seq_start; 1058 mfi_aen_register(sc, seq, class_locale.word); 1059 free(log_state, M_MFIBUF); 1060 1061 return 0; 1062 } 1063 1064 int 1065 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) 1066 { 1067 1068 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1069 cm->cm_complete = NULL; 1070 1071 1072 /* 1073 * MegaCli can issue a DCMD of 0. In this case do nothing 1074 * and return 0 to it as status 1075 */ 1076 if (cm->cm_frame->dcmd.opcode == 0) { 1077 cm->cm_frame->header.cmd_status = MFI_STAT_OK; 1078 cm->cm_error = 0; 1079 return (cm->cm_error); 1080 } 1081 mfi_enqueue_ready(cm); 1082 mfi_startio(sc); 1083 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) 1084 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); 1085 return (cm->cm_error); 1086 } 1087 1088 void 1089 mfi_free(struct mfi_softc *sc) 1090 { 1091 struct mfi_command *cm; 1092 int i; 1093 1094 callout_drain(&sc->mfi_watchdog_callout); 1095 1096 if (sc->mfi_cdev != NULL) 1097 destroy_dev(sc->mfi_cdev); 1098 1099 if (sc->mfi_total_cmds != 0) { 1100 for (i = 0; i < sc->mfi_total_cmds; i++) { 1101 cm = &sc->mfi_commands[i]; 1102 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); 1103 } 1104 free(sc->mfi_commands, M_MFIBUF); 1105 } 1106 1107 if (sc->mfi_intr) 1108 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); 1109 if (sc->mfi_irq != NULL) 1110 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, 1111 sc->mfi_irq); 1112 1113 if (sc->mfi_sense_busaddr != 0) 1114 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); 1115 if (sc->mfi_sense != NULL) 1116 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, 1117 sc->mfi_sense_dmamap); 1118 if (sc->mfi_sense_dmat != NULL) 1119 bus_dma_tag_destroy(sc->mfi_sense_dmat); 1120 1121 if (sc->mfi_frames_busaddr != 0) 1122 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); 1123 if (sc->mfi_frames != NULL) 1124 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, 1125 sc->mfi_frames_dmamap); 1126 if (sc->mfi_frames_dmat != NULL) 1127 bus_dma_tag_destroy(sc->mfi_frames_dmat); 1128 1129 if (sc->mfi_comms_busaddr != 0) 1130 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); 1131 if (sc->mfi_comms != NULL) 1132 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, 1133 sc->mfi_comms_dmamap); 1134 if (sc->mfi_comms_dmat != NULL) 1135 bus_dma_tag_destroy(sc->mfi_comms_dmat); 1136 1137 /* ThunderBolt contiguous memory free here */ 1138 if (sc->mfi_flags & MFI_FLAGS_TBOLT) { 1139 if (sc->mfi_tb_busaddr != 0) 1140 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap); 1141 if (sc->request_message_pool != NULL) 1142 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool, 1143 sc->mfi_tb_dmamap); 1144 if (sc->mfi_tb_dmat != NULL) 1145 bus_dma_tag_destroy(sc->mfi_tb_dmat); 1146 1147 /* Version buffer memory free */ 1148 /* Start LSIP200113393 */ 1149 if (sc->verbuf_h_busaddr != 0) 1150 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap); 1151 if (sc->verbuf != NULL) 1152 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf, 1153 sc->verbuf_h_dmamap); 1154 if (sc->verbuf_h_dmat != NULL) 1155 bus_dma_tag_destroy(sc->verbuf_h_dmat); 1156 1157 /* End LSIP200113393 */ 1158 /* ThunderBolt INIT packet memory Free */ 1159 if (sc->mfi_tb_init_busaddr != 0) 1160 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap); 1161 if (sc->mfi_tb_init != NULL) 1162 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init, 1163 sc->mfi_tb_init_dmamap); 1164 if (sc->mfi_tb_init_dmat != NULL) 1165 bus_dma_tag_destroy(sc->mfi_tb_init_dmat); 1166 1167 /* ThunderBolt IOC Init Desc memory free here */ 1168 if (sc->mfi_tb_ioc_init_busaddr != 0) 1169 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat, 1170 sc->mfi_tb_ioc_init_dmamap); 1171 if (sc->mfi_tb_ioc_init_desc != NULL) 1172 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat, 1173 sc->mfi_tb_ioc_init_desc, 1174 sc->mfi_tb_ioc_init_dmamap); 1175 if (sc->mfi_tb_ioc_init_dmat != NULL) 1176 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat); 1177 for (int i = 0; i < sc->mfi_max_fw_cmds; i++) { 1178 if (sc->mfi_cmd_pool_tbolt != NULL) { 1179 if (sc->mfi_cmd_pool_tbolt[i] != NULL) { 1180 free(sc->mfi_cmd_pool_tbolt[i], 1181 M_MFIBUF); 1182 sc->mfi_cmd_pool_tbolt[i] = NULL; 1183 } 1184 } 1185 } 1186 if (sc->mfi_cmd_pool_tbolt != NULL) { 1187 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF); 1188 sc->mfi_cmd_pool_tbolt = NULL; 1189 } 1190 if (sc->request_desc_pool != NULL) { 1191 free(sc->request_desc_pool, M_MFIBUF); 1192 sc->request_desc_pool = NULL; 1193 } 1194 } 1195 if (sc->mfi_buffer_dmat != NULL) 1196 bus_dma_tag_destroy(sc->mfi_buffer_dmat); 1197 if (sc->mfi_parent_dmat != NULL) 1198 bus_dma_tag_destroy(sc->mfi_parent_dmat); 1199 1200 if (mtx_initialized(&sc->mfi_io_lock)) { 1201 mtx_destroy(&sc->mfi_io_lock); 1202 sx_destroy(&sc->mfi_config_lock); 1203 } 1204 1205 return; 1206 } 1207 1208 static void 1209 mfi_startup(void *arg) 1210 { 1211 struct mfi_softc *sc; 1212 1213 sc = (struct mfi_softc *)arg; 1214 1215 config_intrhook_disestablish(&sc->mfi_ich); 1216 1217 sc->mfi_enable_intr(sc); 1218 sx_xlock(&sc->mfi_config_lock); 1219 mtx_lock(&sc->mfi_io_lock); 1220 mfi_ldprobe(sc); 1221 if (sc->mfi_flags & MFI_FLAGS_SKINNY) 1222 mfi_syspdprobe(sc); 1223 mtx_unlock(&sc->mfi_io_lock); 1224 sx_xunlock(&sc->mfi_config_lock); 1225 } 1226 1227 static void 1228 mfi_intr(void *arg) 1229 { 1230 struct mfi_softc *sc; 1231 struct mfi_command *cm; 1232 uint32_t pi, ci, context; 1233 1234 sc = (struct mfi_softc *)arg; 1235 1236 if (sc->mfi_check_clear_intr(sc)) 1237 return; 1238 1239 restart: 1240 pi = sc->mfi_comms->hw_pi; 1241 ci = sc->mfi_comms->hw_ci; 1242 mtx_lock(&sc->mfi_io_lock); 1243 while (ci != pi) { 1244 context = sc->mfi_comms->hw_reply_q[ci]; 1245 if (context < sc->mfi_max_fw_cmds) { 1246 cm = &sc->mfi_commands[context]; 1247 mfi_remove_busy(cm); 1248 cm->cm_error = 0; 1249 mfi_complete(sc, cm); 1250 } 1251 if (++ci == (sc->mfi_max_fw_cmds + 1)) { 1252 ci = 0; 1253 } 1254 } 1255 1256 sc->mfi_comms->hw_ci = ci; 1257 1258 /* Give defered I/O a chance to run */ 1259 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 1260 sc->mfi_flags &= ~MFI_FLAGS_QFRZN; 1261 mfi_startio(sc); 1262 mtx_unlock(&sc->mfi_io_lock); 1263 1264 /* 1265 * Dummy read to flush the bus; this ensures that the indexes are up 1266 * to date. Restart processing if more commands have come it. 1267 */ 1268 (void)sc->mfi_read_fw_status(sc); 1269 if (pi != sc->mfi_comms->hw_pi) 1270 goto restart; 1271 1272 return; 1273 } 1274 1275 int 1276 mfi_shutdown(struct mfi_softc *sc) 1277 { 1278 struct mfi_dcmd_frame *dcmd; 1279 struct mfi_command *cm; 1280 int error; 1281 1282 mtx_lock(&sc->mfi_io_lock); 1283 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); 1284 if (error) { 1285 mtx_unlock(&sc->mfi_io_lock); 1286 return (error); 1287 } 1288 1289 if (sc->mfi_aen_cm != NULL) 1290 mfi_abort(sc, sc->mfi_aen_cm); 1291 1292 if (sc->mfi_map_sync_cm != NULL) 1293 mfi_abort(sc, sc->mfi_map_sync_cm); 1294 1295 dcmd = &cm->cm_frame->dcmd; 1296 dcmd->header.flags = MFI_FRAME_DIR_NONE; 1297 cm->cm_flags = MFI_CMD_POLLED; 1298 cm->cm_data = NULL; 1299 1300 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1301 device_printf(sc->mfi_dev, "Failed to shutdown controller\n"); 1302 } 1303 1304 mfi_release_command(cm); 1305 mtx_unlock(&sc->mfi_io_lock); 1306 return (error); 1307 } 1308 1309 static void 1310 mfi_syspdprobe(struct mfi_softc *sc) 1311 { 1312 struct mfi_frame_header *hdr; 1313 struct mfi_command *cm = NULL; 1314 struct mfi_pd_list *pdlist = NULL; 1315 struct mfi_system_pd *syspd, *tmp; 1316 int error, i, found; 1317 1318 sx_assert(&sc->mfi_config_lock, SA_XLOCKED); 1319 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1320 /* Add SYSTEM PD's */ 1321 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY, 1322 (void **)&pdlist, sizeof(*pdlist)); 1323 if (error) { 1324 device_printf(sc->mfi_dev, 1325 "Error while forming SYSTEM PD list\n"); 1326 goto out; 1327 } 1328 1329 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1330 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 1331 cm->cm_frame->dcmd.mbox[1] = 0; 1332 if (mfi_mapcmd(sc, cm) != 0) { 1333 device_printf(sc->mfi_dev, 1334 "Failed to get syspd device listing\n"); 1335 goto out; 1336 } 1337 bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap, 1338 BUS_DMASYNC_POSTREAD); 1339 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1340 hdr = &cm->cm_frame->header; 1341 if (hdr->cmd_status != MFI_STAT_OK) { 1342 device_printf(sc->mfi_dev, 1343 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status); 1344 goto out; 1345 } 1346 /* Get each PD and add it to the system */ 1347 for (i = 0; i < pdlist->count; i++) { 1348 if (pdlist->addr[i].device_id == 1349 pdlist->addr[i].encl_device_id) 1350 continue; 1351 found = 0; 1352 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { 1353 if (syspd->pd_id == pdlist->addr[i].device_id) 1354 found = 1; 1355 } 1356 if (found == 0) 1357 mfi_add_sys_pd(sc, pdlist->addr[i].device_id); 1358 } 1359 /* Delete SYSPD's whose state has been changed */ 1360 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) { 1361 found = 0; 1362 for (i = 0; i < pdlist->count; i++) { 1363 if (syspd->pd_id == pdlist->addr[i].device_id) 1364 found = 1; 1365 } 1366 if (found == 0) { 1367 printf("DELETE\n"); 1368 mtx_unlock(&sc->mfi_io_lock); 1369 mtx_lock(&Giant); 1370 device_delete_child(sc->mfi_dev, syspd->pd_dev); 1371 mtx_unlock(&Giant); 1372 mtx_lock(&sc->mfi_io_lock); 1373 } 1374 } 1375 out: 1376 if (pdlist) 1377 free(pdlist, M_MFIBUF); 1378 if (cm) 1379 mfi_release_command(cm); 1380 1381 return; 1382 } 1383 1384 static void 1385 mfi_ldprobe(struct mfi_softc *sc) 1386 { 1387 struct mfi_frame_header *hdr; 1388 struct mfi_command *cm = NULL; 1389 struct mfi_ld_list *list = NULL; 1390 struct mfi_disk *ld; 1391 int error, i; 1392 1393 sx_assert(&sc->mfi_config_lock, SA_XLOCKED); 1394 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1395 1396 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, 1397 (void **)&list, sizeof(*list)); 1398 if (error) 1399 goto out; 1400 1401 cm->cm_flags = MFI_CMD_DATAIN; 1402 if (mfi_wait_command(sc, cm) != 0) { 1403 device_printf(sc->mfi_dev, "Failed to get device listing\n"); 1404 goto out; 1405 } 1406 1407 hdr = &cm->cm_frame->header; 1408 if (hdr->cmd_status != MFI_STAT_OK) { 1409 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", 1410 hdr->cmd_status); 1411 goto out; 1412 } 1413 1414 for (i = 0; i < list->ld_count; i++) { 1415 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1416 if (ld->ld_id == list->ld_list[i].ld.v.target_id) 1417 goto skip_add; 1418 } 1419 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); 1420 skip_add:; 1421 } 1422 out: 1423 if (list) 1424 free(list, M_MFIBUF); 1425 if (cm) 1426 mfi_release_command(cm); 1427 1428 return; 1429 } 1430 1431 /* 1432 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If 1433 * the bits in 24-31 are all set, then it is the number of seconds since 1434 * boot. 1435 */ 1436 static const char * 1437 format_timestamp(uint32_t timestamp) 1438 { 1439 static char buffer[32]; 1440 1441 if ((timestamp & 0xff000000) == 0xff000000) 1442 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 1443 0x00ffffff); 1444 else 1445 snprintf(buffer, sizeof(buffer), "%us", timestamp); 1446 return (buffer); 1447 } 1448 1449 static const char * 1450 format_class(int8_t class) 1451 { 1452 static char buffer[6]; 1453 1454 switch (class) { 1455 case MFI_EVT_CLASS_DEBUG: 1456 return ("debug"); 1457 case MFI_EVT_CLASS_PROGRESS: 1458 return ("progress"); 1459 case MFI_EVT_CLASS_INFO: 1460 return ("info"); 1461 case MFI_EVT_CLASS_WARNING: 1462 return ("WARN"); 1463 case MFI_EVT_CLASS_CRITICAL: 1464 return ("CRIT"); 1465 case MFI_EVT_CLASS_FATAL: 1466 return ("FATAL"); 1467 case MFI_EVT_CLASS_DEAD: 1468 return ("DEAD"); 1469 default: 1470 snprintf(buffer, sizeof(buffer), "%d", class); 1471 return (buffer); 1472 } 1473 } 1474 1475 static void 1476 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) 1477 { 1478 struct mfi_system_pd *syspd = NULL; 1479 1480 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, 1481 format_timestamp(detail->time), detail->evt_class.members.locale, 1482 format_class(detail->evt_class.members.evt_class), 1483 detail->description); 1484 1485 /* Don't act on old AEN's or while shutting down */ 1486 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching) 1487 return; 1488 1489 switch (detail->arg_type) { 1490 case MR_EVT_ARGS_NONE: 1491 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) { 1492 device_printf(sc->mfi_dev, "HostBus scan raised\n"); 1493 if (mfi_detect_jbod_change) { 1494 /* 1495 * Probe for new SYSPD's and Delete 1496 * invalid SYSPD's 1497 */ 1498 sx_xlock(&sc->mfi_config_lock); 1499 mtx_lock(&sc->mfi_io_lock); 1500 mfi_syspdprobe(sc); 1501 mtx_unlock(&sc->mfi_io_lock); 1502 sx_xunlock(&sc->mfi_config_lock); 1503 } 1504 } 1505 break; 1506 case MR_EVT_ARGS_LD_STATE: 1507 /* During load time driver reads all the events starting 1508 * from the one that has been logged after shutdown. Avoid 1509 * these old events. 1510 */ 1511 if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) { 1512 /* Remove the LD */ 1513 struct mfi_disk *ld; 1514 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 1515 if (ld->ld_id == 1516 detail->args.ld_state.ld.target_id) 1517 break; 1518 } 1519 /* 1520 Fix: for kernel panics when SSCD is removed 1521 KASSERT(ld != NULL, ("volume dissappeared")); 1522 */ 1523 if (ld != NULL) { 1524 mtx_lock(&Giant); 1525 device_delete_child(sc->mfi_dev, ld->ld_dev); 1526 mtx_unlock(&Giant); 1527 } 1528 } 1529 break; 1530 case MR_EVT_ARGS_PD: 1531 if (detail->code == MR_EVT_PD_REMOVED) { 1532 if (mfi_detect_jbod_change) { 1533 /* 1534 * If the removed device is a SYSPD then 1535 * delete it 1536 */ 1537 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, 1538 pd_link) { 1539 if (syspd->pd_id == 1540 detail->args.pd.device_id) { 1541 mtx_lock(&Giant); 1542 device_delete_child( 1543 sc->mfi_dev, 1544 syspd->pd_dev); 1545 mtx_unlock(&Giant); 1546 break; 1547 } 1548 } 1549 } 1550 } 1551 if (detail->code == MR_EVT_PD_INSERTED) { 1552 if (mfi_detect_jbod_change) { 1553 /* Probe for new SYSPD's */ 1554 sx_xlock(&sc->mfi_config_lock); 1555 mtx_lock(&sc->mfi_io_lock); 1556 mfi_syspdprobe(sc); 1557 mtx_unlock(&sc->mfi_io_lock); 1558 sx_xunlock(&sc->mfi_config_lock); 1559 } 1560 } 1561 break; 1562 } 1563 } 1564 1565 static void 1566 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) 1567 { 1568 struct mfi_evt_queue_elm *elm; 1569 1570 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1571 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO); 1572 if (elm == NULL) 1573 return; 1574 memcpy(&elm->detail, detail, sizeof(*detail)); 1575 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link); 1576 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task); 1577 } 1578 1579 static void 1580 mfi_handle_evt(void *context, int pending) 1581 { 1582 TAILQ_HEAD(,mfi_evt_queue_elm) queue; 1583 struct mfi_softc *sc; 1584 struct mfi_evt_queue_elm *elm; 1585 1586 sc = context; 1587 TAILQ_INIT(&queue); 1588 mtx_lock(&sc->mfi_io_lock); 1589 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link); 1590 mtx_unlock(&sc->mfi_io_lock); 1591 while ((elm = TAILQ_FIRST(&queue)) != NULL) { 1592 TAILQ_REMOVE(&queue, elm, link); 1593 mfi_decode_evt(sc, &elm->detail); 1594 free(elm, M_MFIBUF); 1595 } 1596 } 1597 1598 static int 1599 mfi_aen_register(struct mfi_softc *sc, int seq, int locale) 1600 { 1601 struct mfi_command *cm; 1602 struct mfi_dcmd_frame *dcmd; 1603 union mfi_evt current_aen, prior_aen; 1604 struct mfi_evt_detail *ed = NULL; 1605 int error = 0; 1606 1607 current_aen.word = locale; 1608 if (sc->mfi_aen_cm != NULL) { 1609 prior_aen.word = 1610 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; 1611 if (prior_aen.members.evt_class <= current_aen.members.evt_class && 1612 !((prior_aen.members.locale & current_aen.members.locale) 1613 ^current_aen.members.locale)) { 1614 return (0); 1615 } else { 1616 prior_aen.members.locale |= current_aen.members.locale; 1617 if (prior_aen.members.evt_class 1618 < current_aen.members.evt_class) 1619 current_aen.members.evt_class = 1620 prior_aen.members.evt_class; 1621 mtx_lock(&sc->mfi_io_lock); 1622 mfi_abort(sc, sc->mfi_aen_cm); 1623 mtx_unlock(&sc->mfi_io_lock); 1624 } 1625 } 1626 1627 mtx_lock(&sc->mfi_io_lock); 1628 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, 1629 (void **)&ed, sizeof(*ed)); 1630 mtx_unlock(&sc->mfi_io_lock); 1631 if (error) { 1632 goto out; 1633 } 1634 1635 dcmd = &cm->cm_frame->dcmd; 1636 ((uint32_t *)&dcmd->mbox)[0] = seq; 1637 ((uint32_t *)&dcmd->mbox)[1] = locale; 1638 cm->cm_flags = MFI_CMD_DATAIN; 1639 cm->cm_complete = mfi_aen_complete; 1640 1641 sc->last_seq_num = seq; 1642 sc->mfi_aen_cm = cm; 1643 1644 mtx_lock(&sc->mfi_io_lock); 1645 mfi_enqueue_ready(cm); 1646 mfi_startio(sc); 1647 mtx_unlock(&sc->mfi_io_lock); 1648 1649 out: 1650 return (error); 1651 } 1652 1653 static void 1654 mfi_aen_complete(struct mfi_command *cm) 1655 { 1656 struct mfi_frame_header *hdr; 1657 struct mfi_softc *sc; 1658 struct mfi_evt_detail *detail; 1659 struct mfi_aen *mfi_aen_entry, *tmp; 1660 int seq = 0, aborted = 0; 1661 1662 sc = cm->cm_sc; 1663 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1664 1665 hdr = &cm->cm_frame->header; 1666 1667 if (sc->mfi_aen_cm == NULL) 1668 return; 1669 1670 if (sc->cm_aen_abort || 1671 hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1672 sc->cm_aen_abort = 0; 1673 aborted = 1; 1674 } else { 1675 sc->mfi_aen_triggered = 1; 1676 if (sc->mfi_poll_waiting) { 1677 sc->mfi_poll_waiting = 0; 1678 selwakeup(&sc->mfi_select); 1679 } 1680 detail = cm->cm_data; 1681 mfi_queue_evt(sc, detail); 1682 seq = detail->seq + 1; 1683 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, 1684 tmp) { 1685 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 1686 aen_link); 1687 PROC_LOCK(mfi_aen_entry->p); 1688 kern_psignal(mfi_aen_entry->p, SIGIO); 1689 PROC_UNLOCK(mfi_aen_entry->p); 1690 free(mfi_aen_entry, M_MFIBUF); 1691 } 1692 } 1693 1694 free(cm->cm_data, M_MFIBUF); 1695 sc->mfi_aen_cm = NULL; 1696 wakeup(&sc->mfi_aen_cm); 1697 mfi_release_command(cm); 1698 1699 /* set it up again so the driver can catch more events */ 1700 if (!aborted) { 1701 mtx_unlock(&sc->mfi_io_lock); 1702 mfi_aen_setup(sc, seq); 1703 mtx_lock(&sc->mfi_io_lock); 1704 } 1705 } 1706 1707 #define MAX_EVENTS 15 1708 1709 static int 1710 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) 1711 { 1712 struct mfi_command *cm; 1713 struct mfi_dcmd_frame *dcmd; 1714 struct mfi_evt_list *el; 1715 union mfi_evt class_locale; 1716 int error, i, seq, size; 1717 1718 class_locale.members.reserved = 0; 1719 class_locale.members.locale = mfi_event_locale; 1720 class_locale.members.evt_class = mfi_event_class; 1721 1722 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) 1723 * (MAX_EVENTS - 1); 1724 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); 1725 if (el == NULL) 1726 return (ENOMEM); 1727 1728 for (seq = start_seq;;) { 1729 mtx_lock(&sc->mfi_io_lock); 1730 if ((cm = mfi_dequeue_free(sc)) == NULL) { 1731 free(el, M_MFIBUF); 1732 mtx_unlock(&sc->mfi_io_lock); 1733 return (EBUSY); 1734 } 1735 mtx_unlock(&sc->mfi_io_lock); 1736 1737 dcmd = &cm->cm_frame->dcmd; 1738 bzero(dcmd->mbox, MFI_MBOX_SIZE); 1739 dcmd->header.cmd = MFI_CMD_DCMD; 1740 dcmd->header.timeout = 0; 1741 dcmd->header.data_len = size; 1742 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; 1743 ((uint32_t *)&dcmd->mbox)[0] = seq; 1744 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; 1745 cm->cm_sg = &dcmd->sgl; 1746 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1747 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1748 cm->cm_data = el; 1749 cm->cm_len = size; 1750 1751 mtx_lock(&sc->mfi_io_lock); 1752 if ((error = mfi_mapcmd(sc, cm)) != 0) { 1753 device_printf(sc->mfi_dev, 1754 "Failed to get controller entries\n"); 1755 mfi_release_command(cm); 1756 mtx_unlock(&sc->mfi_io_lock); 1757 break; 1758 } 1759 1760 mtx_unlock(&sc->mfi_io_lock); 1761 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1762 BUS_DMASYNC_POSTREAD); 1763 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1764 1765 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { 1766 mtx_lock(&sc->mfi_io_lock); 1767 mfi_release_command(cm); 1768 mtx_unlock(&sc->mfi_io_lock); 1769 break; 1770 } 1771 if (dcmd->header.cmd_status != MFI_STAT_OK) { 1772 device_printf(sc->mfi_dev, 1773 "Error %d fetching controller entries\n", 1774 dcmd->header.cmd_status); 1775 mtx_lock(&sc->mfi_io_lock); 1776 mfi_release_command(cm); 1777 mtx_unlock(&sc->mfi_io_lock); 1778 break; 1779 } 1780 mtx_lock(&sc->mfi_io_lock); 1781 mfi_release_command(cm); 1782 mtx_unlock(&sc->mfi_io_lock); 1783 1784 for (i = 0; i < el->count; i++) { 1785 /* 1786 * If this event is newer than 'stop_seq' then 1787 * break out of the loop. Note that the log 1788 * is a circular buffer so we have to handle 1789 * the case that our stop point is earlier in 1790 * the buffer than our start point. 1791 */ 1792 if (el->event[i].seq >= stop_seq) { 1793 if (start_seq <= stop_seq) 1794 break; 1795 else if (el->event[i].seq < start_seq) 1796 break; 1797 } 1798 mtx_lock(&sc->mfi_io_lock); 1799 mfi_queue_evt(sc, &el->event[i]); 1800 mtx_unlock(&sc->mfi_io_lock); 1801 } 1802 seq = el->event[el->count - 1].seq + 1; 1803 } 1804 1805 free(el, M_MFIBUF); 1806 return (0); 1807 } 1808 1809 static int 1810 mfi_add_ld(struct mfi_softc *sc, int id) 1811 { 1812 struct mfi_command *cm; 1813 struct mfi_dcmd_frame *dcmd = NULL; 1814 struct mfi_ld_info *ld_info = NULL; 1815 int error; 1816 1817 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1818 1819 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, 1820 (void **)&ld_info, sizeof(*ld_info)); 1821 if (error) { 1822 device_printf(sc->mfi_dev, 1823 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); 1824 if (ld_info) 1825 free(ld_info, M_MFIBUF); 1826 return (error); 1827 } 1828 cm->cm_flags = MFI_CMD_DATAIN; 1829 dcmd = &cm->cm_frame->dcmd; 1830 dcmd->mbox[0] = id; 1831 if (mfi_wait_command(sc, cm) != 0) { 1832 device_printf(sc->mfi_dev, 1833 "Failed to get logical drive: %d\n", id); 1834 free(ld_info, M_MFIBUF); 1835 return (0); 1836 } 1837 if (ld_info->ld_config.params.isSSCD != 1) 1838 mfi_add_ld_complete(cm); 1839 else { 1840 mfi_release_command(cm); 1841 if (ld_info) /* SSCD drives ld_info free here */ 1842 free(ld_info, M_MFIBUF); 1843 } 1844 return (0); 1845 } 1846 1847 static void 1848 mfi_add_ld_complete(struct mfi_command *cm) 1849 { 1850 struct mfi_frame_header *hdr; 1851 struct mfi_ld_info *ld_info; 1852 struct mfi_softc *sc; 1853 device_t child; 1854 1855 sc = cm->cm_sc; 1856 hdr = &cm->cm_frame->header; 1857 ld_info = cm->cm_private; 1858 1859 if (hdr->cmd_status != MFI_STAT_OK) { 1860 free(ld_info, M_MFIBUF); 1861 mfi_release_command(cm); 1862 return; 1863 } 1864 mfi_release_command(cm); 1865 1866 mtx_unlock(&sc->mfi_io_lock); 1867 mtx_lock(&Giant); 1868 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { 1869 device_printf(sc->mfi_dev, "Failed to add logical disk\n"); 1870 free(ld_info, M_MFIBUF); 1871 mtx_unlock(&Giant); 1872 mtx_lock(&sc->mfi_io_lock); 1873 return; 1874 } 1875 1876 device_set_ivars(child, ld_info); 1877 device_set_desc(child, "MFI Logical Disk"); 1878 bus_generic_attach(sc->mfi_dev); 1879 mtx_unlock(&Giant); 1880 mtx_lock(&sc->mfi_io_lock); 1881 } 1882 1883 static int mfi_add_sys_pd(struct mfi_softc *sc, int id) 1884 { 1885 struct mfi_command *cm; 1886 struct mfi_dcmd_frame *dcmd = NULL; 1887 struct mfi_pd_info *pd_info = NULL; 1888 int error; 1889 1890 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1891 1892 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO, 1893 (void **)&pd_info, sizeof(*pd_info)); 1894 if (error) { 1895 device_printf(sc->mfi_dev, 1896 "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n", 1897 error); 1898 if (pd_info) 1899 free(pd_info, M_MFIBUF); 1900 return (error); 1901 } 1902 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; 1903 dcmd = &cm->cm_frame->dcmd; 1904 dcmd->mbox[0]=id; 1905 dcmd->header.scsi_status = 0; 1906 dcmd->header.pad0 = 0; 1907 if (mfi_mapcmd(sc, cm) != 0) { 1908 device_printf(sc->mfi_dev, 1909 "Failed to get physical drive info %d\n", id); 1910 free(pd_info, M_MFIBUF); 1911 return (0); 1912 } 1913 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 1914 BUS_DMASYNC_POSTREAD); 1915 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 1916 mfi_add_sys_pd_complete(cm); 1917 return (0); 1918 } 1919 1920 static void 1921 mfi_add_sys_pd_complete(struct mfi_command *cm) 1922 { 1923 struct mfi_frame_header *hdr; 1924 struct mfi_pd_info *pd_info; 1925 struct mfi_softc *sc; 1926 device_t child; 1927 1928 sc = cm->cm_sc; 1929 hdr = &cm->cm_frame->header; 1930 pd_info = cm->cm_private; 1931 1932 if (hdr->cmd_status != MFI_STAT_OK) { 1933 free(pd_info, M_MFIBUF); 1934 mfi_release_command(cm); 1935 return; 1936 } 1937 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) { 1938 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n", 1939 pd_info->ref.v.device_id); 1940 free(pd_info, M_MFIBUF); 1941 mfi_release_command(cm); 1942 return; 1943 } 1944 mfi_release_command(cm); 1945 1946 mtx_unlock(&sc->mfi_io_lock); 1947 mtx_lock(&Giant); 1948 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) { 1949 device_printf(sc->mfi_dev, "Failed to add system pd\n"); 1950 free(pd_info, M_MFIBUF); 1951 mtx_unlock(&Giant); 1952 mtx_lock(&sc->mfi_io_lock); 1953 return; 1954 } 1955 1956 device_set_ivars(child, pd_info); 1957 device_set_desc(child, "MFI System PD"); 1958 bus_generic_attach(sc->mfi_dev); 1959 mtx_unlock(&Giant); 1960 mtx_lock(&sc->mfi_io_lock); 1961 } 1962 1963 static struct mfi_command * 1964 mfi_bio_command(struct mfi_softc *sc) 1965 { 1966 struct bio *bio; 1967 struct mfi_command *cm = NULL; 1968 1969 /*reserving two commands to avoid starvation for IOCTL*/ 1970 if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) { 1971 return (NULL); 1972 } 1973 if ((bio = mfi_dequeue_bio(sc)) == NULL) { 1974 return (NULL); 1975 } 1976 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) { 1977 cm = mfi_build_ldio(sc, bio); 1978 } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) { 1979 cm = mfi_build_syspdio(sc, bio); 1980 } 1981 if (!cm) 1982 mfi_enqueue_bio(sc, bio); 1983 return cm; 1984 } 1985 static struct mfi_command * 1986 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio) 1987 { 1988 struct mfi_command *cm; 1989 struct mfi_pass_frame *pass; 1990 int flags = 0, blkcount = 0; 1991 uint32_t context = 0; 1992 1993 if ((cm = mfi_dequeue_free(sc)) == NULL) 1994 return (NULL); 1995 1996 /* Zero out the MFI frame */ 1997 context = cm->cm_frame->header.context; 1998 bzero(cm->cm_frame, sizeof(union mfi_frame)); 1999 cm->cm_frame->header.context = context; 2000 pass = &cm->cm_frame->pass; 2001 bzero(pass->cdb, 16); 2002 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2003 switch (bio->bio_cmd & 0x03) { 2004 case BIO_READ: 2005 #define SCSI_READ 0x28 2006 pass->cdb[0] = SCSI_READ; 2007 flags = MFI_CMD_DATAIN; 2008 break; 2009 case BIO_WRITE: 2010 #define SCSI_WRITE 0x2a 2011 pass->cdb[0] = SCSI_WRITE; 2012 flags = MFI_CMD_DATAOUT; 2013 break; 2014 default: 2015 panic("Invalid bio command"); 2016 } 2017 2018 /* Cheat with the sector length to avoid a non-constant division */ 2019 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2020 /* Fill the LBA and Transfer length in CDB */ 2021 pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24; 2022 pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16; 2023 pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8; 2024 pass->cdb[5] = bio->bio_pblkno & 0x000000ff; 2025 pass->cdb[7] = (blkcount & 0xff00) >> 8; 2026 pass->cdb[8] = (blkcount & 0x00ff); 2027 pass->header.target_id = (uintptr_t)bio->bio_driver1; 2028 pass->header.timeout = 0; 2029 pass->header.flags = 0; 2030 pass->header.scsi_status = 0; 2031 pass->header.sense_len = MFI_SENSE_LEN; 2032 pass->header.data_len = bio->bio_bcount; 2033 pass->header.cdb_len = 10; 2034 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2035 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2036 cm->cm_complete = mfi_bio_complete; 2037 cm->cm_private = bio; 2038 cm->cm_data = bio->bio_data; 2039 cm->cm_len = bio->bio_bcount; 2040 cm->cm_sg = &pass->sgl; 2041 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2042 cm->cm_flags = flags; 2043 return (cm); 2044 } 2045 2046 static struct mfi_command * 2047 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio) 2048 { 2049 struct mfi_io_frame *io; 2050 struct mfi_command *cm; 2051 int flags, blkcount; 2052 uint32_t context = 0; 2053 2054 if ((cm = mfi_dequeue_free(sc)) == NULL) 2055 return (NULL); 2056 2057 /* Zero out the MFI frame */ 2058 context = cm->cm_frame->header.context; 2059 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2060 cm->cm_frame->header.context = context; 2061 io = &cm->cm_frame->io; 2062 switch (bio->bio_cmd & 0x03) { 2063 case BIO_READ: 2064 io->header.cmd = MFI_CMD_LD_READ; 2065 flags = MFI_CMD_DATAIN; 2066 break; 2067 case BIO_WRITE: 2068 io->header.cmd = MFI_CMD_LD_WRITE; 2069 flags = MFI_CMD_DATAOUT; 2070 break; 2071 default: 2072 panic("Invalid bio command"); 2073 } 2074 2075 /* Cheat with the sector length to avoid a non-constant division */ 2076 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2077 io->header.target_id = (uintptr_t)bio->bio_driver1; 2078 io->header.timeout = 0; 2079 io->header.flags = 0; 2080 io->header.scsi_status = 0; 2081 io->header.sense_len = MFI_SENSE_LEN; 2082 io->header.data_len = blkcount; 2083 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2084 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2085 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; 2086 io->lba_lo = bio->bio_pblkno & 0xffffffff; 2087 cm->cm_complete = mfi_bio_complete; 2088 cm->cm_private = bio; 2089 cm->cm_data = bio->bio_data; 2090 cm->cm_len = bio->bio_bcount; 2091 cm->cm_sg = &io->sgl; 2092 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2093 cm->cm_flags = flags; 2094 return (cm); 2095 } 2096 2097 static void 2098 mfi_bio_complete(struct mfi_command *cm) 2099 { 2100 struct bio *bio; 2101 struct mfi_frame_header *hdr; 2102 struct mfi_softc *sc; 2103 2104 bio = cm->cm_private; 2105 hdr = &cm->cm_frame->header; 2106 sc = cm->cm_sc; 2107 2108 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { 2109 bio->bio_flags |= BIO_ERROR; 2110 bio->bio_error = EIO; 2111 device_printf(sc->mfi_dev, "I/O error, status= %d " 2112 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status); 2113 mfi_print_sense(cm->cm_sc, cm->cm_sense); 2114 } else if (cm->cm_error != 0) { 2115 bio->bio_flags |= BIO_ERROR; 2116 } 2117 2118 mfi_release_command(cm); 2119 mfi_disk_complete(bio); 2120 } 2121 2122 void 2123 mfi_startio(struct mfi_softc *sc) 2124 { 2125 struct mfi_command *cm; 2126 struct ccb_hdr *ccbh; 2127 2128 for (;;) { 2129 /* Don't bother if we're short on resources */ 2130 if (sc->mfi_flags & MFI_FLAGS_QFRZN) 2131 break; 2132 2133 /* Try a command that has already been prepared */ 2134 cm = mfi_dequeue_ready(sc); 2135 2136 if (cm == NULL) { 2137 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) 2138 cm = sc->mfi_cam_start(ccbh); 2139 } 2140 2141 /* Nope, so look for work on the bioq */ 2142 if (cm == NULL) 2143 cm = mfi_bio_command(sc); 2144 2145 /* No work available, so exit */ 2146 if (cm == NULL) 2147 break; 2148 2149 /* Send the command to the controller */ 2150 if (mfi_mapcmd(sc, cm) != 0) { 2151 mfi_requeue_ready(cm); 2152 break; 2153 } 2154 } 2155 } 2156 2157 int 2158 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) 2159 { 2160 int error, polled; 2161 2162 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2163 2164 if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { 2165 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0; 2166 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, 2167 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); 2168 if (error == EINPROGRESS) { 2169 sc->mfi_flags |= MFI_FLAGS_QFRZN; 2170 return (0); 2171 } 2172 } else { 2173 if (sc->MFA_enabled) 2174 error = mfi_tbolt_send_frame(sc, cm); 2175 else 2176 error = mfi_send_frame(sc, cm); 2177 } 2178 2179 return (error); 2180 } 2181 2182 static void 2183 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2184 { 2185 struct mfi_frame_header *hdr; 2186 struct mfi_command *cm; 2187 union mfi_sgl *sgl; 2188 struct mfi_softc *sc; 2189 int i, j, first, dir; 2190 int sge_size; 2191 2192 cm = (struct mfi_command *)arg; 2193 sc = cm->cm_sc; 2194 hdr = &cm->cm_frame->header; 2195 sgl = cm->cm_sg; 2196 2197 if (error) { 2198 printf("error %d in callback\n", error); 2199 cm->cm_error = error; 2200 mfi_complete(sc, cm); 2201 return; 2202 } 2203 /* Use IEEE sgl only for IO's on a SKINNY controller 2204 * For other commands on a SKINNY controller use either 2205 * sg32 or sg64 based on the sizeof(bus_addr_t). 2206 * Also calculate the total frame size based on the type 2207 * of SGL used. 2208 */ 2209 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) || 2210 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) || 2211 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) && 2212 (sc->mfi_flags & MFI_FLAGS_SKINNY)) { 2213 for (i = 0; i < nsegs; i++) { 2214 sgl->sg_skinny[i].addr = segs[i].ds_addr; 2215 sgl->sg_skinny[i].len = segs[i].ds_len; 2216 sgl->sg_skinny[i].flag = 0; 2217 } 2218 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64; 2219 sge_size = sizeof(struct mfi_sg_skinny); 2220 hdr->sg_count = nsegs; 2221 } else { 2222 j = 0; 2223 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 2224 first = cm->cm_stp_len; 2225 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 2226 sgl->sg32[j].addr = segs[0].ds_addr; 2227 sgl->sg32[j++].len = first; 2228 } else { 2229 sgl->sg64[j].addr = segs[0].ds_addr; 2230 sgl->sg64[j++].len = first; 2231 } 2232 } else 2233 first = 0; 2234 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { 2235 for (i = 0; i < nsegs; i++) { 2236 sgl->sg32[j].addr = segs[i].ds_addr + first; 2237 sgl->sg32[j++].len = segs[i].ds_len - first; 2238 first = 0; 2239 } 2240 } else { 2241 for (i = 0; i < nsegs; i++) { 2242 sgl->sg64[j].addr = segs[i].ds_addr + first; 2243 sgl->sg64[j++].len = segs[i].ds_len - first; 2244 first = 0; 2245 } 2246 hdr->flags |= MFI_FRAME_SGL64; 2247 } 2248 hdr->sg_count = j; 2249 sge_size = sc->mfi_sge_size; 2250 } 2251 2252 dir = 0; 2253 if (cm->cm_flags & MFI_CMD_DATAIN) { 2254 dir |= BUS_DMASYNC_PREREAD; 2255 hdr->flags |= MFI_FRAME_DIR_READ; 2256 } 2257 if (cm->cm_flags & MFI_CMD_DATAOUT) { 2258 dir |= BUS_DMASYNC_PREWRITE; 2259 hdr->flags |= MFI_FRAME_DIR_WRITE; 2260 } 2261 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 2262 cm->cm_flags |= MFI_CMD_MAPPED; 2263 2264 /* 2265 * Instead of calculating the total number of frames in the 2266 * compound frame, it's already assumed that there will be at 2267 * least 1 frame, so don't compensate for the modulo of the 2268 * following division. 2269 */ 2270 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); 2271 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 2272 2273 if (sc->MFA_enabled) 2274 mfi_tbolt_send_frame(sc, cm); 2275 else 2276 mfi_send_frame(sc, cm); 2277 2278 return; 2279 } 2280 2281 static int 2282 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 2283 { 2284 struct mfi_frame_header *hdr; 2285 int tm = MFI_POLL_TIMEOUT_SECS * 1000; 2286 2287 hdr = &cm->cm_frame->header; 2288 2289 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { 2290 cm->cm_timestamp = time_uptime; 2291 mfi_enqueue_busy(cm); 2292 } else { 2293 hdr->cmd_status = MFI_STAT_INVALID_STATUS; 2294 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2295 } 2296 2297 /* 2298 * The bus address of the command is aligned on a 64 byte boundary, 2299 * leaving the least 6 bits as zero. For whatever reason, the 2300 * hardware wants the address shifted right by three, leaving just 2301 * 3 zero bits. These three bits are then used as a prefetching 2302 * hint for the hardware to predict how many frames need to be 2303 * fetched across the bus. If a command has more than 8 frames 2304 * then the 3 bits are set to 0x7 and the firmware uses other 2305 * information in the command to determine the total amount to fetch. 2306 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames 2307 * is enough for both 32bit and 64bit systems. 2308 */ 2309 if (cm->cm_extra_frames > 7) 2310 cm->cm_extra_frames = 7; 2311 2312 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames); 2313 2314 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) 2315 return (0); 2316 2317 /* This is a polled command, so busy-wait for it to complete. */ 2318 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 2319 DELAY(1000); 2320 tm -= 1; 2321 if (tm <= 0) 2322 break; 2323 } 2324 2325 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 2326 device_printf(sc->mfi_dev, "Frame %p timed out " 2327 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); 2328 return (ETIMEDOUT); 2329 } 2330 2331 return (0); 2332 } 2333 2334 2335 void 2336 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) 2337 { 2338 int dir; 2339 2340 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { 2341 dir = 0; 2342 if ((cm->cm_flags & MFI_CMD_DATAIN) || 2343 (cm->cm_frame->header.cmd == MFI_CMD_STP)) 2344 dir |= BUS_DMASYNC_POSTREAD; 2345 if (cm->cm_flags & MFI_CMD_DATAOUT) 2346 dir |= BUS_DMASYNC_POSTWRITE; 2347 2348 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); 2349 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2350 cm->cm_flags &= ~MFI_CMD_MAPPED; 2351 } 2352 2353 cm->cm_flags |= MFI_CMD_COMPLETED; 2354 2355 if (cm->cm_complete != NULL) 2356 cm->cm_complete(cm); 2357 else 2358 wakeup(cm); 2359 } 2360 2361 static int 2362 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort) 2363 { 2364 struct mfi_command *cm; 2365 struct mfi_abort_frame *abort; 2366 int i = 0; 2367 uint32_t context = 0; 2368 2369 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2370 2371 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2372 return (EBUSY); 2373 } 2374 2375 /* Zero out the MFI frame */ 2376 context = cm->cm_frame->header.context; 2377 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2378 cm->cm_frame->header.context = context; 2379 2380 abort = &cm->cm_frame->abort; 2381 abort->header.cmd = MFI_CMD_ABORT; 2382 abort->header.flags = 0; 2383 abort->header.scsi_status = 0; 2384 abort->abort_context = cm_abort->cm_frame->header.context; 2385 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr; 2386 abort->abort_mfi_addr_hi = 2387 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32); 2388 cm->cm_data = NULL; 2389 cm->cm_flags = MFI_CMD_POLLED; 2390 2391 if (sc->mfi_aen_cm) 2392 sc->cm_aen_abort = 1; 2393 if (sc->mfi_map_sync_cm) 2394 sc->cm_map_abort = 1; 2395 mfi_mapcmd(sc, cm); 2396 mfi_release_command(cm); 2397 2398 while (i < 5 && sc->mfi_aen_cm != NULL) { 2399 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 2400 5 * hz); 2401 i++; 2402 } 2403 while (i < 5 && sc->mfi_map_sync_cm != NULL) { 2404 msleep(&sc->mfi_map_sync_cm, &sc->mfi_io_lock, 0, "mfiabort", 2405 5 * hz); 2406 i++; 2407 } 2408 2409 return (0); 2410 } 2411 2412 int 2413 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2414 int len) 2415 { 2416 struct mfi_command *cm; 2417 struct mfi_io_frame *io; 2418 int error; 2419 uint32_t context = 0; 2420 2421 if ((cm = mfi_dequeue_free(sc)) == NULL) 2422 return (EBUSY); 2423 2424 /* Zero out the MFI frame */ 2425 context = cm->cm_frame->header.context; 2426 bzero(cm->cm_frame, sizeof(union mfi_frame)); 2427 cm->cm_frame->header.context = context; 2428 2429 io = &cm->cm_frame->io; 2430 io->header.cmd = MFI_CMD_LD_WRITE; 2431 io->header.target_id = id; 2432 io->header.timeout = 0; 2433 io->header.flags = 0; 2434 io->header.scsi_status = 0; 2435 io->header.sense_len = MFI_SENSE_LEN; 2436 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2437 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2438 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2439 io->lba_hi = (lba & 0xffffffff00000000) >> 32; 2440 io->lba_lo = lba & 0xffffffff; 2441 cm->cm_data = virt; 2442 cm->cm_len = len; 2443 cm->cm_sg = &io->sgl; 2444 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; 2445 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 2446 2447 error = mfi_mapcmd(sc, cm); 2448 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2449 BUS_DMASYNC_POSTWRITE); 2450 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2451 mfi_release_command(cm); 2452 2453 return (error); 2454 } 2455 2456 int 2457 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, 2458 int len) 2459 { 2460 struct mfi_command *cm; 2461 struct mfi_pass_frame *pass; 2462 int error; 2463 int blkcount = 0; 2464 2465 if ((cm = mfi_dequeue_free(sc)) == NULL) 2466 return (EBUSY); 2467 2468 pass = &cm->cm_frame->pass; 2469 bzero(pass->cdb, 16); 2470 pass->header.cmd = MFI_CMD_PD_SCSI_IO; 2471 pass->cdb[0] = SCSI_WRITE; 2472 pass->cdb[2] = (lba & 0xff000000) >> 24; 2473 pass->cdb[3] = (lba & 0x00ff0000) >> 16; 2474 pass->cdb[4] = (lba & 0x0000ff00) >> 8; 2475 pass->cdb[5] = (lba & 0x000000ff); 2476 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; 2477 pass->cdb[7] = (blkcount & 0xff00) >> 8; 2478 pass->cdb[8] = (blkcount & 0x00ff); 2479 pass->header.target_id = id; 2480 pass->header.timeout = 0; 2481 pass->header.flags = 0; 2482 pass->header.scsi_status = 0; 2483 pass->header.sense_len = MFI_SENSE_LEN; 2484 pass->header.data_len = len; 2485 pass->header.cdb_len = 10; 2486 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; 2487 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 2488 cm->cm_data = virt; 2489 cm->cm_len = len; 2490 cm->cm_sg = &pass->sgl; 2491 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; 2492 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; 2493 2494 error = mfi_mapcmd(sc, cm); 2495 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, 2496 BUS_DMASYNC_POSTWRITE); 2497 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); 2498 mfi_release_command(cm); 2499 2500 return (error); 2501 } 2502 2503 static int 2504 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2505 { 2506 struct mfi_softc *sc; 2507 int error; 2508 2509 sc = dev->si_drv1; 2510 2511 mtx_lock(&sc->mfi_io_lock); 2512 if (sc->mfi_detaching) 2513 error = ENXIO; 2514 else { 2515 sc->mfi_flags |= MFI_FLAGS_OPEN; 2516 error = 0; 2517 } 2518 mtx_unlock(&sc->mfi_io_lock); 2519 2520 return (error); 2521 } 2522 2523 static int 2524 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2525 { 2526 struct mfi_softc *sc; 2527 struct mfi_aen *mfi_aen_entry, *tmp; 2528 2529 sc = dev->si_drv1; 2530 2531 mtx_lock(&sc->mfi_io_lock); 2532 sc->mfi_flags &= ~MFI_FLAGS_OPEN; 2533 2534 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { 2535 if (mfi_aen_entry->p == curproc) { 2536 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 2537 aen_link); 2538 free(mfi_aen_entry, M_MFIBUF); 2539 } 2540 } 2541 mtx_unlock(&sc->mfi_io_lock); 2542 return (0); 2543 } 2544 2545 static int 2546 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) 2547 { 2548 2549 switch (opcode) { 2550 case MFI_DCMD_LD_DELETE: 2551 case MFI_DCMD_CFG_ADD: 2552 case MFI_DCMD_CFG_CLEAR: 2553 sx_xlock(&sc->mfi_config_lock); 2554 return (1); 2555 default: 2556 return (0); 2557 } 2558 } 2559 2560 static void 2561 mfi_config_unlock(struct mfi_softc *sc, int locked) 2562 { 2563 2564 if (locked) 2565 sx_xunlock(&sc->mfi_config_lock); 2566 } 2567 2568 /* 2569 * Perform pre-issue checks on commands from userland and possibly veto 2570 * them. 2571 */ 2572 static int 2573 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) 2574 { 2575 struct mfi_disk *ld, *ld2; 2576 int error; 2577 struct mfi_system_pd *syspd = NULL; 2578 uint16_t syspd_id; 2579 uint16_t *mbox; 2580 2581 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 2582 error = 0; 2583 switch (cm->cm_frame->dcmd.opcode) { 2584 case MFI_DCMD_LD_DELETE: 2585 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2586 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 2587 break; 2588 } 2589 if (ld == NULL) 2590 error = ENOENT; 2591 else 2592 error = mfi_disk_disable(ld); 2593 break; 2594 case MFI_DCMD_CFG_CLEAR: 2595 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2596 error = mfi_disk_disable(ld); 2597 if (error) 2598 break; 2599 } 2600 if (error) { 2601 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { 2602 if (ld2 == ld) 2603 break; 2604 mfi_disk_enable(ld2); 2605 } 2606 } 2607 break; 2608 case MFI_DCMD_PD_STATE_SET: 2609 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox; 2610 syspd_id = mbox[0]; 2611 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { 2612 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { 2613 if (syspd->pd_id == syspd_id) 2614 break; 2615 } 2616 } 2617 else 2618 break; 2619 if (syspd) 2620 error = mfi_syspd_disable(syspd); 2621 break; 2622 default: 2623 break; 2624 } 2625 return (error); 2626 } 2627 2628 /* Perform post-issue checks on commands from userland. */ 2629 static void 2630 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm) 2631 { 2632 struct mfi_disk *ld, *ldn; 2633 struct mfi_system_pd *syspd = NULL; 2634 uint16_t syspd_id; 2635 uint16_t *mbox; 2636 2637 switch (cm->cm_frame->dcmd.opcode) { 2638 case MFI_DCMD_LD_DELETE: 2639 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2640 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) 2641 break; 2642 } 2643 KASSERT(ld != NULL, ("volume dissappeared")); 2644 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 2645 mtx_unlock(&sc->mfi_io_lock); 2646 mtx_lock(&Giant); 2647 device_delete_child(sc->mfi_dev, ld->ld_dev); 2648 mtx_unlock(&Giant); 2649 mtx_lock(&sc->mfi_io_lock); 2650 } else 2651 mfi_disk_enable(ld); 2652 break; 2653 case MFI_DCMD_CFG_CLEAR: 2654 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) { 2655 mtx_unlock(&sc->mfi_io_lock); 2656 mtx_lock(&Giant); 2657 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) { 2658 device_delete_child(sc->mfi_dev, ld->ld_dev); 2659 } 2660 mtx_unlock(&Giant); 2661 mtx_lock(&sc->mfi_io_lock); 2662 } else { 2663 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) 2664 mfi_disk_enable(ld); 2665 } 2666 break; 2667 case MFI_DCMD_CFG_ADD: 2668 mfi_ldprobe(sc); 2669 break; 2670 case MFI_DCMD_CFG_FOREIGN_IMPORT: 2671 mfi_ldprobe(sc); 2672 break; 2673 case MFI_DCMD_PD_STATE_SET: 2674 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox; 2675 syspd_id = mbox[0]; 2676 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { 2677 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) { 2678 if (syspd->pd_id == syspd_id) 2679 break; 2680 } 2681 } 2682 else 2683 break; 2684 /* If the transition fails then enable the syspd again */ 2685 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK) 2686 mfi_syspd_enable(syspd); 2687 break; 2688 } 2689 } 2690 2691 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm) 2692 { 2693 struct mfi_config_data *conf_data=(struct mfi_config_data *)cm->cm_data; 2694 struct mfi_command *ld_cm = NULL; 2695 struct mfi_ld_info *ld_info = NULL; 2696 int error = 0; 2697 2698 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) && 2699 (conf_data->ld[0].params.isSSCD == 1)) { 2700 error = 1; 2701 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) { 2702 error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO, 2703 (void **)&ld_info, sizeof(*ld_info)); 2704 if (error) { 2705 device_printf(sc->mfi_dev, "Failed to allocate" 2706 "MFI_DCMD_LD_GET_INFO %d", error); 2707 if (ld_info) 2708 free(ld_info, M_MFIBUF); 2709 return 0; 2710 } 2711 ld_cm->cm_flags = MFI_CMD_DATAIN; 2712 ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0]; 2713 ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0]; 2714 if (mfi_wait_command(sc, ld_cm) != 0) { 2715 device_printf(sc->mfi_dev, "failed to get log drv\n"); 2716 mfi_release_command(ld_cm); 2717 free(ld_info, M_MFIBUF); 2718 return 0; 2719 } 2720 2721 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) { 2722 free(ld_info, M_MFIBUF); 2723 mfi_release_command(ld_cm); 2724 return 0; 2725 } 2726 else 2727 ld_info = (struct mfi_ld_info *)ld_cm->cm_private; 2728 2729 if (ld_info->ld_config.params.isSSCD == 1) 2730 error = 1; 2731 2732 mfi_release_command(ld_cm); 2733 free(ld_info, M_MFIBUF); 2734 2735 } 2736 return error; 2737 } 2738 2739 static int 2740 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg) 2741 { 2742 uint8_t i; 2743 struct mfi_ioc_packet *ioc; 2744 ioc = (struct mfi_ioc_packet *)arg; 2745 int sge_size, error; 2746 struct megasas_sge *kern_sge; 2747 2748 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr)); 2749 kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off); 2750 cm->cm_frame->header.sg_count = ioc->mfi_sge_count; 2751 2752 if (sizeof(bus_addr_t) == 8) { 2753 cm->cm_frame->header.flags |= MFI_FRAME_SGL64; 2754 cm->cm_extra_frames = 2; 2755 sge_size = sizeof(struct mfi_sg64); 2756 } else { 2757 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; 2758 sge_size = sizeof(struct mfi_sg32); 2759 } 2760 2761 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count); 2762 for (i = 0; i < ioc->mfi_sge_count; i++) { 2763 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 2764 1, 0, /* algnmnt, boundary */ 2765 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 2766 BUS_SPACE_MAXADDR, /* highaddr */ 2767 NULL, NULL, /* filter, filterarg */ 2768 ioc->mfi_sgl[i].iov_len,/* maxsize */ 2769 2, /* nsegments */ 2770 ioc->mfi_sgl[i].iov_len,/* maxsegsize */ 2771 BUS_DMA_ALLOCNOW, /* flags */ 2772 NULL, NULL, /* lockfunc, lockarg */ 2773 &sc->mfi_kbuff_arr_dmat[i])) { 2774 device_printf(sc->mfi_dev, 2775 "Cannot allocate mfi_kbuff_arr_dmat tag\n"); 2776 return (ENOMEM); 2777 } 2778 2779 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i], 2780 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT, 2781 &sc->mfi_kbuff_arr_dmamap[i])) { 2782 device_printf(sc->mfi_dev, 2783 "Cannot allocate mfi_kbuff_arr_dmamap memory\n"); 2784 return (ENOMEM); 2785 } 2786 2787 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i], 2788 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i], 2789 ioc->mfi_sgl[i].iov_len, mfi_addr_cb, 2790 &sc->mfi_kbuff_arr_busaddr[i], 0); 2791 2792 if (!sc->kbuff_arr[i]) { 2793 device_printf(sc->mfi_dev, 2794 "Could not allocate memory for kbuff_arr info\n"); 2795 return -1; 2796 } 2797 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i]; 2798 kern_sge[i].length = ioc->mfi_sgl[i].iov_len; 2799 2800 if (sizeof(bus_addr_t) == 8) { 2801 cm->cm_frame->stp.sgl.sg64[i].addr = 2802 kern_sge[i].phys_addr; 2803 cm->cm_frame->stp.sgl.sg64[i].len = 2804 ioc->mfi_sgl[i].iov_len; 2805 } else { 2806 cm->cm_frame->stp.sgl.sg32[i].len = 2807 kern_sge[i].phys_addr; 2808 cm->cm_frame->stp.sgl.sg32[i].len = 2809 ioc->mfi_sgl[i].iov_len; 2810 } 2811 2812 error = copyin(ioc->mfi_sgl[i].iov_base, 2813 sc->kbuff_arr[i], 2814 ioc->mfi_sgl[i].iov_len); 2815 if (error != 0) { 2816 device_printf(sc->mfi_dev, "Copy in failed\n"); 2817 return error; 2818 } 2819 } 2820 2821 cm->cm_flags |=MFI_CMD_MAPPED; 2822 return 0; 2823 } 2824 2825 static int 2826 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) 2827 { 2828 struct mfi_command *cm; 2829 struct mfi_dcmd_frame *dcmd; 2830 void *ioc_buf = NULL; 2831 uint32_t context; 2832 int error = 0, locked; 2833 2834 2835 if (ioc->buf_size > 0) { 2836 if (ioc->buf_size > 1024 * 1024) 2837 return (ENOMEM); 2838 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); 2839 error = copyin(ioc->buf, ioc_buf, ioc->buf_size); 2840 if (error) { 2841 device_printf(sc->mfi_dev, "failed to copyin\n"); 2842 free(ioc_buf, M_MFIBUF); 2843 return (error); 2844 } 2845 } 2846 2847 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); 2848 2849 mtx_lock(&sc->mfi_io_lock); 2850 while ((cm = mfi_dequeue_free(sc)) == NULL) 2851 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); 2852 2853 /* Save context for later */ 2854 context = cm->cm_frame->header.context; 2855 2856 dcmd = &cm->cm_frame->dcmd; 2857 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); 2858 2859 cm->cm_sg = &dcmd->sgl; 2860 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 2861 cm->cm_data = ioc_buf; 2862 cm->cm_len = ioc->buf_size; 2863 2864 /* restore context */ 2865 cm->cm_frame->header.context = context; 2866 2867 /* Cheat since we don't know if we're writing or reading */ 2868 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 2869 2870 error = mfi_check_command_pre(sc, cm); 2871 if (error) 2872 goto out; 2873 2874 error = mfi_wait_command(sc, cm); 2875 if (error) { 2876 device_printf(sc->mfi_dev, "ioctl failed %d\n", error); 2877 goto out; 2878 } 2879 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); 2880 mfi_check_command_post(sc, cm); 2881 out: 2882 mfi_release_command(cm); 2883 mtx_unlock(&sc->mfi_io_lock); 2884 mfi_config_unlock(sc, locked); 2885 if (ioc->buf_size > 0) 2886 error = copyout(ioc_buf, ioc->buf, ioc->buf_size); 2887 if (ioc_buf) 2888 free(ioc_buf, M_MFIBUF); 2889 return (error); 2890 } 2891 2892 #define PTRIN(p) ((void *)(uintptr_t)(p)) 2893 2894 static int 2895 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2896 { 2897 struct mfi_softc *sc; 2898 union mfi_statrequest *ms; 2899 struct mfi_ioc_packet *ioc; 2900 #ifdef COMPAT_FREEBSD32 2901 struct mfi_ioc_packet32 *ioc32; 2902 #endif 2903 struct mfi_ioc_aen *aen; 2904 struct mfi_command *cm = NULL; 2905 uint32_t context = 0; 2906 union mfi_sense_ptr sense_ptr; 2907 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0; 2908 size_t len; 2909 int i, res; 2910 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; 2911 #ifdef COMPAT_FREEBSD32 2912 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; 2913 struct mfi_ioc_passthru iop_swab; 2914 #endif 2915 int error, locked; 2916 union mfi_sgl *sgl; 2917 sc = dev->si_drv1; 2918 error = 0; 2919 2920 if (sc->adpreset) 2921 return EBUSY; 2922 2923 if (sc->hw_crit_error) 2924 return EBUSY; 2925 2926 if (sc->issuepend_done == 0) 2927 return EBUSY; 2928 2929 switch (cmd) { 2930 case MFIIO_STATS: 2931 ms = (union mfi_statrequest *)arg; 2932 switch (ms->ms_item) { 2933 case MFIQ_FREE: 2934 case MFIQ_BIO: 2935 case MFIQ_READY: 2936 case MFIQ_BUSY: 2937 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, 2938 sizeof(struct mfi_qstat)); 2939 break; 2940 default: 2941 error = ENOIOCTL; 2942 break; 2943 } 2944 break; 2945 case MFIIO_QUERY_DISK: 2946 { 2947 struct mfi_query_disk *qd; 2948 struct mfi_disk *ld; 2949 2950 qd = (struct mfi_query_disk *)arg; 2951 mtx_lock(&sc->mfi_io_lock); 2952 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { 2953 if (ld->ld_id == qd->array_id) 2954 break; 2955 } 2956 if (ld == NULL) { 2957 qd->present = 0; 2958 mtx_unlock(&sc->mfi_io_lock); 2959 return (0); 2960 } 2961 qd->present = 1; 2962 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) 2963 qd->open = 1; 2964 bzero(qd->devname, SPECNAMELEN + 1); 2965 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); 2966 mtx_unlock(&sc->mfi_io_lock); 2967 break; 2968 } 2969 case MFI_CMD: 2970 #ifdef COMPAT_FREEBSD32 2971 case MFI_CMD32: 2972 #endif 2973 { 2974 devclass_t devclass; 2975 ioc = (struct mfi_ioc_packet *)arg; 2976 int adapter; 2977 2978 adapter = ioc->mfi_adapter_no; 2979 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { 2980 devclass = devclass_find("mfi"); 2981 sc = devclass_get_softc(devclass, adapter); 2982 } 2983 mtx_lock(&sc->mfi_io_lock); 2984 if ((cm = mfi_dequeue_free(sc)) == NULL) { 2985 mtx_unlock(&sc->mfi_io_lock); 2986 return (EBUSY); 2987 } 2988 mtx_unlock(&sc->mfi_io_lock); 2989 locked = 0; 2990 2991 /* 2992 * save off original context since copying from user 2993 * will clobber some data 2994 */ 2995 context = cm->cm_frame->header.context; 2996 cm->cm_frame->header.context = cm->cm_index; 2997 2998 bcopy(ioc->mfi_frame.raw, cm->cm_frame, 2999 2 * MEGAMFI_FRAME_SIZE); 3000 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 3001 * ioc->mfi_sge_count) + ioc->mfi_sgl_off; 3002 cm->cm_frame->header.scsi_status = 0; 3003 cm->cm_frame->header.pad0 = 0; 3004 if (ioc->mfi_sge_count) { 3005 cm->cm_sg = 3006 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; 3007 } 3008 sgl = cm->cm_sg; 3009 cm->cm_flags = 0; 3010 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 3011 cm->cm_flags |= MFI_CMD_DATAIN; 3012 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 3013 cm->cm_flags |= MFI_CMD_DATAOUT; 3014 /* Legacy app shim */ 3015 if (cm->cm_flags == 0) 3016 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; 3017 cm->cm_len = cm->cm_frame->header.data_len; 3018 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3019 #ifdef COMPAT_FREEBSD32 3020 if (cmd == MFI_CMD) { 3021 #endif 3022 /* Native */ 3023 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; 3024 #ifdef COMPAT_FREEBSD32 3025 } else { 3026 /* 32bit on 64bit */ 3027 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3028 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; 3029 } 3030 #endif 3031 cm->cm_len += cm->cm_stp_len; 3032 } 3033 if (cm->cm_len && 3034 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 3035 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 3036 M_WAITOK | M_ZERO); 3037 if (cm->cm_data == NULL) { 3038 device_printf(sc->mfi_dev, "Malloc failed\n"); 3039 goto out; 3040 } 3041 } else { 3042 cm->cm_data = 0; 3043 } 3044 3045 /* restore header context */ 3046 cm->cm_frame->header.context = context; 3047 3048 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3049 res = mfi_stp_cmd(sc, cm, arg); 3050 if (res != 0) 3051 goto out; 3052 } else { 3053 temp = data; 3054 if ((cm->cm_flags & MFI_CMD_DATAOUT) || 3055 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 3056 for (i = 0; i < ioc->mfi_sge_count; i++) { 3057 #ifdef COMPAT_FREEBSD32 3058 if (cmd == MFI_CMD) { 3059 #endif 3060 /* Native */ 3061 addr = ioc->mfi_sgl[i].iov_base; 3062 len = ioc->mfi_sgl[i].iov_len; 3063 #ifdef COMPAT_FREEBSD32 3064 } else { 3065 /* 32bit on 64bit */ 3066 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3067 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 3068 len = ioc32->mfi_sgl[i].iov_len; 3069 } 3070 #endif 3071 error = copyin(addr, temp, len); 3072 if (error != 0) { 3073 device_printf(sc->mfi_dev, 3074 "Copy in failed\n"); 3075 goto out; 3076 } 3077 temp = &temp[len]; 3078 } 3079 } 3080 } 3081 3082 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 3083 locked = mfi_config_lock(sc, 3084 cm->cm_frame->dcmd.opcode); 3085 3086 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 3087 cm->cm_frame->pass.sense_addr_lo = 3088 (uint32_t)cm->cm_sense_busaddr; 3089 cm->cm_frame->pass.sense_addr_hi = 3090 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 3091 } 3092 mtx_lock(&sc->mfi_io_lock); 3093 skip_pre_post = mfi_check_for_sscd (sc, cm); 3094 if (!skip_pre_post) { 3095 error = mfi_check_command_pre(sc, cm); 3096 if (error) { 3097 mtx_unlock(&sc->mfi_io_lock); 3098 goto out; 3099 } 3100 } 3101 if ((error = mfi_wait_command(sc, cm)) != 0) { 3102 device_printf(sc->mfi_dev, 3103 "Controller polled failed\n"); 3104 mtx_unlock(&sc->mfi_io_lock); 3105 goto out; 3106 } 3107 if (!skip_pre_post) { 3108 mfi_check_command_post(sc, cm); 3109 } 3110 mtx_unlock(&sc->mfi_io_lock); 3111 3112 if (cm->cm_frame->header.cmd != MFI_CMD_STP) { 3113 temp = data; 3114 if ((cm->cm_flags & MFI_CMD_DATAIN) || 3115 (cm->cm_frame->header.cmd == MFI_CMD_STP)) { 3116 for (i = 0; i < ioc->mfi_sge_count; i++) { 3117 #ifdef COMPAT_FREEBSD32 3118 if (cmd == MFI_CMD) { 3119 #endif 3120 /* Native */ 3121 addr = ioc->mfi_sgl[i].iov_base; 3122 len = ioc->mfi_sgl[i].iov_len; 3123 #ifdef COMPAT_FREEBSD32 3124 } else { 3125 /* 32bit on 64bit */ 3126 ioc32 = (struct mfi_ioc_packet32 *)ioc; 3127 addr = PTRIN(ioc32->mfi_sgl[i].iov_base); 3128 len = ioc32->mfi_sgl[i].iov_len; 3129 } 3130 #endif 3131 error = copyout(temp, addr, len); 3132 if (error != 0) { 3133 device_printf(sc->mfi_dev, 3134 "Copy out failed\n"); 3135 goto out; 3136 } 3137 temp = &temp[len]; 3138 } 3139 } 3140 } 3141 3142 if (ioc->mfi_sense_len) { 3143 /* get user-space sense ptr then copy out sense */ 3144 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], 3145 &sense_ptr.sense_ptr_data[0], 3146 sizeof(sense_ptr.sense_ptr_data)); 3147 #ifdef COMPAT_FREEBSD32 3148 if (cmd != MFI_CMD) { 3149 /* 3150 * not 64bit native so zero out any address 3151 * over 32bit */ 3152 sense_ptr.addr.high = 0; 3153 } 3154 #endif 3155 error = copyout(cm->cm_sense, sense_ptr.user_space, 3156 ioc->mfi_sense_len); 3157 if (error != 0) { 3158 device_printf(sc->mfi_dev, 3159 "Copy out failed\n"); 3160 goto out; 3161 } 3162 } 3163 3164 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; 3165 out: 3166 mfi_config_unlock(sc, locked); 3167 if (data) 3168 free(data, M_MFIBUF); 3169 if (cm->cm_frame->header.cmd == MFI_CMD_STP) { 3170 for (i = 0; i < 2; i++) { 3171 if (sc->kbuff_arr[i]) { 3172 if (sc->mfi_kbuff_arr_busaddr != 0) 3173 bus_dmamap_unload( 3174 sc->mfi_kbuff_arr_dmat[i], 3175 sc->mfi_kbuff_arr_dmamap[i] 3176 ); 3177 if (sc->kbuff_arr[i] != NULL) 3178 bus_dmamem_free( 3179 sc->mfi_kbuff_arr_dmat[i], 3180 sc->kbuff_arr[i], 3181 sc->mfi_kbuff_arr_dmamap[i] 3182 ); 3183 if (sc->mfi_kbuff_arr_dmat[i] != NULL) 3184 bus_dma_tag_destroy( 3185 sc->mfi_kbuff_arr_dmat[i]); 3186 } 3187 } 3188 } 3189 if (cm) { 3190 mtx_lock(&sc->mfi_io_lock); 3191 mfi_release_command(cm); 3192 mtx_unlock(&sc->mfi_io_lock); 3193 } 3194 3195 break; 3196 } 3197 case MFI_SET_AEN: 3198 aen = (struct mfi_ioc_aen *)arg; 3199 error = mfi_aen_register(sc, aen->aen_seq_num, 3200 aen->aen_class_locale); 3201 3202 break; 3203 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 3204 { 3205 devclass_t devclass; 3206 struct mfi_linux_ioc_packet l_ioc; 3207 int adapter; 3208 3209 devclass = devclass_find("mfi"); 3210 if (devclass == NULL) 3211 return (ENOENT); 3212 3213 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 3214 if (error) 3215 return (error); 3216 adapter = l_ioc.lioc_adapter_no; 3217 sc = devclass_get_softc(devclass, adapter); 3218 if (sc == NULL) 3219 return (ENOENT); 3220 return (mfi_linux_ioctl_int(sc->mfi_cdev, 3221 cmd, arg, flag, td)); 3222 break; 3223 } 3224 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 3225 { 3226 devclass_t devclass; 3227 struct mfi_linux_ioc_aen l_aen; 3228 int adapter; 3229 3230 devclass = devclass_find("mfi"); 3231 if (devclass == NULL) 3232 return (ENOENT); 3233 3234 error = copyin(arg, &l_aen, sizeof(l_aen)); 3235 if (error) 3236 return (error); 3237 adapter = l_aen.laen_adapter_no; 3238 sc = devclass_get_softc(devclass, adapter); 3239 if (sc == NULL) 3240 return (ENOENT); 3241 return (mfi_linux_ioctl_int(sc->mfi_cdev, 3242 cmd, arg, flag, td)); 3243 break; 3244 } 3245 #ifdef COMPAT_FREEBSD32 3246 case MFIIO_PASSTHRU32: 3247 if (!SV_CURPROC_FLAG(SV_ILP32)) { 3248 error = ENOTTY; 3249 break; 3250 } 3251 iop_swab.ioc_frame = iop32->ioc_frame; 3252 iop_swab.buf_size = iop32->buf_size; 3253 iop_swab.buf = PTRIN(iop32->buf); 3254 iop = &iop_swab; 3255 /* FALLTHROUGH */ 3256 #endif 3257 case MFIIO_PASSTHRU: 3258 error = mfi_user_command(sc, iop); 3259 #ifdef COMPAT_FREEBSD32 3260 if (cmd == MFIIO_PASSTHRU32) 3261 iop32->ioc_frame = iop_swab.ioc_frame; 3262 #endif 3263 break; 3264 default: 3265 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 3266 error = ENOTTY; 3267 break; 3268 } 3269 3270 return (error); 3271 } 3272 3273 static int 3274 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 3275 { 3276 struct mfi_softc *sc; 3277 struct mfi_linux_ioc_packet l_ioc; 3278 struct mfi_linux_ioc_aen l_aen; 3279 struct mfi_command *cm = NULL; 3280 struct mfi_aen *mfi_aen_entry; 3281 union mfi_sense_ptr sense_ptr; 3282 uint32_t context = 0; 3283 uint8_t *data = NULL, *temp; 3284 int i; 3285 int error, locked; 3286 3287 sc = dev->si_drv1; 3288 error = 0; 3289 switch (cmd) { 3290 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ 3291 error = copyin(arg, &l_ioc, sizeof(l_ioc)); 3292 if (error != 0) 3293 return (error); 3294 3295 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { 3296 return (EINVAL); 3297 } 3298 3299 mtx_lock(&sc->mfi_io_lock); 3300 if ((cm = mfi_dequeue_free(sc)) == NULL) { 3301 mtx_unlock(&sc->mfi_io_lock); 3302 return (EBUSY); 3303 } 3304 mtx_unlock(&sc->mfi_io_lock); 3305 locked = 0; 3306 3307 /* 3308 * save off original context since copying from user 3309 * will clobber some data 3310 */ 3311 context = cm->cm_frame->header.context; 3312 3313 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 3314 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ 3315 cm->cm_total_frame_size = (sizeof(union mfi_sgl) 3316 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; 3317 cm->cm_frame->header.scsi_status = 0; 3318 cm->cm_frame->header.pad0 = 0; 3319 if (l_ioc.lioc_sge_count) 3320 cm->cm_sg = 3321 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; 3322 cm->cm_flags = 0; 3323 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) 3324 cm->cm_flags |= MFI_CMD_DATAIN; 3325 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) 3326 cm->cm_flags |= MFI_CMD_DATAOUT; 3327 cm->cm_len = cm->cm_frame->header.data_len; 3328 if (cm->cm_len && 3329 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { 3330 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, 3331 M_WAITOK | M_ZERO); 3332 if (cm->cm_data == NULL) { 3333 device_printf(sc->mfi_dev, "Malloc failed\n"); 3334 goto out; 3335 } 3336 } else { 3337 cm->cm_data = 0; 3338 } 3339 3340 /* restore header context */ 3341 cm->cm_frame->header.context = context; 3342 3343 temp = data; 3344 if (cm->cm_flags & MFI_CMD_DATAOUT) { 3345 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 3346 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), 3347 temp, 3348 l_ioc.lioc_sgl[i].iov_len); 3349 if (error != 0) { 3350 device_printf(sc->mfi_dev, 3351 "Copy in failed\n"); 3352 goto out; 3353 } 3354 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 3355 } 3356 } 3357 3358 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) 3359 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); 3360 3361 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { 3362 cm->cm_frame->pass.sense_addr_lo = 3363 (uint32_t)cm->cm_sense_busaddr; 3364 cm->cm_frame->pass.sense_addr_hi = 3365 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); 3366 } 3367 3368 mtx_lock(&sc->mfi_io_lock); 3369 error = mfi_check_command_pre(sc, cm); 3370 if (error) { 3371 mtx_unlock(&sc->mfi_io_lock); 3372 goto out; 3373 } 3374 3375 if ((error = mfi_wait_command(sc, cm)) != 0) { 3376 device_printf(sc->mfi_dev, 3377 "Controller polled failed\n"); 3378 mtx_unlock(&sc->mfi_io_lock); 3379 goto out; 3380 } 3381 3382 mfi_check_command_post(sc, cm); 3383 mtx_unlock(&sc->mfi_io_lock); 3384 3385 temp = data; 3386 if (cm->cm_flags & MFI_CMD_DATAIN) { 3387 for (i = 0; i < l_ioc.lioc_sge_count; i++) { 3388 error = copyout(temp, 3389 PTRIN(l_ioc.lioc_sgl[i].iov_base), 3390 l_ioc.lioc_sgl[i].iov_len); 3391 if (error != 0) { 3392 device_printf(sc->mfi_dev, 3393 "Copy out failed\n"); 3394 goto out; 3395 } 3396 temp = &temp[l_ioc.lioc_sgl[i].iov_len]; 3397 } 3398 } 3399 3400 if (l_ioc.lioc_sense_len) { 3401 /* get user-space sense ptr then copy out sense */ 3402 bcopy(&((struct mfi_linux_ioc_packet*)arg) 3403 ->lioc_frame.raw[l_ioc.lioc_sense_off], 3404 &sense_ptr.sense_ptr_data[0], 3405 sizeof(sense_ptr.sense_ptr_data)); 3406 #ifdef __amd64__ 3407 /* 3408 * only 32bit Linux support so zero out any 3409 * address over 32bit 3410 */ 3411 sense_ptr.addr.high = 0; 3412 #endif 3413 error = copyout(cm->cm_sense, sense_ptr.user_space, 3414 l_ioc.lioc_sense_len); 3415 if (error != 0) { 3416 device_printf(sc->mfi_dev, 3417 "Copy out failed\n"); 3418 goto out; 3419 } 3420 } 3421 3422 error = copyout(&cm->cm_frame->header.cmd_status, 3423 &((struct mfi_linux_ioc_packet*)arg) 3424 ->lioc_frame.hdr.cmd_status, 3425 1); 3426 if (error != 0) { 3427 device_printf(sc->mfi_dev, 3428 "Copy out failed\n"); 3429 goto out; 3430 } 3431 3432 out: 3433 mfi_config_unlock(sc, locked); 3434 if (data) 3435 free(data, M_MFIBUF); 3436 if (cm) { 3437 mtx_lock(&sc->mfi_io_lock); 3438 mfi_release_command(cm); 3439 mtx_unlock(&sc->mfi_io_lock); 3440 } 3441 3442 return (error); 3443 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ 3444 error = copyin(arg, &l_aen, sizeof(l_aen)); 3445 if (error != 0) 3446 return (error); 3447 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); 3448 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, 3449 M_WAITOK); 3450 mtx_lock(&sc->mfi_io_lock); 3451 if (mfi_aen_entry != NULL) { 3452 mfi_aen_entry->p = curproc; 3453 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, 3454 aen_link); 3455 } 3456 error = mfi_aen_register(sc, l_aen.laen_seq_num, 3457 l_aen.laen_class_locale); 3458 3459 if (error != 0) { 3460 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, 3461 aen_link); 3462 free(mfi_aen_entry, M_MFIBUF); 3463 } 3464 mtx_unlock(&sc->mfi_io_lock); 3465 3466 return (error); 3467 default: 3468 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); 3469 error = ENOENT; 3470 break; 3471 } 3472 3473 return (error); 3474 } 3475 3476 static int 3477 mfi_poll(struct cdev *dev, int poll_events, struct thread *td) 3478 { 3479 struct mfi_softc *sc; 3480 int revents = 0; 3481 3482 sc = dev->si_drv1; 3483 3484 if (poll_events & (POLLIN | POLLRDNORM)) { 3485 if (sc->mfi_aen_triggered != 0) { 3486 revents |= poll_events & (POLLIN | POLLRDNORM); 3487 sc->mfi_aen_triggered = 0; 3488 } 3489 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { 3490 revents |= POLLERR; 3491 } 3492 } 3493 3494 if (revents == 0) { 3495 if (poll_events & (POLLIN | POLLRDNORM)) { 3496 sc->mfi_poll_waiting = 1; 3497 selrecord(td, &sc->mfi_select); 3498 } 3499 } 3500 3501 return revents; 3502 } 3503 3504 static void 3505 mfi_dump_all(void) 3506 { 3507 struct mfi_softc *sc; 3508 struct mfi_command *cm; 3509 devclass_t dc; 3510 time_t deadline; 3511 int timedout; 3512 int i; 3513 3514 dc = devclass_find("mfi"); 3515 if (dc == NULL) { 3516 printf("No mfi dev class\n"); 3517 return; 3518 } 3519 3520 for (i = 0; ; i++) { 3521 sc = devclass_get_softc(dc, i); 3522 if (sc == NULL) 3523 break; 3524 device_printf(sc->mfi_dev, "Dumping\n\n"); 3525 timedout = 0; 3526 deadline = time_uptime - MFI_CMD_TIMEOUT; 3527 mtx_lock(&sc->mfi_io_lock); 3528 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 3529 if (cm->cm_timestamp < deadline) { 3530 device_printf(sc->mfi_dev, 3531 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3532 cm, (int)(time_uptime - cm->cm_timestamp)); 3533 MFI_PRINT_CMD(cm); 3534 timedout++; 3535 } 3536 } 3537 3538 #if 0 3539 if (timedout) 3540 MFI_DUMP_CMDS(SC); 3541 #endif 3542 3543 mtx_unlock(&sc->mfi_io_lock); 3544 } 3545 3546 return; 3547 } 3548 3549 static void 3550 mfi_timeout(void *data) 3551 { 3552 struct mfi_softc *sc = (struct mfi_softc *)data; 3553 struct mfi_command *cm; 3554 time_t deadline; 3555 int timedout = 0; 3556 3557 deadline = time_uptime - MFI_CMD_TIMEOUT; 3558 if (sc->adpreset == 0) { 3559 if (!mfi_tbolt_reset(sc)) { 3560 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc); 3561 return; 3562 } 3563 } 3564 mtx_lock(&sc->mfi_io_lock); 3565 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { 3566 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm) 3567 continue; 3568 if (cm->cm_timestamp < deadline) { 3569 if (sc->adpreset != 0 && sc->issuepend_done == 0) { 3570 cm->cm_timestamp = time_uptime; 3571 } else { 3572 device_printf(sc->mfi_dev, 3573 "COMMAND %p TIMEOUT AFTER %d SECONDS\n", 3574 cm, (int)(time_uptime - cm->cm_timestamp) 3575 ); 3576 MFI_PRINT_CMD(cm); 3577 MFI_VALIDATE_CMD(sc, cm); 3578 timedout++; 3579 } 3580 } 3581 } 3582 3583 #if 0 3584 if (timedout) 3585 MFI_DUMP_CMDS(SC); 3586 #endif 3587 3588 mtx_unlock(&sc->mfi_io_lock); 3589 3590 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, 3591 mfi_timeout, sc); 3592 3593 if (0) 3594 mfi_dump_all(); 3595 return; 3596 } 3597