xref: /freebsd/sys/dev/mfi/mfi.c (revision 57c4583f70ab9d25b3aed17f20ec7843f9673539)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_mfi.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/poll.h>
38 #include <sys/selinfo.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <sys/eventhandler.h>
42 #include <sys/rman.h>
43 #include <sys/bus_dma.h>
44 #include <sys/bio.h>
45 #include <sys/ioccom.h>
46 #include <sys/uio.h>
47 #include <sys/proc.h>
48 #include <sys/signalvar.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 
53 #include <dev/mfi/mfireg.h>
54 #include <dev/mfi/mfi_ioctl.h>
55 #include <dev/mfi/mfivar.h>
56 
57 static int	mfi_alloc_commands(struct mfi_softc *);
58 static void	mfi_release_command(struct mfi_command *cm);
59 static int	mfi_comms_init(struct mfi_softc *);
60 static int	mfi_polled_command(struct mfi_softc *, struct mfi_command *);
61 static int	mfi_wait_command(struct mfi_softc *, struct mfi_command *);
62 static int	mfi_get_controller_info(struct mfi_softc *);
63 static int	mfi_get_log_state(struct mfi_softc *,
64 		    struct mfi_evt_log_state **);
65 static int	mfi_get_entry(struct mfi_softc *, int);
66 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
67 		    uint32_t, void **, size_t);
68 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
69 static void	mfi_startup(void *arg);
70 static void	mfi_intr(void *arg);
71 static void	mfi_enable_intr(struct mfi_softc *sc);
72 static void	mfi_ldprobe(struct mfi_softc *sc);
73 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
74 static void	mfi_aen_complete(struct mfi_command *);
75 static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
76 static int	mfi_add_ld(struct mfi_softc *sc, int);
77 static void	mfi_add_ld_complete(struct mfi_command *);
78 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
79 static void	mfi_bio_complete(struct mfi_command *);
80 static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
81 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
82 static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
83 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
84 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
85 static void	mfi_timeout(void *);
86 
87 
88 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
89 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
90 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
91 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
92             0, "event message locale");
93 
94 static int	mfi_event_class =  MFI_EVT_CLASS_DEBUG;
95 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
96 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
97           0, "event message class");
98 
99 /* Management interface */
100 static d_open_t		mfi_open;
101 static d_close_t	mfi_close;
102 static d_ioctl_t	mfi_ioctl;
103 static d_poll_t		mfi_poll;
104 
105 static struct cdevsw mfi_cdevsw = {
106 	.d_version = 	D_VERSION,
107 	.d_flags =	0,
108 	.d_open = 	mfi_open,
109 	.d_close =	mfi_close,
110 	.d_ioctl =	mfi_ioctl,
111 	.d_poll =	mfi_poll,
112 	.d_name =	"mfi",
113 };
114 
115 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
116 
117 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
118 
/*
 * Drive the controller firmware toward the READY state.  OMSG0 is polled
 * for the current state; each intermediate state gets the doorbell write
 * it requires plus a state-specific wait budget.  Returns 0 once the
 * firmware reports READY, or ENXIO on a fault, an unknown state, or a
 * state that fails to change within its budget.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;	/* max_wait is in seconds */

	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			/* Fatal; no doorbell write can recover this. */
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask the firmware to transition back to READY. */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = 10;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_DEVICE_SCAN:
		case MFI_FWSTATE_FLUSH_CACHE:
			/* These can legitimately take a while; wait longer. */
			max_wait = 20;
			break;
		default:
			device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll in 100ms steps until the state changes or time is up. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
172 
173 static void
174 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
175 {
176 	uint32_t *addr;
177 
178 	addr = arg;
179 	*addr = segs[0].ds_addr;
180 }
181 
182 int
183 mfi_attach(struct mfi_softc *sc)
184 {
185 	uint32_t status;
186 	int error, commsz, framessz, sensesz;
187 	int frames, unit, max_fw_sge;
188 
189 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
190 	TAILQ_INIT(&sc->mfi_ld_tqh);
191 	TAILQ_INIT(&sc->mfi_aen_pids);
192 
193 	mfi_initq_free(sc);
194 	mfi_initq_ready(sc);
195 	mfi_initq_busy(sc);
196 	mfi_initq_bio(sc);
197 
198 	/* Before we get too far, see if the firmware is working */
199 	if ((error = mfi_transition_firmware(sc)) != 0) {
200 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
201 		    "error %d\n", error);
202 		return (ENXIO);
203 	}
204 
205 	/*
206 	 * Get information needed for sizing the contiguous memory for the
207 	 * frame pool.  Size down the sgl parameter since we know that
208 	 * we will never need more than what's required for MAXPHYS.
209 	 * It would be nice if these constants were available at runtime
210 	 * instead of compile time.
211 	 */
212 	status = MFI_READ4(sc, MFI_OMSG0);
213 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
214 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
215 	sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
216 
217 	/*
218 	 * Create the dma tag for data buffers.  Used both for block I/O
219 	 * and for various internal data queries.
220 	 */
221 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
222 				1, 0,			/* algnmnt, boundary */
223 				BUS_SPACE_MAXADDR,	/* lowaddr */
224 				BUS_SPACE_MAXADDR,	/* highaddr */
225 				NULL, NULL,		/* filter, filterarg */
226 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
227 				sc->mfi_max_sge,	/* nsegments */
228 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
229 				BUS_DMA_ALLOCNOW,	/* flags */
230 				busdma_lock_mutex,	/* lockfunc */
231 				&sc->mfi_io_lock,	/* lockfuncarg */
232 				&sc->mfi_buffer_dmat)) {
233 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
234 		return (ENOMEM);
235 	}
236 
237 	/*
238 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
239 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
240 	 * entry, so the calculated size here will be will be 1 more than
241 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
242 	 */
243 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
244 	    sizeof(struct mfi_hwcomms);
245 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
246 				1, 0,			/* algnmnt, boundary */
247 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
248 				BUS_SPACE_MAXADDR,	/* highaddr */
249 				NULL, NULL,		/* filter, filterarg */
250 				commsz,			/* maxsize */
251 				1,			/* msegments */
252 				commsz,			/* maxsegsize */
253 				0,			/* flags */
254 				NULL, NULL,		/* lockfunc, lockarg */
255 				&sc->mfi_comms_dmat)) {
256 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
257 		return (ENOMEM);
258 	}
259 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
260 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
261 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
262 		return (ENOMEM);
263 	}
264 	bzero(sc->mfi_comms, commsz);
265 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
266 	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
267 
268 	/*
269 	 * Allocate DMA memory for the command frames.  Keep them in the
270 	 * lower 4GB for efficiency.  Calculate the size of the commands at
271 	 * the same time; each command is one 64 byte frame plus a set of
272          * additional frames for holding sg lists or other data.
273 	 * The assumption here is that the SG list will start at the second
274 	 * frame and not use the unused bytes in the first frame.  While this
275 	 * isn't technically correct, it simplifies the calculation and allows
276 	 * for command frames that might be larger than an mfi_io_frame.
277 	 */
278 	if (sizeof(bus_addr_t) == 8) {
279 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
280 		sc->mfi_flags |= MFI_FLAGS_SG64;
281 	} else {
282 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
283 	}
284 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
285 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
286 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
287 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
288 				64, 0,			/* algnmnt, boundary */
289 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
290 				BUS_SPACE_MAXADDR,	/* highaddr */
291 				NULL, NULL,		/* filter, filterarg */
292 				framessz,		/* maxsize */
293 				1,			/* nsegments */
294 				framessz,		/* maxsegsize */
295 				0,			/* flags */
296 				NULL, NULL,		/* lockfunc, lockarg */
297 				&sc->mfi_frames_dmat)) {
298 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
299 		return (ENOMEM);
300 	}
301 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
302 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
303 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
304 		return (ENOMEM);
305 	}
306 	bzero(sc->mfi_frames, framessz);
307 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
308 	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
309 
310 	/*
311 	 * Allocate DMA memory for the frame sense data.  Keep them in the
312 	 * lower 4GB for efficiency
313 	 */
314 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
315 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
316 				4, 0,			/* algnmnt, boundary */
317 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
318 				BUS_SPACE_MAXADDR,	/* highaddr */
319 				NULL, NULL,		/* filter, filterarg */
320 				sensesz,		/* maxsize */
321 				1,			/* nsegments */
322 				sensesz,		/* maxsegsize */
323 				0,			/* flags */
324 				NULL, NULL,		/* lockfunc, lockarg */
325 				&sc->mfi_sense_dmat)) {
326 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
327 		return (ENOMEM);
328 	}
329 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
330 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
331 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
332 		return (ENOMEM);
333 	}
334 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
335 	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
336 
337 	if ((error = mfi_alloc_commands(sc)) != 0)
338 		return (error);
339 
340 	if ((error = mfi_comms_init(sc)) != 0)
341 		return (error);
342 
343 	if ((error = mfi_get_controller_info(sc)) != 0)
344 		return (error);
345 
346 	mtx_lock(&sc->mfi_io_lock);
347 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
348 		mtx_unlock(&sc->mfi_io_lock);
349 		return (error);
350 	}
351 	mtx_unlock(&sc->mfi_io_lock);
352 
353 	/*
354 	 * Set up the interrupt handler.  XXX This should happen in
355 	 * mfi_pci.c
356 	 */
357 	sc->mfi_irq_rid = 0;
358 	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
359 	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
360 		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
361 		return (EINVAL);
362 	}
363 	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
364 	    mfi_intr, sc, &sc->mfi_intr)) {
365 		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
366 		return (EINVAL);
367 	}
368 
369 	/* Register a config hook to probe the bus for arrays */
370 	sc->mfi_ich.ich_func = mfi_startup;
371 	sc->mfi_ich.ich_arg = sc;
372 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
373 		device_printf(sc->mfi_dev, "Cannot establish configuration "
374 		    "hook\n");
375 		return (EINVAL);
376 	}
377 
378 	/*
379 	 * Register a shutdown handler.
380 	 */
381 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
382 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
383 		device_printf(sc->mfi_dev, "Warning: shutdown event "
384 		    "registration failed\n");
385 	}
386 
387 	/*
388 	 * Create the control device for doing management
389 	 */
390 	unit = device_get_unit(sc->mfi_dev);
391 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
392 	    0640, "mfi%d", unit);
393 	if (unit == 0)
394 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
395 	if (sc->mfi_cdev != NULL)
396 		sc->mfi_cdev->si_drv1 = sc;
397 
398 	/* Start the timeout watchdog */
399 	callout_init(&sc->mfi_watchdog_callout, 1);
400 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
401 	    mfi_timeout, sc);
402 
403 	return (0);
404 }
405 
/*
 * Allocate the driver-side command pool: one struct mfi_command per
 * firmware command slot, each wired to its pre-allocated DMA frame and
 * sense buffer by index, plus a busdma map for data transfers.
 *
 * NOTE(review): if bus_dmamap_create() fails partway through, the loop
 * breaks and the function still returns 0 — the driver then runs with
 * however many commands were set up (sc->mfi_total_cmds tracks the real
 * count, which mfi_free() uses for teardown).  A total of zero would
 * leave the driver unusable; confirm whether that case needs ENOMEM.
 */
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = sc->mfi_max_fw_cmds;
	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		/* Carve this command's slice out of the contiguous frame pool. */
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		/* The context is how completions map back to this command. */
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0)
			mfi_release_command(cm);	/* puts it on the free queue */
		else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}
441 
442 static void
443 mfi_release_command(struct mfi_command *cm)
444 {
445 	struct mfi_frame_header *hdr;
446 	uint32_t *hdr_data;
447 
448 	/*
449 	 * Zero out the important fields of the frame, but make sure the
450 	 * context field is preserved
451 	 */
452 	hdr_data = (uint32_t *)cm->cm_frame;
453 	hdr_data[0] = 0;
454 	hdr_data[1] = 0;
455 
456 	hdr = &cm->cm_frame->header;
457 	if (hdr->sg_count) {
458 		cm->cm_sg->sg32[0].len = 0;
459 		cm->cm_sg->sg32[0].addr = 0;
460 	}
461 	cm->cm_extra_frames = 0;
462 	cm->cm_flags = 0;
463 	cm->cm_complete = NULL;
464 	cm->cm_private = NULL;
465 	cm->cm_sg = 0;
466 	cm->cm_total_frame_size = 0;
467 
468 	mfi_enqueue_free(cm);
469 }
470 
/*
 * Build a DCMD (direct command) frame for the given opcode.  Dequeues a
 * free command, optionally allocates (or reuses) a data buffer of
 * bufsize bytes, and fills in the DCMD header.  On success the command
 * is returned via *cmp and any allocated buffer via *bufp; the caller
 * owns both and is responsible for releasing the command and freeing
 * the buffer.  Returns EBUSY if no command is free, ENOMEM if the
 * buffer allocation fails.  Must be called with mfi_io_lock held.
 */
static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
    void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* A buffer is only involved when the caller asked for one. */
	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			/* M_NOWAIT: we hold a mutex, so we must not sleep. */
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd =  &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	/* NOTE(review): redundant — *bufp was already set above when buf
	 * was freshly allocated; harmless either way. */
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}
517 
518 static int
519 mfi_comms_init(struct mfi_softc *sc)
520 {
521 	struct mfi_command *cm;
522 	struct mfi_init_frame *init;
523 	struct mfi_init_qinfo *qinfo;
524 	int error;
525 
526 	mtx_lock(&sc->mfi_io_lock);
527 	if ((cm = mfi_dequeue_free(sc)) == NULL)
528 		return (EBUSY);
529 
530 	/*
531 	 * Abuse the SG list area of the frame to hold the init_qinfo
532 	 * object;
533 	 */
534 	init = &cm->cm_frame->init;
535 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
536 
537 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
538 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
539 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
540 	    offsetof(struct mfi_hwcomms, hw_reply_q);
541 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
542 	    offsetof(struct mfi_hwcomms, hw_pi);
543 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
544 	    offsetof(struct mfi_hwcomms, hw_ci);
545 
546 	init->header.cmd = MFI_CMD_INIT;
547 	init->header.data_len = sizeof(struct mfi_init_qinfo);
548 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
549 
550 	if ((error = mfi_polled_command(sc, cm)) != 0) {
551 		device_printf(sc->mfi_dev, "failed to send init command\n");
552 		mtx_unlock(&sc->mfi_io_lock);
553 		return (error);
554 	}
555 	mfi_release_command(cm);
556 	mtx_unlock(&sc->mfi_io_lock);
557 
558 	return (0);
559 }
560 
/*
 * Query the controller for its info structure (MFI_DCMD_CTRL_GETINFO)
 * and derive the maximum I/O size (in sectors) from the stripe and
 * request-size limits it reports.  If the query itself fails, a default
 * based on the SG limit is used instead and 0 is returned — the failure
 * is deliberately non-fatal.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	/* Map failure frees everything here and returns directly,
	 * bypassing "out" so nothing is freed twice. */
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
		free(ci, M_MFIBUF);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	/* It's ok if this fails, just use default info instead */
	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Cap I/O at the smaller of the stripe-based and absolute limits. */
	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
609 
/*
 * Fetch the controller's event-log state (sequence number watermarks)
 * via MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points to a
 * newly allocated buffer the caller must free with M_MFIBUF; on failure
 * *log_state may still have been allocated — the caller is responsible
 * for freeing it in that case too (mfi_aen_setup does).  Expects
 * mfi_io_lock to be held (mfi_dcmd_command asserts it).
 */
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Log state buffer map failed\n");
		goto out;
	}

	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	/* Make the DMA'd data visible to the CPU before the caller reads it. */
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}
642 
/*
 * Arm asynchronous event notification (AEN).  If seq_start is 0, the
 * controller's log state is queried and every event between the last
 * shutdown sequence number and the newest is fetched individually, then
 * AEN registration begins at the sequence number following the newest.
 * Otherwise registration starts at seq_start directly.  The locale and
 * class filters come from the hw.mfi.* tunables.  Called with
 * mfi_io_lock held.
 */
static int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.class  = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			/* The buffer may exist even when the query failed. */
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		/*
		 * Don't run them yet since we can't parse them.
		 * We can indirectly get the contents from
		 * the AEN mechanism via setting it lower then
		 * current.  The firmware will iterate through them.
		 */
		for (seq = log_state->shutdown_seq_num;
		     seq <= log_state->newest_seq_num; seq++) {
			mfi_get_entry(sc, seq);
		}
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	/* free(NULL, ...) is a no-op when seq_start was nonzero. */
	free(log_state, M_MFIBUF);

	return 0;
}
679 
680 static int
681 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
682 {
683 	struct mfi_frame_header *hdr;
684 	int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
685 
686 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
687 
688 	hdr = &cm->cm_frame->header;
689 	hdr->cmd_status = 0xff;
690 	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
691 
692 	mfi_send_frame(sc, cm);
693 
694 	while (hdr->cmd_status == 0xff) {
695 		DELAY(1000);
696 		tm -= 1000;
697 		if (tm <= 0)
698 			break;
699 	}
700 
701 	if (hdr->cmd_status == 0xff) {
702 		device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
703 		return (ETIMEDOUT);
704 	}
705 
706 	return (0);
707 }
708 
/*
 * Issue a command and sleep until it completes.  The completion callback
 * is cleared so that mfi_complete() falls through to the wakeup path.
 * Returns the msleep() result (0 on wakeup).  Caller holds mfi_io_lock,
 * which msleep drops while sleeping and reacquires before returning.
 */
static int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
}
720 
/*
 * Tear down everything mfi_attach() set up, in roughly reverse order:
 * watchdog, control device, command pool, interrupt, then each DMA area
 * (unload map, free memory, destroy tag).  Every step is guarded so this
 * is safe to call after a partial attach.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	/* Wait out any in-flight watchdog callout before freeing state. */
	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	/* mfi_total_cmds reflects how many commands were actually set up. */
	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Sense buffers: unload, free, destroy tag. */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	/* Command frame pool. */
	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	/* Comms queues (producer/consumer indices and reply queue). */
	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock))
		mtx_destroy(&sc->mfi_io_lock);

	return;
}
780 
781 static void
782 mfi_startup(void *arg)
783 {
784 	struct mfi_softc *sc;
785 
786 	sc = (struct mfi_softc *)arg;
787 
788 	config_intrhook_disestablish(&sc->mfi_ich);
789 
790 	mfi_enable_intr(sc);
791 	mtx_lock(&sc->mfi_io_lock);
792 	mfi_ldprobe(sc);
793 	mtx_unlock(&sc->mfi_io_lock);
794 }
795 
/*
 * Interrupt handler.  Verifies the interrupt is ours, acknowledges it,
 * then drains the reply queue: each entry carries a command context
 * (the command's index), which is used to look up and complete the
 * command.  Finally the consumer index is published back to the
 * firmware and any frozen/deferred I/O is restarted.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t status, pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Shared interrupt line: bail if this one isn't for us. */
	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return;

	/* Acknowledge by writing the status value back. */
	MFI_WRITE4(sc, MFI_OSTS, status);

	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		cm = &sc->mfi_commands[context];
		mfi_remove_busy(cm);
		mfi_complete(sc, cm);
		/* The reply queue has mfi_max_fw_cmds + 1 entries; wrap. */
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run now that commands completed. */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	return;
}
834 
/*
 * Shutdown-time flush: abort the outstanding AEN command (if any) and
 * send MFI_DCMD_CTRL_SHUTDOWN to the controller by polling, since
 * interrupts may no longer be reliable at shutdown.  Registered as a
 * shutdown_final eventhandler from mfi_attach().
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	/* No data buffer: bufp == NULL, bufsize == 0. */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	/* The long-lived AEN command must not be left pending in firmware. */
	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;

	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
863 
/*
 * Enable controller interrupt delivery by writing the outbound
 * interrupt mask register.  NOTE(review): the meaning of the 0x01
 * value is hardware-defined (presumably "unmask the reply interrupt");
 * confirm against the MegaRAID SAS register documentation.
 */
static void
mfi_enable_intr(struct mfi_softc *sc)
{

	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
870 
/*
 * Probe for logical drives: fetch the LD list from the firmware
 * (MFI_DCMD_LD_GET_LIST, sleeping until it completes) and attach a
 * child device for each target ID found.  Failures are logged and the
 * probe is simply skipped.  Called with mfi_io_lock held.
 */
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	int error, i;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	/* The command completed; now check the firmware's own status. */
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++)
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
909 
910 static void
911 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
912 {
913 	switch (detail->arg_type) {
914 	case MR_EVT_ARGS_NONE:
915 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
916 		    detail->seq,
917 		    detail->time,
918 		    detail->class.members.locale,
919 		    detail->class.members.class,
920 		    detail->description
921 		    );
922 		break;
923 	case MR_EVT_ARGS_CDB_SENSE:
924 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D"
925 		    "Sense %*D\n: %s\n",
926 		    detail->seq,
927 		    detail->time,
928 		    detail->class.members.locale,
929 		    detail->class.members.class,
930 		    detail->args.cdb_sense.pd.device_id,
931 		    detail->args.cdb_sense.pd.enclosure_index,
932 		    detail->args.cdb_sense.pd.slot_number,
933 		    detail->args.cdb_sense.cdb_len,
934 		    detail->args.cdb_sense.cdb,
935 		    ":",
936 		    detail->args.cdb_sense.sense_len,
937 		    detail->args.cdb_sense.sense,
938 		    ":",
939 		    detail->description
940 		    );
941 		break;
942 	case MR_EVT_ARGS_LD:
943 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
944 		    "event: %s\n",
945 		    detail->seq,
946 		    detail->time,
947 		    detail->class.members.locale,
948 		    detail->class.members.class,
949 		    detail->args.ld.ld_index,
950 		    detail->args.ld.target_id,
951 		    detail->description
952 		    );
953 		break;
954 	case MR_EVT_ARGS_LD_COUNT:
955 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
956 		    "count %lld: %s\n",
957 		    detail->seq,
958 		    detail->time,
959 		    detail->class.members.locale,
960 		    detail->class.members.class,
961 		    detail->args.ld_count.ld.ld_index,
962 		    detail->args.ld_count.ld.target_id,
963 		    (long long)detail->args.ld_count.count,
964 		    detail->description
965 		    );
966 		break;
967 	case MR_EVT_ARGS_LD_LBA:
968 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
969 		    "lba %lld: %s\n",
970 		    detail->seq,
971 		    detail->time,
972 		    detail->class.members.locale,
973 		    detail->class.members.class,
974 		    detail->args.ld_lba.ld.ld_index,
975 		    detail->args.ld_lba.ld.target_id,
976 		    (long long)detail->args.ld_lba.lba,
977 		    detail->description
978 		    );
979 		break;
980 	case MR_EVT_ARGS_LD_OWNER:
981 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
982 		    "owner changed: prior %d, new %d: %s\n",
983 		    detail->seq,
984 		    detail->time,
985 		    detail->class.members.locale,
986 		    detail->class.members.class,
987 		    detail->args.ld_owner.ld.ld_index,
988 		    detail->args.ld_owner.ld.target_id,
989 		    detail->args.ld_owner.pre_owner,
990 		    detail->args.ld_owner.new_owner,
991 		    detail->description
992 		    );
993 		break;
994 	case MR_EVT_ARGS_LD_LBA_PD_LBA:
995 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
996 		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
997 		    detail->seq,
998 		    detail->time,
999 		    detail->class.members.locale,
1000 		    detail->class.members.class,
1001 		    detail->args.ld_lba_pd_lba.ld.ld_index,
1002 		    detail->args.ld_lba_pd_lba.ld.target_id,
1003 		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
1004 		    detail->args.ld_lba_pd_lba.pd.device_id,
1005 		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
1006 		    detail->args.ld_lba_pd_lba.pd.slot_number,
1007 		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
1008 		    detail->description
1009 		    );
1010 		break;
1011 	case MR_EVT_ARGS_LD_PROG:
1012 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1013 		    "progress %d%% in %ds: %s\n",
1014 		    detail->seq,
1015 		    detail->time,
1016 		    detail->class.members.locale,
1017 		    detail->class.members.class,
1018 		    detail->args.ld_prog.ld.ld_index,
1019 		    detail->args.ld_prog.ld.target_id,
1020 		    detail->args.ld_prog.prog.progress/655,
1021 		    detail->args.ld_prog.prog.elapsed_seconds,
1022 		    detail->description
1023 		    );
1024 		break;
1025 	case MR_EVT_ARGS_LD_STATE:
1026 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1027 		    "state prior %d new %d: %s\n",
1028 		    detail->seq,
1029 		    detail->time,
1030 		    detail->class.members.locale,
1031 		    detail->class.members.class,
1032 		    detail->args.ld_state.ld.ld_index,
1033 		    detail->args.ld_state.ld.target_id,
1034 		    detail->args.ld_state.prev_state,
1035 		    detail->args.ld_state.new_state,
1036 		    detail->description
1037 		    );
1038 		break;
1039 	case MR_EVT_ARGS_LD_STRIP:
1040 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1041 		    "strip %lld: %s\n",
1042 		    detail->seq,
1043 		    detail->time,
1044 		    detail->class.members.locale,
1045 		    detail->class.members.class,
1046 		    detail->args.ld_strip.ld.ld_index,
1047 		    detail->args.ld_strip.ld.target_id,
1048 		    (long long)detail->args.ld_strip.strip,
1049 		    detail->description
1050 		    );
1051 		break;
1052 	case MR_EVT_ARGS_PD:
1053 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1054 		    "event: %s\n",
1055 		    detail->seq,
1056 		    detail->time,
1057 		    detail->class.members.locale,
1058 		    detail->class.members.class,
1059 		    detail->args.pd.device_id,
1060 		    detail->args.pd.enclosure_index,
1061 		    detail->args.pd.slot_number,
1062 		    detail->description
1063 		    );
1064 		break;
1065 	case MR_EVT_ARGS_PD_ERR:
1066 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1067 		    "err %d: %s\n",
1068 		    detail->seq,
1069 		    detail->time,
1070 		    detail->class.members.locale,
1071 		    detail->class.members.class,
1072 		    detail->args.pd_err.pd.device_id,
1073 		    detail->args.pd_err.pd.enclosure_index,
1074 		    detail->args.pd_err.pd.slot_number,
1075 		    detail->args.pd_err.err,
1076 		    detail->description
1077 		    );
1078 		break;
1079 	case MR_EVT_ARGS_PD_LBA:
1080 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1081 		    "lba %lld: %s\n",
1082 		    detail->seq,
1083 		    detail->time,
1084 		    detail->class.members.locale,
1085 		    detail->class.members.class,
1086 		    detail->args.pd_lba.pd.device_id,
1087 		    detail->args.pd_lba.pd.enclosure_index,
1088 		    detail->args.pd_lba.pd.slot_number,
1089 		    (long long)detail->args.pd_lba.lba,
1090 		    detail->description
1091 		    );
1092 		break;
1093 	case MR_EVT_ARGS_PD_LBA_LD:
1094 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1095 		    "lba %lld VD %02d/%d: %s\n",
1096 		    detail->seq,
1097 		    detail->time,
1098 		    detail->class.members.locale,
1099 		    detail->class.members.class,
1100 		    detail->args.pd_lba_ld.pd.device_id,
1101 		    detail->args.pd_lba_ld.pd.enclosure_index,
1102 		    detail->args.pd_lba_ld.pd.slot_number,
1103 		    (long long)detail->args.pd_lba.lba,
1104 		    detail->args.pd_lba_ld.ld.ld_index,
1105 		    detail->args.pd_lba_ld.ld.target_id,
1106 		    detail->description
1107 		    );
1108 		break;
1109 	case MR_EVT_ARGS_PD_PROG:
1110 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1111 		    "progress %d%% seconds %ds: %s\n",
1112 		    detail->seq,
1113 		    detail->time,
1114 		    detail->class.members.locale,
1115 		    detail->class.members.class,
1116 		    detail->args.pd_prog.pd.device_id,
1117 		    detail->args.pd_prog.pd.enclosure_index,
1118 		    detail->args.pd_prog.pd.slot_number,
1119 		    detail->args.pd_prog.prog.progress/655,
1120 		    detail->args.pd_prog.prog.elapsed_seconds,
1121 		    detail->description
1122 		    );
1123 		break;
1124 	case MR_EVT_ARGS_PD_STATE:
1125 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1126 		    "state prior %d new %d: %s\n",
1127 		    detail->seq,
1128 		    detail->time,
1129 		    detail->class.members.locale,
1130 		    detail->class.members.class,
1131 		    detail->args.pd_prog.pd.device_id,
1132 		    detail->args.pd_prog.pd.enclosure_index,
1133 		    detail->args.pd_prog.pd.slot_number,
1134 		    detail->args.pd_state.prev_state,
1135 		    detail->args.pd_state.new_state,
1136 		    detail->description
1137 		    );
1138 		break;
1139 	case MR_EVT_ARGS_PCI:
1140 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x04%x 0x04%x "
1141 		    "0x04%x 0x04%x: %s\n",
1142 		    detail->seq,
1143 		    detail->time,
1144 		    detail->class.members.locale,
1145 		    detail->class.members.class,
1146 		    detail->args.pci.venderId,
1147 		    detail->args.pci.deviceId,
1148 		    detail->args.pci.subVenderId,
1149 		    detail->args.pci.subDeviceId,
1150 		    detail->description
1151 		    );
1152 		break;
1153 	case MR_EVT_ARGS_RATE:
1154 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1155 		    detail->seq,
1156 		    detail->time,
1157 		    detail->class.members.locale,
1158 		    detail->class.members.class,
1159 		    detail->args.rate,
1160 		    detail->description
1161 		    );
1162 		break;
1163 	case MR_EVT_ARGS_TIME:
1164 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1165 		    "elapsed %ds: %s\n",
1166 		    detail->seq,
1167 		    detail->time,
1168 		    detail->class.members.locale,
1169 		    detail->class.members.class,
1170 		    detail->args.time.rtc,
1171 		    detail->args.time.elapsedSeconds,
1172 		    detail->description
1173 		    );
1174 		break;
1175 	case MR_EVT_ARGS_ECC:
1176 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1177 		    detail->seq,
1178 		    detail->time,
1179 		    detail->class.members.locale,
1180 		    detail->class.members.class,
1181 		    detail->args.ecc.ecar,
1182 		    detail->args.ecc.elog,
1183 		    detail->args.ecc.str,
1184 		    detail->description
1185 		    );
1186 		break;
1187 	default:
1188 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1189 		    detail->seq,
1190 		    detail->time,
1191 		    detail->class.members.locale,
1192 		    detail->class.members.class,
1193 		    detail->arg_type, detail->description
1194 		    );
1195 	}
1196 }
1197 
/*
 * Register an asynchronous-event (AEN) wait command with the firmware,
 * starting at sequence number 'seq' and filtered by the class/locale
 * word 'locale'.  Only one AEN command is kept outstanding; if the
 * existing one already covers this request it is left alone, otherwise
 * it is aborted and a new, wider registration is issued.  Returns 0 on
 * success or an errno value.
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* mbox[1] of the outstanding command holds its filter word. */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		/*
		 * If the existing registration is at an equal-or-more-verbose
		 * class and its locale mask already contains every requested
		 * locale bit, the new request is a subset: nothing to do.
		 */
		if (prior_aen.members.class <= current_aen.members.class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			/*
			 * Merge the filters and abort the old command so a
			 * broader one can be issued.
			 * NOTE(review): the merged locale ends up only in
			 * prior_aen, yet mbox[1] below is written from the
			 * bare 'locale' argument — the previously registered
			 * locale bits appear to be dropped.  Confirm intent.
			 */
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.class
			    < current_aen.members.class)
				current_aen.members.class =
				    prior_aen.members.class;
			mfi_abort(sc, sc->mfi_aen_cm);
		}
	}

	/* Allocate an EVENT_WAIT DCMD returning one mfi_evt_detail. */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error) {
		goto out;
	}

	/* mbox[0] = starting sequence number, mbox[1] = filter word. */
	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}
1245 
/*
 * Completion handler for the outstanding AEN wait command.  Decodes and
 * logs the delivered event, signals every process registered for SIGIO
 * notification, then re-arms the AEN at the next sequence number unless
 * the command was aborted.  Runs with the io lock held.
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	/* Status 0xff means the firmware never completed the command. */
	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		/* Wake anyone sleeping in poll(2) on the control device. */
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		/*
		 * XXX If this function is too expensive or is recursive, then
		 * events should be put onto a queue and processed later.
		 */
		mtx_unlock(&sc->mfi_io_lock);
		mfi_decode_evt(sc, detail);
		mtx_lock(&sc->mfi_io_lock);
		seq = detail->seq + 1;
		/* Signal and discard every registered listener. */
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	/* mfi_abort() may be sleeping on this pointer. */
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		mfi_aen_setup(sc, seq);
	}
}
1299 
1300 /* Only do one event for now so we can easily iterate through them */
1301 #define MAX_EVENTS 1
1302 static int
1303 mfi_get_entry(struct mfi_softc *sc, int seq)
1304 {
1305 	struct mfi_command *cm;
1306 	struct mfi_dcmd_frame *dcmd;
1307 	struct mfi_evt_list *el;
1308 	int error;
1309 	int i;
1310 	int size;
1311 
1312 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1313 		return (EBUSY);
1314 	}
1315 
1316 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1317 		* (MAX_EVENTS - 1);
1318 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1319 	if (el == NULL) {
1320 		mfi_release_command(cm);
1321 		return (ENOMEM);
1322 	}
1323 
1324 	dcmd = &cm->cm_frame->dcmd;
1325 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1326 	dcmd->header.cmd = MFI_CMD_DCMD;
1327 	dcmd->header.timeout = 0;
1328 	dcmd->header.data_len = size;
1329 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1330 	((uint32_t *)&dcmd->mbox)[0] = seq;
1331 	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1332 	cm->cm_sg = &dcmd->sgl;
1333 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1334 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1335 	cm->cm_data = el;
1336 	cm->cm_len = size;
1337 
1338 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1339 		device_printf(sc->mfi_dev, "Controller info buffer map failed");
1340 		free(el, M_MFIBUF);
1341 		mfi_release_command(cm);
1342 		return (error);
1343 	}
1344 
1345 	if ((error = mfi_polled_command(sc, cm)) != 0) {
1346 		device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1347 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1348 		    MFI_SECTOR_LEN;
1349 		free(el, M_MFIBUF);
1350 		mfi_release_command(cm);
1351 		return (0);
1352 	}
1353 
1354 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1355 	    BUS_DMASYNC_POSTREAD);
1356 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1357 
1358 	if (dcmd->header.cmd_status != MFI_STAT_NOT_FOUND) {
1359 		for (i = 0; i < el->count; i++) {
1360 			if (seq + i == el->event[i].seq)
1361 				mfi_decode_evt(sc, &el->event[i]);
1362 		}
1363 	}
1364 
1365 	free(cm->cm_data, M_MFIBUF);
1366 	mfi_release_command(cm);
1367 	return (0);
1368 }
1369 
/*
 * Query the firmware for information on logical drive 'id' and, via
 * mfi_add_ld_complete(), attach an "mfid" disk child for it.  Called
 * with the io lock held.  Returns an errno only if the info buffer
 * could not be allocated; firmware failures are logged and swallowed.
 */
static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			free(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;	/* target logical drive number */
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		free(ld_info, M_MFIBUF);
		/*
		 * NOTE(review): 'cm' is not released on this path, so the
		 * command slot appears to be leaked.  Confirm whether the
		 * command may still complete later before releasing here.
		 */
		return (0);
	}

	/* On success the completion routine consumes cm and ld_info. */
	mfi_add_ld_complete(cm);
	return (0);
}
1402 
/*
 * Finish adding a logical drive: consume the completed LD_GET_INFO
 * command, allocate the driver's per-LD bookkeeping, and attach an
 * "mfid" child device.  Owns ld_info from here on — freed on any
 * failure, otherwise stashed in ld->ld_info.  Called with the io lock
 * held; the lock is dropped around the Giant-protected newbus attach.
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	struct mfi_ld *ld;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	/* Done with the command itself; only ld_info survives past here. */
	mfi_release_command(cm);

	ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (ld == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate ld\n");
		free(ld_info, M_MFIBUF);
		return;
	}

	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld, M_MFIBUF);
		free(ld_info, M_MFIBUF);
		return;
	}

	ld->ld_id = ld_info->ld_config.properties.ld.v.target_id;
	ld->ld_disk = child;
	ld->ld_info = ld_info;

	device_set_ivars(child, ld);
	device_set_desc(child, "MFI Logical Disk");
	/* newbus attach requires Giant; drop the io lock meanwhile. */
	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
1449 
1450 static struct mfi_command *
1451 mfi_bio_command(struct mfi_softc *sc)
1452 {
1453 	struct mfi_io_frame *io;
1454 	struct mfi_command *cm;
1455 	struct bio *bio;
1456 	int flags, blkcount;
1457 
1458 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1459 		return (NULL);
1460 
1461 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1462 		mfi_release_command(cm);
1463 		return (NULL);
1464 	}
1465 
1466 	io = &cm->cm_frame->io;
1467 	switch (bio->bio_cmd & 0x03) {
1468 	case BIO_READ:
1469 		io->header.cmd = MFI_CMD_LD_READ;
1470 		flags = MFI_CMD_DATAIN;
1471 		break;
1472 	case BIO_WRITE:
1473 		io->header.cmd = MFI_CMD_LD_WRITE;
1474 		flags = MFI_CMD_DATAOUT;
1475 		break;
1476 	default:
1477 		panic("Invalid bio command");
1478 	}
1479 
1480 	/* Cheat with the sector length to avoid a non-constant division */
1481 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1482 	io->header.target_id = (uintptr_t)bio->bio_driver1;
1483 	io->header.timeout = 0;
1484 	io->header.flags = 0;
1485 	io->header.sense_len = MFI_SENSE_LEN;
1486 	io->header.data_len = blkcount;
1487 	io->sense_addr_lo = cm->cm_sense_busaddr;
1488 	io->sense_addr_hi = 0;
1489 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1490 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1491 	cm->cm_complete = mfi_bio_complete;
1492 	cm->cm_private = bio;
1493 	cm->cm_data = bio->bio_data;
1494 	cm->cm_len = bio->bio_bcount;
1495 	cm->cm_sg = &io->sgl;
1496 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1497 	cm->cm_flags = flags;
1498 	return (cm);
1499 }
1500 
1501 static void
1502 mfi_bio_complete(struct mfi_command *cm)
1503 {
1504 	struct bio *bio;
1505 	struct mfi_frame_header *hdr;
1506 	struct mfi_softc *sc;
1507 
1508 	bio = cm->cm_private;
1509 	hdr = &cm->cm_frame->header;
1510 	sc = cm->cm_sc;
1511 
1512 	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1513 		bio->bio_flags |= BIO_ERROR;
1514 		bio->bio_error = EIO;
1515 		device_printf(sc->mfi_dev, "I/O error, status= %d "
1516 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1517 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1518 	}
1519 
1520 	mfi_release_command(cm);
1521 	mfi_disk_complete(bio);
1522 }
1523 
1524 void
1525 mfi_startio(struct mfi_softc *sc)
1526 {
1527 	struct mfi_command *cm;
1528 
1529 	for (;;) {
1530 		/* Don't bother if we're short on resources */
1531 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1532 			break;
1533 
1534 		/* Try a command that has already been prepared */
1535 		cm = mfi_dequeue_ready(sc);
1536 
1537 		/* Nope, so look for work on the bioq */
1538 		if (cm == NULL)
1539 			cm = mfi_bio_command(sc);
1540 
1541 		/* No work available, so exit */
1542 		if (cm == NULL)
1543 			break;
1544 
1545 		/* Send the command to the controller */
1546 		if (mfi_mapcmd(sc, cm) != 0) {
1547 			mfi_requeue_ready(cm);
1548 			break;
1549 		}
1550 	}
1551 }
1552 
1553 static int
1554 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1555 {
1556 	int error, polled;
1557 
1558 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1559 
1560 	if (cm->cm_data != NULL) {
1561 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1562 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1563 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1564 		if (error == EINPROGRESS) {
1565 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1566 			return (0);
1567 		}
1568 	} else {
1569 		cm->cm_timestamp = time_uptime;
1570 		mfi_enqueue_busy(cm);
1571 		error = mfi_send_frame(sc, cm);
1572 	}
1573 
1574 	return (error);
1575 }
1576 
/*
 * bus_dma callback: translate the mapped segments into the command's
 * scatter/gather list, set the DMA direction flags, and — unless the
 * command is polled — hand it to the controller.  Normally invoked
 * synchronously from mfi_mapcmd() with the io lock held; may run later
 * if the load was deferred (EINPROGRESS).
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, dir;

	if (error)
		return;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/* Use 32-bit SG entries unless the adapter was set up for 64-bit. */
	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg32[i].addr = segs[i].ds_addr;
			sgl->sg32[i].len = segs[i].ds_len;
		}
	} else {
		for (i = 0; i < nsegs; i++) {
			sgl->sg64[i].addr = segs[i].ds_addr;
			sgl->sg64[i].len = segs[i].ds_len;
		}
		hdr->flags |= MFI_FRAME_SGL64;
	}
	hdr->sg_count = nsegs;

	/* Sync direction and frame direction flags follow the data flags. */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	/* The caller will take care of delivering polled commands */
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
		mfi_send_frame(sc, cm);
	}

	return;
}
1638 
1639 static int
1640 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1641 {
1642 
1643 	/*
1644 	 * The bus address of the command is aligned on a 64 byte boundary,
1645 	 * leaving the least 6 bits as zero.  For whatever reason, the
1646 	 * hardware wants the address shifted right by three, leaving just
1647 	 * 3 zero bits.  These three bits are then used as a prefetching
1648 	 * hint for the hardware to predict how many frames need to be
1649 	 * fetched across the bus.  If a command has more than 8 frames
1650 	 * then the 3 bits are set to 0x7 and the firmware uses other
1651 	 * information in the command to determine the total amount to fetch.
1652 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1653 	 * is enough for both 32bit and 64bit systems.
1654 	 */
1655 	if (cm->cm_extra_frames > 7)
1656 		cm->cm_extra_frames = 7;
1657 
1658 	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1659 	    cm->cm_extra_frames);
1660 	return (0);
1661 }
1662 
1663 static void
1664 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1665 {
1666 	int dir;
1667 
1668 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1669 		dir = 0;
1670 		if (cm->cm_flags & MFI_CMD_DATAIN)
1671 			dir |= BUS_DMASYNC_POSTREAD;
1672 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1673 			dir |= BUS_DMASYNC_POSTWRITE;
1674 
1675 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1676 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1677 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1678 	}
1679 
1680 	if (cm->cm_complete != NULL)
1681 		cm->cm_complete(cm);
1682 	else
1683 		wakeup(cm);
1684 }
1685 
/*
 * Issue a firmware ABORT frame for 'cm_abort' and sleep until the
 * victim completes.  Called with the io lock held.
 * NOTE(review): despite the generic signature, this routine assumes the
 * victim is the outstanding AEN command — it sets
 * sc->mfi_aen_cm->cm_aen_abort and waits for sc->mfi_aen_cm to become
 * NULL.  Confirm before reusing it for any other command.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Identify the victim by its context and frame bus address. */
	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi = 0;
	cm->cm_data = NULL;

	sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_polled_command(sc, cm);
	mfi_release_command(cm);

	/* Wait for mfi_aen_complete() to clear the pointer and wake us. */
	while (sc->mfi_aen_cm != NULL) {
		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
	}

	return (0);
}
1717 
1718 int
1719 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1720 {
1721 	struct mfi_command *cm;
1722 	struct mfi_io_frame *io;
1723 	int error;
1724 
1725 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1726 		return (EBUSY);
1727 
1728 	io = &cm->cm_frame->io;
1729 	io->header.cmd = MFI_CMD_LD_WRITE;
1730 	io->header.target_id = id;
1731 	io->header.timeout = 0;
1732 	io->header.flags = 0;
1733 	io->header.sense_len = MFI_SENSE_LEN;
1734 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1735 	io->sense_addr_lo = cm->cm_sense_busaddr;
1736 	io->sense_addr_hi = 0;
1737 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1738 	io->lba_lo = lba & 0xffffffff;
1739 	cm->cm_data = virt;
1740 	cm->cm_len = len;
1741 	cm->cm_sg = &io->sgl;
1742 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1743 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1744 
1745 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1746 		mfi_release_command(cm);
1747 		return (error);
1748 	}
1749 
1750 	error = mfi_polled_command(sc, cm);
1751 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1752 	    BUS_DMASYNC_POSTWRITE);
1753 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1754 	mfi_release_command(cm);
1755 
1756 	return (error);
1757 }
1758 
1759 static int
1760 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1761 {
1762 	struct mfi_softc *sc;
1763 
1764 	sc = dev->si_drv1;
1765 
1766 	mtx_lock(&sc->mfi_io_lock);
1767 	sc->mfi_flags |= MFI_FLAGS_OPEN;
1768 	mtx_unlock(&sc->mfi_io_lock);
1769 
1770 	return (0);
1771 }
1772 
1773 static int
1774 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1775 {
1776 	struct mfi_softc *sc;
1777 	struct mfi_aen *mfi_aen_entry, *tmp;
1778 
1779 	sc = dev->si_drv1;
1780 
1781 	mtx_lock(&sc->mfi_io_lock);
1782 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1783 
1784 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1785 		if (mfi_aen_entry->p == curproc) {
1786 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1787 			    aen_link);
1788 			free(mfi_aen_entry, M_MFIBUF);
1789 		}
1790 	}
1791 	mtx_unlock(&sc->mfi_io_lock);
1792 	return (0);
1793 }
1794 
1795 static int
1796 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1797 {
1798 	struct mfi_softc *sc;
1799 	union mfi_statrequest *ms;
1800 	struct mfi_ioc_packet *ioc;
1801 	struct mfi_ioc_aen *aen;
1802 	struct mfi_command *cm = NULL;
1803 	struct mfi_dcmd_frame *dcmd;
1804 	uint32_t context;
1805 	uint32_t *sense_ptr;
1806 	uint8_t *data = NULL, *temp;
1807 	int i;
1808 	int error;
1809 
1810 	sc = dev->si_drv1;
1811 	error = 0;
1812 
1813 	switch (cmd) {
1814 	case MFIIO_STATS:
1815 		ms = (union mfi_statrequest *)arg;
1816 		switch (ms->ms_item) {
1817 		case MFIQ_FREE:
1818 		case MFIQ_BIO:
1819 		case MFIQ_READY:
1820 		case MFIQ_BUSY:
1821 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1822 			    sizeof(struct mfi_qstat));
1823 			break;
1824 		default:
1825 			error = ENOIOCTL;
1826 			break;
1827 		}
1828 		break;
1829 	case MFI_CMD:
1830 		ioc = (struct mfi_ioc_packet *)arg;
1831 
1832 		mtx_lock(&sc->mfi_io_lock);
1833 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1834 			mtx_unlock(&sc->mfi_io_lock);
1835 			return (EBUSY);
1836 		}
1837 		mtx_unlock(&sc->mfi_io_lock);
1838 
1839 		/*
1840 		 * save off original context since copying from user
1841 		 * will clobber some data
1842 		 */
1843 		context = cm->cm_frame->header.context;
1844 
1845 		bcopy(ioc->mi_frame.raw, cm->cm_frame,
1846 		      ioc->mi_sgl_off); /* Linux can do 2 frames ? */
1847 		cm->cm_total_frame_size = ioc->mi_sgl_off;
1848 		cm->cm_sg =
1849 		    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mi_sgl_off];
1850 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1851 			| MFI_CMD_POLLED;
1852 		cm->cm_len = cm->cm_frame->header.data_len;
1853 		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1854 					    M_WAITOK | M_ZERO);
1855 
1856 		/* restore header context */
1857 		cm->cm_frame->header.context = context;
1858 		/* ioctl's are dcmd types */
1859 		dcmd =  &cm->cm_frame->dcmd;
1860 
1861 		temp = data;
1862 		for (i = 0; i < ioc->mi_sge_count; i++) {
1863 			error = copyin(ioc->mi_sgl[i].iov_base,
1864 			       temp,
1865 			       ioc->mi_sgl[i].iov_len);
1866 			if (error != 0) {
1867 				device_printf(sc->mfi_dev,
1868 				    "Copy in failed");
1869 				goto out;
1870 			}
1871 			temp = &temp[ioc->mi_sgl[i].iov_len];
1872 		}
1873 
1874 		if (ioc->mi_sense_len) {
1875 			sense_ptr =
1876 			    (void *)&cm->cm_frame->bytes[ioc->mi_sense_off];
1877 			*sense_ptr = cm->cm_sense_busaddr;
1878 		}
1879 
1880 		mtx_lock(&sc->mfi_io_lock);
1881 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1882 			device_printf(sc->mfi_dev,
1883 			    "Controller info buffer map failed");
1884 			mtx_unlock(&sc->mfi_io_lock);
1885 			goto out;
1886 		}
1887 
1888 		if ((error = mfi_polled_command(sc, cm)) != 0) {
1889 			device_printf(sc->mfi_dev,
1890 			    "Controller polled failed");
1891 			mtx_unlock(&sc->mfi_io_lock);
1892 			goto out;
1893 		}
1894 
1895 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1896 				BUS_DMASYNC_POSTREAD);
1897 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1898 		mtx_unlock(&sc->mfi_io_lock);
1899 
1900 		temp = data;
1901 		for (i = 0; i < ioc->mi_sge_count; i++) {
1902 			error = copyout(temp,
1903 				ioc->mi_sgl[i].iov_base,
1904 				ioc->mi_sgl[i].iov_len);
1905 			if (error != 0) {
1906 				device_printf(sc->mfi_dev,
1907 				    "Copy out failed");
1908 				goto out;
1909 			}
1910 			temp = &temp[ioc->mi_sgl[i].iov_len];
1911 		}
1912 
1913 		if (ioc->mi_sense_len) {
1914 			/* copy out sense */
1915 			sense_ptr = (void *)
1916 			    &ioc->mi_frame.raw[ioc->mi_sense_off];
1917 			temp = 0;
1918 			temp += cm->cm_sense_busaddr;
1919 			error = copyout(temp, sense_ptr,
1920 			    ioc->mi_sense_len);
1921 			if (error != 0) {
1922 				device_printf(sc->mfi_dev,
1923 				    "Copy out failed");
1924 				goto out;
1925 			}
1926 		}
1927 
1928 		ioc->mi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
1929 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1930 			switch (dcmd->opcode) {
1931 			case MFI_DCMD_CFG_CLEAR:
1932 			case MFI_DCMD_CFG_ADD:
1933 /*
1934 				mfi_ldrescan(sc);
1935 */
1936 				break;
1937 			}
1938 		}
1939 out:
1940 		if (data)
1941 			free(data, M_MFIBUF);
1942 		if (cm) {
1943 			mtx_lock(&sc->mfi_io_lock);
1944 			mfi_release_command(cm);
1945 			mtx_unlock(&sc->mfi_io_lock);
1946 		}
1947 
1948 		break;
1949 	case MFI_SET_AEN:
1950 		aen = (struct mfi_ioc_aen *)arg;
1951 		error = mfi_aen_register(sc, aen->aen_seq_num,
1952 		    aen->aen_class_locale);
1953 
1954 		break;
1955 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
1956 		{
1957 			devclass_t devclass;
1958 			struct mfi_linux_ioc_packet l_ioc;
1959 			int adapter;
1960 
1961 			devclass = devclass_find("mfi");
1962 			if (devclass == NULL)
1963 				return (ENOENT);
1964 
1965 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
1966 			if (error)
1967 				return (error);
1968 			adapter = l_ioc.lioc_adapter_no;
1969 			sc = devclass_get_softc(devclass, adapter);
1970 			if (sc == NULL)
1971 				return (ENOENT);
1972 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1973 			    cmd, arg, flag, td));
1974 			break;
1975 		}
1976 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
1977 		{
1978 			devclass_t devclass;
1979 			struct mfi_linux_ioc_aen l_aen;
1980 			int adapter;
1981 
1982 			devclass = devclass_find("mfi");
1983 			if (devclass == NULL)
1984 				return (ENOENT);
1985 
1986 			error = copyin(arg, &l_aen, sizeof(l_aen));
1987 			if (error)
1988 				return (error);
1989 			adapter = l_aen.laen_adapter_no;
1990 			sc = devclass_get_softc(devclass, adapter);
1991 			if (sc == NULL)
1992 				return (ENOENT);
1993 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1994 			    cmd, arg, flag, td));
1995 			break;
1996 		}
1997 	default:
1998 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
1999 		error = ENOENT;
2000 		break;
2001 	}
2002 
2003 	return (error);
2004 }
2005 
/*
 * Back end for the Linux megaraid ioctl compatibility shim.  Decodes the
 * Linux-format ioctl structures, executes the embedded firmware frame as a
 * polled pass-through command (MFI_LINUX_CMD_2) or registers the calling
 * process for AEN delivery (MFI_LINUX_SET_AEN_2).  Returns 0 or an errno.
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	uint32_t *sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	void *temp_convert;
	int i;
	int error;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Reject requests with more S/G entries than we can hold. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		/* The free-command queue is protected by the io lock. */
		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		/* Copy the user's frame up to (but not including) the SGL. */
		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
		cm->cm_sg =
		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		/* Direction is unknown from here, so map for both ways. */
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
			| MFI_CMD_POLLED;
		/*
		 * NOTE(review): cm_len comes straight from the user-supplied
		 * frame header with no upper-bound check before the malloc
		 * below — confirm data_len is validated elsewhere.
		 */
		cm->cm_len = cm->cm_frame->header.data_len;
		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
					    M_WAITOK | M_ZERO);

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's S/G segments into one contiguous buffer. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			temp_convert =
			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
			error = copyin(temp_convert,
			       temp,
			       l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy in failed");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		/*
		 * If the caller wants sense data, patch the frame so the
		 * firmware DMAs sense into our pre-allocated sense buffer.
		 */
		if (l_ioc.lioc_sense_len) {
			sense_ptr =
			    (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
			*sense_ptr = cm->cm_sense_busaddr;
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller info buffer map failed");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		/* Issue the command and spin for completion (MFI_CMD_POLLED). */
		if ((error = mfi_polled_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		/* Make the DMA'd data visible to the CPU before copyout. */
		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter the result back out to the user's S/G segments. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			temp_convert =
			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
			error = copyout(temp,
				temp_convert,
				l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		if (l_ioc.lioc_sense_len) {
			/* copy out sense */
			sense_ptr = (void *)
			    &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
			/*
			 * NOTE(review): this forms the copyout source by
			 * treating cm_sense_busaddr (a bus/DMA address) as a
			 * kernel virtual address.  That is only correct where
			 * the two happen to coincide — verify against the
			 * driver's cm_sense KVA pointer.
			 */
			temp = 0;
			temp += cm->cm_sense_busaddr;
			error = copyout(temp, sense_ptr,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed");
				goto out;
			}
		}

		/* Report the firmware completion status back to the caller. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed");
			goto out;
		}

		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			switch (cm->cm_frame->dcmd.opcode) {
			case MFI_DCMD_CFG_CLEAR:
			case MFI_DCMD_CFG_ADD:
				/* mfi_ldrescan(sc); */
				break;
			}
		}
out:
		/* Common unwind: release the data buffer and the command. */
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		/* M_WAITOK malloc sleeps rather than fail, so the NULL
		 * check below is effectively always true. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		/* On registration failure, undo the pid-list insertion. */
		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
2191 
2192 static int
2193 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2194 {
2195 	struct mfi_softc *sc;
2196 	int revents = 0;
2197 
2198 	sc = dev->si_drv1;
2199 
2200 	if (poll_events & (POLLIN | POLLRDNORM)) {
2201 		if (sc->mfi_aen_triggered != 0) {
2202 			revents |= poll_events & (POLLIN | POLLRDNORM);
2203 			sc->mfi_aen_triggered = 0;
2204 		}
2205 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2206 			revents |= POLLERR;
2207 		}
2208 	}
2209 
2210 	if (revents == 0) {
2211 		if (poll_events & (POLLIN | POLLRDNORM)) {
2212 			sc->mfi_poll_waiting = 1;
2213 			selrecord(td, &sc->mfi_select);
2214 		}
2215 	}
2216 
2217 	return revents;
2218 }
2219 
2220 
2221 static void
2222 mfi_dump_all(void)
2223 {
2224 	struct mfi_softc *sc;
2225 	struct mfi_command *cm;
2226 	devclass_t dc;
2227 	time_t deadline;
2228 	int timedout;
2229 	int i;
2230 
2231 	dc = devclass_find("mfi");
2232 	if (dc == NULL) {
2233 		printf("No mfi dev class\n");
2234 		return;
2235 	}
2236 
2237 	for (i = 0; ; i++) {
2238 		sc = devclass_get_softc(dc, i);
2239 		if (sc == NULL)
2240 			break;
2241 		device_printf(sc->mfi_dev, "Dumping\n\n");
2242 		timedout = 0;
2243 		deadline = time_uptime - MFI_CMD_TIMEOUT;
2244 		mtx_lock(&sc->mfi_io_lock);
2245 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2246 			if (cm->cm_timestamp < deadline) {
2247 				device_printf(sc->mfi_dev,
2248 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2249 				    (int)(time_uptime - cm->cm_timestamp));
2250 				MFI_PRINT_CMD(cm);
2251 				timedout++;
2252 			}
2253 		}
2254 
2255 #if 0
2256 		if (timedout)
2257 			MFI_DUMP_CMDS(SC);
2258 #endif
2259 
2260 		mtx_unlock(&sc->mfi_io_lock);
2261 	}
2262 
2263 	return;
2264 }
2265 
2266 static void
2267 mfi_timeout(void *data)
2268 {
2269 	struct mfi_softc *sc = (struct mfi_softc *)data;
2270 	struct mfi_command *cm;
2271 	time_t deadline;
2272 	int timedout = 0;
2273 
2274 	deadline = time_uptime - MFI_CMD_TIMEOUT;
2275 	mtx_lock(&sc->mfi_io_lock);
2276 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2277 		if (sc->mfi_aen_cm == cm)
2278 			continue;
2279 		if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
2280 			device_printf(sc->mfi_dev,
2281 			    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2282 			    (int)(time_uptime - cm->cm_timestamp));
2283 			MFI_PRINT_CMD(cm);
2284 			MFI_VALIDATE_CMD(sc, cm);
2285 			timedout++;
2286 		}
2287 	}
2288 
2289 #if 0
2290 	if (timedout)
2291 		MFI_DUMP_CMDS(SC);
2292 #endif
2293 
2294 	mtx_unlock(&sc->mfi_io_lock);
2295 
2296 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
2297 	    mfi_timeout, sc);
2298 
2299 	if (0)
2300 		mfi_dump_all();
2301 	return;
2302 }
2303