xref: /freebsd/sys/dev/mfi/mfi.c (revision 3fe92528afe8313fecf48822dde74bad5e380f48)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_mfi.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/poll.h>
38 #include <sys/selinfo.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <sys/eventhandler.h>
42 #include <sys/rman.h>
43 #include <sys/bus_dma.h>
44 #include <sys/bio.h>
45 #include <sys/ioccom.h>
46 #include <sys/uio.h>
47 #include <sys/proc.h>
48 
49 #include <machine/bus.h>
50 #include <machine/resource.h>
51 
52 #include <dev/mfi/mfireg.h>
53 #include <dev/mfi/mfi_ioctl.h>
54 #include <dev/mfi/mfivar.h>
55 
56 static int	mfi_alloc_commands(struct mfi_softc *);
57 static void	mfi_release_command(struct mfi_command *cm);
58 static int	mfi_comms_init(struct mfi_softc *);
59 static int	mfi_polled_command(struct mfi_softc *, struct mfi_command *);
60 static int	mfi_wait_command(struct mfi_softc *, struct mfi_command *);
61 static int	mfi_get_controller_info(struct mfi_softc *);
62 static int	mfi_get_log_state(struct mfi_softc *,
63 		    struct mfi_evt_log_state **);
64 static int	mfi_get_entry(struct mfi_softc *, int);
65 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
66 		    uint32_t, void **, size_t);
67 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
68 static void	mfi_startup(void *arg);
69 static void	mfi_intr(void *arg);
70 static void	mfi_enable_intr(struct mfi_softc *sc);
71 static void	mfi_ldprobe(struct mfi_softc *sc);
72 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
73 static void	mfi_aen_complete(struct mfi_command *);
74 static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
75 static int	mfi_add_ld(struct mfi_softc *sc, int);
76 static void	mfi_add_ld_complete(struct mfi_command *);
77 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
78 static void	mfi_bio_complete(struct mfi_command *);
79 static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
80 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
81 static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
82 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
83 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
84 
85 
86 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
87 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
88 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
89             0, "event message locale");
90 static int	mfi_event_class =  MFI_EVT_CLASS_DEBUG;
91 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
92           0, "event message class");
93 
94 /* Management interface */
95 static d_open_t		mfi_open;
96 static d_close_t	mfi_close;
97 static d_ioctl_t	mfi_ioctl;
98 static d_poll_t		mfi_poll;
99 
100 static struct cdevsw mfi_cdevsw = {
101 	.d_version = 	D_VERSION,
102 	.d_flags =	0,
103 	.d_open = 	mfi_open,
104 	.d_close =	mfi_close,
105 	.d_ioctl =	mfi_ioctl,
106 	.d_poll =	mfi_poll,
107 	.d_name =	"mfi",
108 };
109 
110 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
111 
112 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
113 
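/*
 * Wait for the firmware to leave its boot/handshake states and reach
 * MFI_FWSTATE_READY.  The outbound message register is polled in 100ms
 * steps for up to max_wait seconds per state, and the handshake and
 * operational states are nudged along via writes to the inbound doorbell.
 */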
114 static int
115 mfi_transition_firmware(struct mfi_softc *sc)
116 {
117 	int32_t fw_state, cur_state;
118 	int max_wait, i;
119 
120 	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
121 	while (fw_state != MFI_FWSTATE_READY) {
122 		if (bootverbose)
123 			device_printf(sc->mfi_dev, "Waiting for firmware to "
124 			    "become ready\n");
125 		cur_state = fw_state;
126 		switch (fw_state) {
127 		case MFI_FWSTATE_FAULT:
128 			device_printf(sc->mfi_dev, "Firmware fault\n");
129 			return (ENXIO);
130 		case MFI_FWSTATE_WAIT_HANDSHAKE:
131 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
132 			max_wait = 2;
133 			break;
134 		case MFI_FWSTATE_OPERATIONAL:
135 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
136 			max_wait = 10;
137 			break;
138 		case MFI_FWSTATE_UNDEFINED:
139 		case MFI_FWSTATE_BB_INIT:
140 			max_wait = 2;
141 			break;
142 		case MFI_FWSTATE_FW_INIT:
143 		case MFI_FWSTATE_DEVICE_SCAN:
144 		case MFI_FWSTATE_FLUSH_CACHE:
145 			max_wait = 20;
146 			break;
147 		default:
148 			device_printf(sc->mfi_dev, "Unknown firmware state %d\n",
149 			    fw_state);
150 			return (ENXIO);
151 		}
152 		for (i = 0; i < (max_wait * 10); i++) {
153 			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
154 			if (fw_state == cur_state)
155 				DELAY(100000);
156 			else
157 				break;
158 		}
159 		if (fw_state == cur_state) {
160 			device_printf(sc->mfi_dev, "firmware stuck in state "
161 			    "%#x\n", fw_state);
162 			return (ENXIO);
163 		}
164 	}
165 	return (0);
166 }
167 
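/*
 * busdma callback that records the bus address of a single-segment
 * mapping; used when loading the comms, frame, and sense areas below.
 */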
168 static void
169 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
170 {
171 	uint32_t *addr;
172 
173 	addr = arg;
174 	*addr = segs[0].ds_addr;
175 }
176 
177 int
178 mfi_attach(struct mfi_softc *sc)
179 {
180 	uint32_t status;
181 	int error, commsz, framessz, sensesz;
182 	int frames, unit;
183 
184 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
185 	TAILQ_INIT(&sc->mfi_ld_tqh);
186 	TAILQ_INIT(&sc->mfi_aen_pids);
187 
188 	mfi_initq_free(sc);
189 	mfi_initq_ready(sc);
190 	mfi_initq_busy(sc);
191 	mfi_initq_bio(sc);
192 
193 	/* Before we get too far, see if the firmware is working */
194 	if ((error = mfi_transition_firmware(sc)) != 0) {
195 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
196 		    "error %d\n", error);
197 		return (ENXIO);
198 	}
199 
200 	/*
201 	 * Get information needed for sizing the contiguous memory for the
202 	 * frame pool.  Size down the sgl parameter since we know that
203 	 * we will never need more than what's required for MAXPHYS.
204 	 * It would be nice if these constants were available at runtime
205 	 * instead of compile time.
206 	 */
207 	status = MFI_READ4(sc, MFI_OMSG0);
208 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
209 	sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
210 	sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));
211 
212 	/*
213 	 * Create the dma tag for data buffers.  Used both for block I/O
214 	 * and for various internal data queries.
215 	 */
216 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
217 				1, 0,			/* algnmnt, boundary */
218 				BUS_SPACE_MAXADDR,	/* lowaddr */
219 				BUS_SPACE_MAXADDR,	/* highaddr */
220 				NULL, NULL,		/* filter, filterarg */
221 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
222 				sc->mfi_total_sgl,	/* nsegments */
223 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
224 				BUS_DMA_ALLOCNOW,	/* flags */
225 				busdma_lock_mutex,	/* lockfunc */
226 				&sc->mfi_io_lock,	/* lockfuncarg */
227 				&sc->mfi_buffer_dmat)) {
228 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
229 		return (ENOMEM);
230 	}
231 
232 	/*
233 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
234 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
235 	 * entry, so the calculated size here will be 1 more than
236 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
237 	 */
238 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
239 	    sizeof(struct mfi_hwcomms);
240 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
241 				1, 0,			/* algnmnt, boundary */
242 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
243 				BUS_SPACE_MAXADDR,	/* highaddr */
244 				NULL, NULL,		/* filter, filterarg */
245 				commsz,			/* maxsize */
246 				1,			/* nsegments */
247 				commsz,			/* maxsegsize */
248 				0,			/* flags */
249 				NULL, NULL,		/* lockfunc, lockarg */
250 				&sc->mfi_comms_dmat)) {
251 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
252 		return (ENOMEM);
253 	}
254 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
255 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
256 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
257 		return (ENOMEM);
258 	}
259 	bzero(sc->mfi_comms, commsz);
260 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
261 	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
262 
263 	/*
264 	 * Allocate DMA memory for the command frames.  Keep them in the
265 	 * lower 4GB for efficiency.  Calculate the size of the frames at
266 	 * the same time; the frame is 64 bytes plus space for the SG lists.
267 	 * The assumption here is that the SG list will start at the second
268 	 * 64 byte segment of the frame and not use the unused bytes in the
269 	 * frame.  While this might seem wasteful, apparently the frames must
270 	 * be 64 byte aligned, so any savings would be negated by the extra
271 	 * alignment padding.
272 	 */
273 	if (sizeof(bus_addr_t) == 8) {
274 		sc->mfi_sgsize = sizeof(struct mfi_sg64);
275 		sc->mfi_flags |= MFI_FLAGS_SG64;
276 	} else {
277 		sc->mfi_sgsize = sizeof(struct mfi_sg32);
278 	}
279 	frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
280 	    MFI_FRAME_SIZE + 1;
281 	sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
282 	framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
283 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
284 				64, 0,			/* algnmnt, boundary */
285 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
286 				BUS_SPACE_MAXADDR,	/* highaddr */
287 				NULL, NULL,		/* filter, filterarg */
288 				framessz,		/* maxsize */
289 				1,			/* nsegments */
290 				framessz,		/* maxsegsize */
291 				0,			/* flags */
292 				NULL, NULL,		/* lockfunc, lockarg */
293 				&sc->mfi_frames_dmat)) {
294 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
295 		return (ENOMEM);
296 	}
297 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
298 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
299 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
300 		return (ENOMEM);
301 	}
302 	bzero(sc->mfi_frames, framessz);
303 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
304 	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
305 
306 	/*
307 	 * Allocate DMA memory for the frame sense data.  Keep them in the
308 	 * lower 4GB for efficiency
309 	 */
310 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
311 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
312 				4, 0,			/* algnmnt, boundary */
313 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
314 				BUS_SPACE_MAXADDR,	/* highaddr */
315 				NULL, NULL,		/* filter, filterarg */
316 				sensesz,		/* maxsize */
317 				1,			/* nsegments */
318 				sensesz,		/* maxsegsize */
319 				0,			/* flags */
320 				NULL, NULL,		/* lockfunc, lockarg */
321 				&sc->mfi_sense_dmat)) {
322 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
323 		return (ENOMEM);
324 	}
325 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
326 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
327 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
328 		return (ENOMEM);
329 	}
330 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
331 	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
332 
333 	if ((error = mfi_alloc_commands(sc)) != 0)
334 		return (error);
335 
336 	if ((error = mfi_comms_init(sc)) != 0)
337 		return (error);
338 
339 	if ((error = mfi_get_controller_info(sc)) != 0)
340 		return (error);
341 
342 	if ((error = mfi_aen_setup(sc, 0)) != 0)
343 		return (error);
344 
345 	/*
346 	 * Set up the interrupt handler.  XXX This should happen in
347 	 * mfi_pci.c
348 	 */
349 	sc->mfi_irq_rid = 0;
350 	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
351 	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
352 		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
353 		return (EINVAL);
354 	}
355 	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
356 	    mfi_intr, sc, &sc->mfi_intr)) {
357 		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
358 		return (EINVAL);
359 	}
360 
361 	/* Register a config hook to probe the bus for arrays */
362 	sc->mfi_ich.ich_func = mfi_startup;
363 	sc->mfi_ich.ich_arg = sc;
364 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
365 		device_printf(sc->mfi_dev, "Cannot establish configuration "
366 		    "hook\n");
367 		return (EINVAL);
368 	}
369 
370 	/*
371 	 * Register a shutdown handler.
372 	 */
373 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
374 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
375 		device_printf(sc->mfi_dev, "Warning: shutdown event "
376 		    "registration failed\n");
377 	}
378 
379 	/*
380 	 * Create the control device for doing management
381 	 */
382 	unit = device_get_unit(sc->mfi_dev);
383 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
384 	    0640, "mfi%d", unit);
385 	if (unit == 0 && sc->mfi_cdev != NULL)
386 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
387 	if (sc->mfi_cdev != NULL)
388 		sc->mfi_cdev->si_drv1 = sc;
389 
390 	return (0);
391 }
392 
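/*
 * Carve the preallocated frame and sense areas into per-command slots.
 * Each command's context field is set to its index so that the interrupt
 * handler can map a reply queue entry back to its mfi_command.
 */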
393 static int
394 mfi_alloc_commands(struct mfi_softc *sc)
395 {
396 	struct mfi_command *cm;
397 	int i, ncmds;
398 
399 	/*
400 	 * XXX Should we allocate all the commands up front, or allocate on
401 	 * demand later like 'aac' does?
402 	 */
403 	ncmds = sc->mfi_max_fw_cmds;
404 	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
405 	    M_WAITOK | M_ZERO);
406 
407 	for (i = 0; i < ncmds; i++) {
408 		cm = &sc->mfi_commands[i];
409 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
410 		    sc->mfi_frame_size * i);
411 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
412 		    sc->mfi_frame_size * i;
413 		cm->cm_frame->header.context = i;
414 		cm->cm_sense = &sc->mfi_sense[i];
415 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
416 		cm->cm_sc = sc;
417 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
418 		    &cm->cm_dmamap) == 0)
419 			mfi_release_command(cm);
420 		else
421 			break;
422 		sc->mfi_total_cmds++;
423 	}
424 
425 	return (0);
426 }
427 
428 static void
429 mfi_release_command(struct mfi_command *cm)
430 {
431 	uint32_t *hdr_data;
432 
433 	/*
434 	 * Zero out the important fields of the frame, but make sure the
435 	 * context field is preserved
436 	 */
437 	hdr_data = (uint32_t *)cm->cm_frame;
438 	hdr_data[0] = 0;
439 	hdr_data[1] = 0;
440 
441 	cm->cm_extra_frames = 0;
442 	cm->cm_flags = 0;
443 	cm->cm_complete = NULL;
444 	cm->cm_private = NULL;
445 	cm->cm_sg = 0;
446 	cm->cm_total_frame_size = 0;
447 	mfi_enqueue_free(cm);
448 }
449 
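/*
 * Set up a DCMD frame for the given opcode.  If the caller supplies a
 * buffer pointer whose target is NULL, a zeroed buffer of bufsize bytes
 * is allocated on its behalf; the caller is responsible for freeing it.
 */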
450 static int
451 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
452     void **bufp, size_t bufsize)
453 {
454 	struct mfi_command *cm;
455 	struct mfi_dcmd_frame *dcmd;
456 	void *buf = NULL;
457 
458 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
459 
460 	cm = mfi_dequeue_free(sc);
461 	if (cm == NULL)
462 		return (EBUSY);
463 
464 	if ((bufsize > 0) && (bufp != NULL)) {
465 		if (*bufp == NULL) {
466 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
467 			if (buf == NULL) {
468 				mfi_release_command(cm);
469 				return (ENOMEM);
470 			}
471 			*bufp = buf;
472 		} else {
473 			buf = *bufp;
474 		}
475 	}
476 
477 	dcmd =  &cm->cm_frame->dcmd;
478 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
479 	dcmd->header.cmd = MFI_CMD_DCMD;
480 	dcmd->header.timeout = 0;
481 	dcmd->header.flags = 0;
482 	dcmd->header.data_len = bufsize;
483 	dcmd->opcode = opcode;
484 	cm->cm_sg = &dcmd->sgl;
485 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
486 	cm->cm_flags = 0;
487 	cm->cm_data = buf;
488 	cm->cm_private = buf;
489 	cm->cm_len = bufsize;
490 
491 	*cmp = cm;
492 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
493 		*bufp = buf;
494 	return (0);
495 }
496 
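/*
 * Send the INIT frame that tells the firmware where the reply queue and
 * the producer/consumer indices live within the mfi_hwcomms area.
 */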
497 static int
498 mfi_comms_init(struct mfi_softc *sc)
499 {
500 	struct mfi_command *cm;
501 	struct mfi_init_frame *init;
502 	struct mfi_init_qinfo *qinfo;
503 	int error;
504 
505 	if ((cm = mfi_dequeue_free(sc)) == NULL)
506 		return (EBUSY);
507 
508 	/*
509 	 * Abuse the SG list area of the frame to hold the init_qinfo
510 	 * object.
511 	 */
512 	init = &cm->cm_frame->init;
513 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
514 
515 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
516 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
517 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
518 	    offsetof(struct mfi_hwcomms, hw_reply_q);
519 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
520 	    offsetof(struct mfi_hwcomms, hw_pi);
521 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
522 	    offsetof(struct mfi_hwcomms, hw_ci);
523 
524 	init->header.cmd = MFI_CMD_INIT;
525 	init->header.data_len = sizeof(struct mfi_init_qinfo);
526 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
527 
528 	if ((error = mfi_polled_command(sc, cm)) != 0) {
529 		device_printf(sc->mfi_dev, "failed to send init command\n");
530 		return (error);
531 	}
532 	mfi_release_command(cm);
533 
534 	return (0);
535 }
536 
537 static int
538 mfi_get_controller_info(struct mfi_softc *sc)
539 {
540 	struct mfi_command *cm = NULL;
541 	struct mfi_ctrl_info *ci = NULL;
542 	uint32_t max_sectors_1, max_sectors_2;
543 	int error;
544 
545 	mtx_lock(&sc->mfi_io_lock);
546 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
547 	    (void **)&ci, sizeof(*ci));
548 	if (error)
549 		goto out;
550 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
551 
552 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
553 		device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
554 		free(ci, M_MFIBUF);
555 		mfi_release_command(cm);
556 		return (error);
557 	}
558 
559 	/* It's ok if this fails, just use default info instead */
560 	if ((error = mfi_polled_command(sc, cm)) != 0) {
561 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
562 		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
563 		    MFI_SECTOR_LEN;
564 		error = 0;
565 		goto out;
566 	}
567 
568 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
569 	    BUS_DMASYNC_POSTREAD);
570 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
571 
572 	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
573 	max_sectors_2 = ci->max_request_size;
574 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
575 
576 out:
577 	if (ci)
578 		free(ci, M_MFIBUF);
579 	if (cm)
580 		mfi_release_command(cm);
581 	mtx_unlock(&sc->mfi_io_lock);
582 	return (error);
583 }
584 
585 static int
586 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
587 {
588 	struct mfi_command *cm = NULL;
589 	int error;
590 
591 	mtx_lock(&sc->mfi_io_lock);
592 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
593 	    (void **)log_state, sizeof(**log_state));
594 	if (error)
595 		goto out;
596 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
597 
598 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
599 		device_printf(sc->mfi_dev, "Log state buffer map failed\n");
600 		goto out;
601 	}
602 
603 	if ((error = mfi_polled_command(sc, cm)) != 0) {
604 		device_printf(sc->mfi_dev, "Failed to get log state\n");
605 		goto out;
606 	}
607 
608 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
609 	    BUS_DMASYNC_POSTREAD);
610 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
611 
612 out:
613 	if (cm)
614 		mfi_release_command(cm);
615 	mtx_unlock(&sc->mfi_io_lock);
616 
617 	return (error);
618 }
619 
620 static int
621 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
622 {
623 	struct mfi_evt_log_state *log_state = NULL;
624 	union mfi_evt class_locale;
625 	int error = 0;
626 	uint32_t seq;
627 
628 	class_locale.members.reserved = 0;
629 	class_locale.members.locale = mfi_event_locale;
630 	class_locale.members.class  = mfi_event_class;
631 
632 	if (seq_start == 0) {
633 		error = mfi_get_log_state(sc, &log_state);
634 		if (error) {
635 			if (log_state)
636 				free(log_state, M_MFIBUF);
637 			return (error);
638 		}
639 		/*
640 		 * Don't run them yet since we can't parse them.
641 		 * We can indirectly get the contents from
642 		 * the AEN mechanism via setting it lower than
643 		 * current.  The firmware will iterate through them.
644 		 */
645 		for (seq = log_state->shutdown_seq_num;
646 		     seq <= log_state->newest_seq_num; seq++) {
647 			mfi_get_entry(sc, seq);
648 		}
649 	} else
650 		seq = seq_start;
651 	mfi_aen_register(sc, seq, class_locale.word);
652 	free(log_state, M_MFIBUF);
653 
654 	return 0;
655 }
656 
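/*
 * Issue a command and spin in 1ms steps until the firmware updates
 * cmd_status or MFI_POLL_TIMEOUT_SECS expires.  The frame is flagged so
 * that it is not posted to the reply queue on completion.
 */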
657 static int
658 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
659 {
660 	struct mfi_frame_header *hdr;
661 	int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
662 
663 	hdr = &cm->cm_frame->header;
664 	hdr->cmd_status = 0xff;
665 	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
666 
667 	mfi_send_frame(sc, cm);
668 
669 	while (hdr->cmd_status == 0xff) {
670 		DELAY(1000);
671 		tm -= 1000;
672 		if (tm <= 0)
673 			break;
674 	}
675 
676 	if (hdr->cmd_status == 0xff) {
677 		device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
678 		return (ETIMEDOUT);
679 	}
680 
681 	return (0);
682 }
683 
684 static int
685 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
686 {
687 
688 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
689 	cm->cm_complete = NULL;
690 
691 	mfi_enqueue_ready(cm);
692 	mfi_startio(sc);
693 	return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
694 }
695 
696 void
697 mfi_free(struct mfi_softc *sc)
698 {
699 	struct mfi_command *cm;
700 	int i;
701 
702 	if (sc->mfi_cdev != NULL)
703 		destroy_dev(sc->mfi_cdev);
704 
705 	if (sc->mfi_total_cmds != 0) {
706 		for (i = 0; i < sc->mfi_total_cmds; i++) {
707 			cm = &sc->mfi_commands[i];
708 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
709 		}
710 		free(sc->mfi_commands, M_MFIBUF);
711 	}
712 
713 	if (sc->mfi_intr)
714 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
715 	if (sc->mfi_irq != NULL)
716 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
717 		    sc->mfi_irq);
718 
719 	if (sc->mfi_sense_busaddr != 0)
720 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
721 	if (sc->mfi_sense != NULL)
722 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
723 		    sc->mfi_sense_dmamap);
724 	if (sc->mfi_sense_dmat != NULL)
725 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
726 
727 	if (sc->mfi_frames_busaddr != 0)
728 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
729 	if (sc->mfi_frames != NULL)
730 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
731 		    sc->mfi_frames_dmamap);
732 	if (sc->mfi_frames_dmat != NULL)
733 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
734 
735 	if (sc->mfi_comms_busaddr != 0)
736 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
737 	if (sc->mfi_comms != NULL)
738 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
739 		    sc->mfi_comms_dmamap);
740 	if (sc->mfi_comms_dmat != NULL)
741 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
742 
743 	if (sc->mfi_buffer_dmat != NULL)
744 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
745 	if (sc->mfi_parent_dmat != NULL)
746 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
747 
748 	if (mtx_initialized(&sc->mfi_io_lock))
749 		mtx_destroy(&sc->mfi_io_lock);
750 
751 	return;
752 }
753 
754 static void
755 mfi_startup(void *arg)
756 {
757 	struct mfi_softc *sc;
758 
759 	sc = (struct mfi_softc *)arg;
760 
761 	config_intrhook_disestablish(&sc->mfi_ich);
762 
763 	mfi_enable_intr(sc);
764 	mfi_ldprobe(sc);
765 }
766 
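/*
 * Interrupt handler: acknowledge the outbound interrupt, then walk the
 * reply queue from the consumer index up to the producer index, using
 * each entry's context value to find and complete the owning command.
 */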
767 static void
768 mfi_intr(void *arg)
769 {
770 	struct mfi_softc *sc;
771 	struct mfi_command *cm;
772 	uint32_t status, pi, ci, context;
773 
774 	sc = (struct mfi_softc *)arg;
775 
776 	status = MFI_READ4(sc, MFI_OSTS);
777 	if ((status & MFI_OSTS_INTR_VALID) == 0)
778 		return;
779 	MFI_WRITE4(sc, MFI_OSTS, status);
780 
781 	pi = sc->mfi_comms->hw_pi;
782 	ci = sc->mfi_comms->hw_ci;
783 	mtx_lock(&sc->mfi_io_lock);
784 	while (ci != pi) {
785 		context = sc->mfi_comms->hw_reply_q[ci];
786 		cm = &sc->mfi_commands[context];
787 		mfi_remove_busy(cm);
788 		mfi_complete(sc, cm);
789 		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
790 			ci = 0;
791 		}
792 	}
793 	mtx_unlock(&sc->mfi_io_lock);
794 
795 	sc->mfi_comms->hw_ci = ci;
796 
797 	return;
798 }
799 
800 int
801 mfi_shutdown(struct mfi_softc *sc)
802 {
803 	struct mfi_dcmd_frame *dcmd;
804 	struct mfi_command *cm;
805 	int error;
806 
807 	mtx_lock(&sc->mfi_io_lock);
808 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
809 	mtx_unlock(&sc->mfi_io_lock);
810 	if (error)
811 		return (error);
812 
813 	if (sc->mfi_aen_cm != NULL)
814 		mfi_abort(sc, sc->mfi_aen_cm);
815 
816 	dcmd = &cm->cm_frame->dcmd;
817 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
818 
819 	if ((error = mfi_polled_command(sc, cm)) != 0) {
820 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
821 	}
822 
823 	mfi_release_command(cm);
824 	return (error);
825 }
826 
827 static void
828 mfi_enable_intr(struct mfi_softc *sc)
829 {
830 
831 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
832 }
833 
834 static void
835 mfi_ldprobe(struct mfi_softc *sc)
836 {
837 	struct mfi_frame_header *hdr;
838 	struct mfi_command *cm = NULL;
839 	struct mfi_ld_list *list = NULL;
840 	int error, i;
841 
842 	mtx_lock(&sc->mfi_io_lock);
843 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
844 	    (void **)&list, sizeof(*list));
845 	if (error)
846 		goto out;
847 
848 	cm->cm_flags = MFI_CMD_DATAIN;
849 	if (mfi_wait_command(sc, cm) != 0) {
850 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
851 		goto out;
852 	}
853 
854 	hdr = &cm->cm_frame->header;
855 	if (hdr->cmd_status != MFI_STAT_OK) {
856 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
857 		    hdr->cmd_status);
858 		goto out;
859 	}
860 
861 	for (i = 0; i < list->ld_count; i++)
862 		mfi_add_ld(sc, list->ld_list[i].ld.target_id);
863 out:
864 	if (list)
865 		free(list, M_MFIBUF);
866 	if (cm)
867 		mfi_release_command(cm);
868 	mtx_unlock(&sc->mfi_io_lock);
869 	return;
870 }
871 
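/*
 * Pretty-print an asynchronous event based on its argument type.  Every
 * case prints the sequence number, timestamp, locale, and class followed
 * by the type-specific fields and the firmware-supplied description.
 */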
872 static void
873 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
874 {
875 	switch (detail->arg_type) {
876 	case MR_EVT_ARGS_NONE:
877 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
878 		    detail->seq,
879 		    detail->time,
880 		    detail->class.members.locale,
881 		    detail->class.members.class,
882 		    detail->description
883 		    );
884 		break;
885 	case MR_EVT_ARGS_CDB_SENSE:
886 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D "
887 		    "Sense %*D\n: %s\n",
888 		    detail->seq,
889 		    detail->time,
890 		    detail->class.members.locale,
891 		    detail->class.members.class,
892 		    detail->args.cdb_sense.pd.device_id,
893 		    detail->args.cdb_sense.pd.enclosure_index,
894 		    detail->args.cdb_sense.pd.slot_number,
895 		    detail->args.cdb_sense.cdb_len,
896 		    detail->args.cdb_sense.cdb,
897 		    ":",
898 		    detail->args.cdb_sense.sense_len,
899 		    detail->args.cdb_sense.sense,
900 		    ":",
901 		    detail->description
902 		    );
903 		break;
904 	case MR_EVT_ARGS_LD:
905 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
906 		    "event: %s\n",
907 		    detail->seq,
908 		    detail->time,
909 		    detail->class.members.locale,
910 		    detail->class.members.class,
911 		    detail->args.ld.ld_index,
912 		    detail->args.ld.target_id,
913 		    detail->description
914 		    );
915 		break;
916 	case MR_EVT_ARGS_LD_COUNT:
917 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
918 		    "count %lld: %s\n",
919 		    detail->seq,
920 		    detail->time,
921 		    detail->class.members.locale,
922 		    detail->class.members.class,
923 		    detail->args.ld_count.ld.ld_index,
924 		    detail->args.ld_count.ld.target_id,
925 		    (long long)detail->args.ld_count.count,
926 		    detail->description
927 		    );
928 		break;
929 	case MR_EVT_ARGS_LD_LBA:
930 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
931 		    "lba %lld: %s\n",
932 		    detail->seq,
933 		    detail->time,
934 		    detail->class.members.locale,
935 		    detail->class.members.class,
936 		    detail->args.ld_lba.ld.ld_index,
937 		    detail->args.ld_lba.ld.target_id,
938 		    (long long)detail->args.ld_lba.lba,
939 		    detail->description
940 		    );
941 		break;
942 	case MR_EVT_ARGS_LD_OWNER:
943 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
944 		    "owner changed: prior %d, new %d: %s\n",
945 		    detail->seq,
946 		    detail->time,
947 		    detail->class.members.locale,
948 		    detail->class.members.class,
949 		    detail->args.ld_owner.ld.ld_index,
950 		    detail->args.ld_owner.ld.target_id,
951 		    detail->args.ld_owner.pre_owner,
952 		    detail->args.ld_owner.new_owner,
953 		    detail->description
954 		    );
955 		break;
956 	case MR_EVT_ARGS_LD_LBA_PD_LBA:
957 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
958 		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
959 		    detail->seq,
960 		    detail->time,
961 		    detail->class.members.locale,
962 		    detail->class.members.class,
963 		    detail->args.ld_lba_pd_lba.ld.ld_index,
964 		    detail->args.ld_lba_pd_lba.ld.target_id,
965 		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
966 		    detail->args.ld_lba_pd_lba.pd.device_id,
967 		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
968 		    detail->args.ld_lba_pd_lba.pd.slot_number,
969 		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
970 		    detail->description
971 		    );
972 		break;
973 	case MR_EVT_ARGS_LD_PROG:
974 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
975 		    "progress %d%% in %ds: %s\n",
976 		    detail->seq,
977 		    detail->time,
978 		    detail->class.members.locale,
979 		    detail->class.members.class,
980 		    detail->args.ld_prog.ld.ld_index,
981 		    detail->args.ld_prog.ld.target_id,
982 		    detail->args.ld_prog.prog.progress/655,
983 		    detail->args.ld_prog.prog.elapsed_seconds,
984 		    detail->description
985 		    );
986 		break;
987 	case MR_EVT_ARGS_LD_STATE:
988 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
989 		    "state prior %d new %d: %s\n",
990 		    detail->seq,
991 		    detail->time,
992 		    detail->class.members.locale,
993 		    detail->class.members.class,
994 		    detail->args.ld_state.ld.ld_index,
995 		    detail->args.ld_state.ld.target_id,
996 		    detail->args.ld_state.prev_state,
997 		    detail->args.ld_state.new_state,
998 		    detail->description
999 		    );
1000 		break;
1001 	case MR_EVT_ARGS_LD_STRIP:
1002 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1003 		    "strip %lld: %s\n",
1004 		    detail->seq,
1005 		    detail->time,
1006 		    detail->class.members.locale,
1007 		    detail->class.members.class,
1008 		    detail->args.ld_strip.ld.ld_index,
1009 		    detail->args.ld_strip.ld.target_id,
1010 		    (long long)detail->args.ld_strip.strip,
1011 		    detail->description
1012 		    );
1013 		break;
1014 	case MR_EVT_ARGS_PD:
1015 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1016 		    "event: %s\n",
1017 		    detail->seq,
1018 		    detail->time,
1019 		    detail->class.members.locale,
1020 		    detail->class.members.class,
1021 		    detail->args.pd.device_id,
1022 		    detail->args.pd.enclosure_index,
1023 		    detail->args.pd.slot_number,
1024 		    detail->description
1025 		    );
1026 		break;
1027 	case MR_EVT_ARGS_PD_ERR:
1028 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1029 		    "err %d: %s\n",
1030 		    detail->seq,
1031 		    detail->time,
1032 		    detail->class.members.locale,
1033 		    detail->class.members.class,
1034 		    detail->args.pd_err.pd.device_id,
1035 		    detail->args.pd_err.pd.enclosure_index,
1036 		    detail->args.pd_err.pd.slot_number,
1037 		    detail->args.pd_err.err,
1038 		    detail->description
1039 		    );
1040 		break;
1041 	case MR_EVT_ARGS_PD_LBA:
1042 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1043 		    "lba %lld: %s\n",
1044 		    detail->seq,
1045 		    detail->time,
1046 		    detail->class.members.locale,
1047 		    detail->class.members.class,
1048 		    detail->args.pd_lba.pd.device_id,
1049 		    detail->args.pd_lba.pd.enclosure_index,
1050 		    detail->args.pd_lba.pd.slot_number,
1051 		    (long long)detail->args.pd_lba.lba,
1052 		    detail->description
1053 		    );
1054 		break;
1055 	case MR_EVT_ARGS_PD_LBA_LD:
1056 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1057 		    "lba %lld VD %02d/%d: %s\n",
1058 		    detail->seq,
1059 		    detail->time,
1060 		    detail->class.members.locale,
1061 		    detail->class.members.class,
1062 		    detail->args.pd_lba_ld.pd.device_id,
1063 		    detail->args.pd_lba_ld.pd.enclosure_index,
1064 		    detail->args.pd_lba_ld.pd.slot_number,
1065 		    (long long)detail->args.pd_lba_ld.lba,
1066 		    detail->args.pd_lba_ld.ld.ld_index,
1067 		    detail->args.pd_lba_ld.ld.target_id,
1068 		    detail->description
1069 		    );
1070 		break;
1071 	case MR_EVT_ARGS_PD_PROG:
1072 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1073 		    "progress %d%% seconds %ds: %s\n",
1074 		    detail->seq,
1075 		    detail->time,
1076 		    detail->class.members.locale,
1077 		    detail->class.members.class,
1078 		    detail->args.pd_prog.pd.device_id,
1079 		    detail->args.pd_prog.pd.enclosure_index,
1080 		    detail->args.pd_prog.pd.slot_number,
1081 		    detail->args.pd_prog.prog.progress/655,
1082 		    detail->args.pd_prog.prog.elapsed_seconds,
1083 		    detail->description
1084 		    );
1085 		break;
1086 	case MR_EVT_ARGS_PD_STATE:
1087 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1088 		    "state prior %d new %d: %s\n",
1089 		    detail->seq,
1090 		    detail->time,
1091 		    detail->class.members.locale,
1092 		    detail->class.members.class,
1093 		    detail->args.pd_state.pd.device_id,
1094 		    detail->args.pd_state.pd.enclosure_index,
1095 		    detail->args.pd_state.pd.slot_number,
1096 		    detail->args.pd_state.prev_state,
1097 		    detail->args.pd_state.new_state,
1098 		    detail->description
1099 		    );
1100 		break;
1101 	case MR_EVT_ARGS_PCI:
1102 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x04%x 0x04%x "
1103 		    "0x04%x 0x04%x: %s\n",
1104 		    detail->seq,
1105 		    detail->time,
1106 		    detail->class.members.locale,
1107 		    detail->class.members.class,
1108 		    detail->args.pci.venderId,
1109 		    detail->args.pci.deviceId,
1110 		    detail->args.pci.subVenderId,
1111 		    detail->args.pci.subDeviceId,
1112 		    detail->description
1113 		    );
1114 		break;
1115 	case MR_EVT_ARGS_RATE:
1116 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1117 		    detail->seq,
1118 		    detail->time,
1119 		    detail->class.members.locale,
1120 		    detail->class.members.class,
1121 		    detail->args.rate,
1122 		    detail->description
1123 		    );
1124 		break;
1125 	case MR_EVT_ARGS_TIME:
1126 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1127 		    "elapsed %ds: %s\n",
1128 		    detail->seq,
1129 		    detail->time,
1130 		    detail->class.members.locale,
1131 		    detail->class.members.class,
1132 		    detail->args.time.rtc,
1133 		    detail->args.time.elapsedSeconds,
1134 		    detail->description
1135 		    );
1136 		break;
1137 	case MR_EVT_ARGS_ECC:
1138 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1139 		    detail->seq,
1140 		    detail->time,
1141 		    detail->class.members.locale,
1142 		    detail->class.members.class,
1143 		    detail->args.ecc.ecar,
1144 		    detail->args.ecc.elog,
1145 		    detail->args.ecc.str,
1146 		    detail->description
1147 		    );
1148 		break;
1149 	default:
1150 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1151 		    detail->seq,
1152 		    detail->time,
1153 		    detail->class.members.locale,
1154 		    detail->class.members.class,
1155 		    detail->arg_type, detail->description
1156 		    );
1157 	}
1158 }
1159 
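/*
 * Post an MFI_DCMD_CTRL_EVENT_WAIT command so that the firmware will
 * complete it when an event matching the requested class/locale occurs.
 * If an AEN command is already outstanding and already covers the
 * request, it is left alone; otherwise it is aborted and a new one is
 * issued.
 */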
1160 static int
1161 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1162 {
1163 	struct mfi_command *cm;
1164 	struct mfi_dcmd_frame *dcmd;
1165 	union mfi_evt current_aen, prior_aen;
1166 	struct mfi_evt_detail *ed = NULL;
1167 	int error;
1168 
1169 	current_aen.word = locale;
1170 	if (sc->mfi_aen_cm != NULL) {
1171 		prior_aen.word =
1172 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1173 		if (prior_aen.members.class <= current_aen.members.class &&
1174 		    !((prior_aen.members.locale & current_aen.members.locale)
1175 		    ^current_aen.members.locale)) {
1176 			return (0);
1177 		} else {
1178 			prior_aen.members.locale |= current_aen.members.locale;
1179 			if (prior_aen.members.class
1180 			    < current_aen.members.class)
1181 				current_aen.members.class =
1182 				    prior_aen.members.class;
1183 			mfi_abort(sc, sc->mfi_aen_cm);
1184 		}
1185 	}
1186 
1187 	mtx_lock(&sc->mfi_io_lock);
1188 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1189 	    (void **)&ed, sizeof(*ed));
1190 	mtx_unlock(&sc->mfi_io_lock);
1191 	if (error)
1192 		return (error);
1193 
1194 	dcmd = &cm->cm_frame->dcmd;
1195 	((uint32_t *)&dcmd->mbox)[0] = seq;
1196 	((uint32_t *)&dcmd->mbox)[1] = locale;
1197 	cm->cm_flags = MFI_CMD_DATAIN;
1198 	cm->cm_complete = mfi_aen_complete;
1199 
1200 	sc->mfi_aen_cm = cm;
1201 
1202 	mfi_enqueue_ready(cm);
1203 	mfi_startio(sc);
1204 
1205 	return (0);
1206 }
1207 
1208 static void
1209 mfi_aen_complete(struct mfi_command *cm)
1210 {
1211 	struct mfi_frame_header *hdr;
1212 	struct mfi_softc *sc;
1213 	struct mfi_evt_detail *detail;
1214 	struct mfi_aen *mfi_aen_entry;
1215 	int seq = 0, aborted = 0;
1216 
1217 	sc = cm->cm_sc;
1218 	hdr = &cm->cm_frame->header;
1219 
1220 	if (sc->mfi_aen_cm == NULL)
1221 		return;
1222 
1223 	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1224 		sc->mfi_aen_cm->cm_aen_abort = 0;
1225 		aborted = 1;
1226 	} else {
1227 		sc->mfi_aen_triggered = 1;
1228 		if (sc->mfi_poll_waiting)
1229 			selwakeup(&sc->mfi_select);
1230 		detail = cm->cm_data;
1231 		mtx_unlock(&sc->mfi_io_lock);
1232 		mfi_decode_evt(sc, detail);
1233 		mtx_lock(&sc->mfi_io_lock);
1234 		seq = detail->seq + 1;
1235 		TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1236 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1237 			    aen_link);
1238 			psignal(mfi_aen_entry->p, SIGIO);
1239 			free(mfi_aen_entry, M_MFIBUF);
1240 		}
1241 	}
1242 
1243 	free(cm->cm_data, M_MFIBUF);
1244 	sc->mfi_aen_cm = NULL;
1245 	wakeup(&sc->mfi_aen_cm);
1246 	mfi_release_command(cm);
1247 
1248 	/* set it up again so the driver can catch more events */
1249 	if (!aborted) {
1250 		mtx_unlock(&sc->mfi_io_lock);
1251 		mfi_aen_setup(sc, seq);
1252 		mtx_lock(&sc->mfi_io_lock);
1253 	}
1254 }
1255 
1256 /* Only do one event for now so we can easily iterate through them */
1257 #define MAX_EVENTS 1
1258 static int
1259 mfi_get_entry(struct mfi_softc *sc, int seq)
1260 {
1261 	struct mfi_command *cm;
1262 	struct mfi_dcmd_frame *dcmd;
1263 	struct mfi_evt_list *el;
1264 	int error;
1265 	int i;
1266 	int size;
1267 
1268 	mtx_lock(&sc->mfi_io_lock);
1269 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1270 		mtx_unlock(&sc->mfi_io_lock);
1271 		return (EBUSY);
1272 	}
1273 	mtx_unlock(&sc->mfi_io_lock);
1274 
1275 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1276 		* (MAX_EVENTS - 1);
1277 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1278 	if (el == NULL) {
1279 		mtx_lock(&sc->mfi_io_lock);
1280 		mfi_release_command(cm);
1281 		mtx_unlock(&sc->mfi_io_lock);
1282 		return (ENOMEM);
1283 	}
1284 
1285 	dcmd = &cm->cm_frame->dcmd;
1286 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1287 	dcmd->header.cmd = MFI_CMD_DCMD;
1288 	dcmd->header.timeout = 0;
1289 	dcmd->header.data_len = size;
1290 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1291 	((uint32_t *)&dcmd->mbox)[0] = seq;
1292 	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1293 	cm->cm_sg = &dcmd->sgl;
1294 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1295 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1296 	cm->cm_data = el;
1297 	cm->cm_len = size;
1298 
1299 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1300 		device_printf(sc->mfi_dev, "Event list buffer map failed\n");
1301 		free(el, M_MFIBUF);
1302 		mfi_release_command(cm);
1303 		return (error);
1304 	}
1305 
1306 	if ((error = mfi_polled_command(sc, cm)) != 0) {
1307 		device_printf(sc->mfi_dev, "Failed to get event entry\n");
1310 		free(el, M_MFIBUF);
1311 		mfi_release_command(cm);
1312 		return (0);
1313 	}
1314 
1315 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1316 	    BUS_DMASYNC_POSTREAD);
1317 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1318 
1319 	for (i = 0; i < el->count; i++) {
1320 		mfi_decode_evt(sc, &el->event[i]);
1321 	}
1322 
1323 	mtx_lock(&sc->mfi_io_lock);
1324 	free(cm->cm_data, M_MFIBUF);
1325 	mfi_release_command(cm);
1326 	mtx_unlock(&sc->mfi_io_lock);
1327 	return (0);
1328 }
1329 
1330 static int
1331 mfi_add_ld(struct mfi_softc *sc, int id)
1332 {
1333 	struct mfi_command *cm;
1334 	struct mfi_dcmd_frame *dcmd = NULL;
1335 	struct mfi_ld_info *ld_info = NULL;
1336 	int error;
1337 
1338 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1339 
1340 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1341 	    (void **)&ld_info, sizeof(*ld_info));
1342 	if (error) {
1343 		device_printf(sc->mfi_dev,
1344 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1345 		if (ld_info)
1346 			free(ld_info, M_MFIBUF);
1347 		return (error);
1348 	}
1349 	cm->cm_flags = MFI_CMD_DATAIN;
1350 	dcmd = &cm->cm_frame->dcmd;
1351 	dcmd->mbox[0] = id;
1352 	if (mfi_wait_command(sc, cm) != 0) {
1353 		device_printf(sc->mfi_dev,
1354 		    "Failed to get logical drive: %d\n", id);
1355 		free(ld_info, M_MFIBUF);
1356 		return (0);
1357 	}
1358 
1359 	mfi_add_ld_complete(cm);
1360 	return (0);
1361 }
1362 
1363 static void
1364 mfi_add_ld_complete(struct mfi_command *cm)
1365 {
1366 	struct mfi_frame_header *hdr;
1367 	struct mfi_ld_info *ld_info;
1368 	struct mfi_softc *sc;
1369 	struct mfi_ld *ld;
1370 	device_t child;
1371 
1372 	sc = cm->cm_sc;
1373 	hdr = &cm->cm_frame->header;
1374 	ld_info = cm->cm_private;
1375 
1376 	if (hdr->cmd_status != MFI_STAT_OK) {
1377 		free(ld_info, M_MFIBUF);
1378 		mfi_release_command(cm);
1379 		return;
1380 	}
1381 	mfi_release_command(cm);
1382 
1383 	ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1384 	if (ld == NULL) {
1385 		device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1386 		free(ld_info, M_MFIBUF);
1387 		return;
1388 	}
1389 
1390 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1391 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1392 		free(ld, M_MFIBUF);
1393 		free(ld_info, M_MFIBUF);
1394 		return;
1395 	}
1396 
1397 	ld->ld_id = ld_info->ld_config.properties.ld.target_id;
1398 	ld->ld_disk = child;
1399 	ld->ld_info = ld_info;
1400 
1401 	device_set_ivars(child, ld);
1402 	device_set_desc(child, "MFI Logical Disk");
1403 	mtx_unlock(&sc->mfi_io_lock);
1404 	mtx_lock(&Giant);
1405 	bus_generic_attach(sc->mfi_dev);
1406 	mtx_unlock(&Giant);
1407 	mtx_lock(&sc->mfi_io_lock);
1408 }
1409 
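/*
 * Convert the next queued bio into an LD read or write frame.  The data
 * length is expressed in MFI_SECTOR_LEN-sized sectors and the 64-bit LBA
 * is split across the lba_hi/lba_lo fields of the I/O frame.
 */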
1410 static struct mfi_command *
1411 mfi_bio_command(struct mfi_softc *sc)
1412 {
1413 	struct mfi_io_frame *io;
1414 	struct mfi_command *cm;
1415 	struct bio *bio;
1416 	int flags, blkcount;
1417 
1418 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1419 		return (NULL);
1420 
1421 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1422 		mfi_release_command(cm);
1423 		return (NULL);
1424 	}
1425 
1426 	io = &cm->cm_frame->io;
1427 	switch (bio->bio_cmd & 0x03) {
1428 	case BIO_READ:
1429 		io->header.cmd = MFI_CMD_LD_READ;
1430 		flags = MFI_CMD_DATAIN;
1431 		break;
1432 	case BIO_WRITE:
1433 		io->header.cmd = MFI_CMD_LD_WRITE;
1434 		flags = MFI_CMD_DATAOUT;
1435 		break;
1436 	default:
1437 		panic("Invalid bio command");
1438 	}
1439 
1440 	/* Cheat with the sector length to avoid a non-constant division */
1441 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1442 	io->header.target_id = (uintptr_t)bio->bio_driver1;
1443 	io->header.timeout = 0;
1444 	io->header.flags = 0;
1445 	io->header.sense_len = MFI_SENSE_LEN;
1446 	io->header.data_len = blkcount;
1447 	io->sense_addr_lo = cm->cm_sense_busaddr;
1448 	io->sense_addr_hi = 0;
1449 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1450 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1451 	cm->cm_complete = mfi_bio_complete;
1452 	cm->cm_private = bio;
1453 	cm->cm_data = bio->bio_data;
1454 	cm->cm_len = bio->bio_bcount;
1455 	cm->cm_sg = &io->sgl;
1456 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1457 	cm->cm_flags = flags;
1458 
1459 	return (cm);
1460 }
1461 
1462 static void
1463 mfi_bio_complete(struct mfi_command *cm)
1464 {
1465 	struct bio *bio;
1466 	struct mfi_frame_header *hdr;
1467 	struct mfi_softc *sc;
1468 
1469 	bio = cm->cm_private;
1470 	hdr = &cm->cm_frame->header;
1471 	sc = cm->cm_sc;
1472 
1473 	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1474 		bio->bio_flags |= BIO_ERROR;
1475 		bio->bio_error = EIO;
1476 		device_printf(sc->mfi_dev, "I/O error, status= %d "
1477 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1478 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1479 	}
1480 
1481 	mfi_release_command(cm);
1482 	mfi_disk_complete(bio);
1483 }
1484 
1485 void
1486 mfi_startio(struct mfi_softc *sc)
1487 {
1488 	struct mfi_command *cm;
1489 
1490 	for (;;) {
1491 		/* Don't bother if we're short on resources */
1492 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1493 			break;
1494 
1495 		/* Try a command that has already been prepared */
1496 		cm = mfi_dequeue_ready(sc);
1497 
1498 		/* Nope, so look for work on the bioq */
1499 		if (cm == NULL)
1500 			cm = mfi_bio_command(sc);
1501 
1502 		/* No work available, so exit */
1503 		if (cm == NULL)
1504 			break;
1505 
1506 		/* Send the command to the controller */
1507 		if (mfi_mapcmd(sc, cm) != 0) {
1508 			mfi_requeue_ready(cm);
1509 			break;
1510 		}
1511 	}
1512 }
1513 
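/*
 * Map a command's data buffer and hand the command to the controller.
 * A deferred (EINPROGRESS) mapping freezes the queue until the busdma
 * callback runs; commands without a data buffer are sent immediately.
 */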
1514 static int
1515 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1516 {
1517 	int error, polled;
1518 
1519 	if (cm->cm_data != NULL) {
1520 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1521 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1522 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1523 		if (error == EINPROGRESS) {
1524 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1525 			return (0);
1526 		}
1527 	} else {
1528 		mfi_enqueue_busy(cm);
1529 		error = mfi_send_frame(sc, cm);
1530 	}
1531 
1532 	return (error);
1533 }
1534 
1535 static void
1536 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1537 {
1538 	struct mfi_frame_header *hdr;
1539 	struct mfi_command *cm;
1540 	union mfi_sgl *sgl;
1541 	struct mfi_softc *sc;
1542 	int i, dir;
1543 
1544 	if (error)
1545 		return;
1546 
1547 	cm = (struct mfi_command *)arg;
1548 	sc = cm->cm_sc;
1549 	hdr = &cm->cm_frame->header;
1550 	sgl = cm->cm_sg;
1551 
1552 	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1553 		for (i = 0; i < nsegs; i++) {
1554 			sgl->sg32[i].addr = segs[i].ds_addr;
1555 			sgl->sg32[i].len = segs[i].ds_len;
1556 		}
1557 	} else {
1558 		for (i = 0; i < nsegs; i++) {
1559 			sgl->sg64[i].addr = segs[i].ds_addr;
1560 			sgl->sg64[i].len = segs[i].ds_len;
1561 		}
1562 		hdr->flags |= MFI_FRAME_SGL64;
1563 	}
1564 	hdr->sg_count = nsegs;
1565 
1566 	dir = 0;
1567 	if (cm->cm_flags & MFI_CMD_DATAIN) {
1568 		dir |= BUS_DMASYNC_PREREAD;
1569 		hdr->flags |= MFI_FRAME_DIR_READ;
1570 	}
1571 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
1572 		dir |= BUS_DMASYNC_PREWRITE;
1573 		hdr->flags |= MFI_FRAME_DIR_WRITE;
1574 	}
1575 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1576 	cm->cm_flags |= MFI_CMD_MAPPED;
1577 
1578 	/*
1579 	 * Instead of calculating the total number of frames in the
1580 	 * compound frame, it's already assumed that there will be at
1581 	 * least 1 frame, so don't compensate for the modulo of the
1582 	 * following division.
1583 	 */
1584 	cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
1585 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1586 
1587 	/* The caller will take care of delivering polled commands */
1588 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1589 		mfi_enqueue_busy(cm);
1590 		mfi_send_frame(sc, cm);
1591 	}
1592 
1593 	return;
1594 }
1595 
1596 static int
1597 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1598 {
1599 
1600 	/*
1601 	 * The bus address of the command is aligned on a 64 byte boundary,
1602 	 * leaving the least 6 bits as zero.  For whatever reason, the
1603 	 * hardware wants the address shifted right by three, leaving just
1604 	 * 3 zero bits.  These three bits are then used to indicate how many
1605 	 * 64 byte frames beyond the first one are used in the command.  The
1606 	 * extra frames are typically filled with S/G elements.  The extra
1607 	 * frames must also be contiguous.  Thus, a compound frame can be at
1608 	 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1609 	 * 39 64-bit S/G elements for block I/O commands.  This means that
1610 	 * I/O transfers of 256k and higher simply are not possible, which
1611 	 * is quite odd for such a modern adapter.
1612 	 */
1613 	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1614 	    cm->cm_extra_frames);
1615 	return (0);
1616 }
1617 
1618 static void
1619 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1620 {
1621 	int dir;
1622 
1623 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1624 		dir = 0;
1625 		if (cm->cm_flags & MFI_CMD_DATAIN)
1626 			dir |= BUS_DMASYNC_POSTREAD;
1627 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1628 			dir |= BUS_DMASYNC_POSTWRITE;
1629 
1630 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1631 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1632 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1633 	}
1634 
1635 	if (cm->cm_complete != NULL)
1636 		cm->cm_complete(cm);
1637 	else
1638 		wakeup(cm);
1639 
1640 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1641 	mfi_startio(sc);
1642 }
1643 
1644 static int
1645 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1646 {
1647 	struct mfi_command *cm;
1648 	struct mfi_abort_frame *abort;
1649 
1650 	mtx_lock(&sc->mfi_io_lock);
1651 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1652 		mtx_unlock(&sc->mfi_io_lock);
1653 		return (EBUSY);
1654 	}
1655 	mtx_unlock(&sc->mfi_io_lock);
1656 
1657 	abort = &cm->cm_frame->abort;
1658 	abort->header.cmd = MFI_CMD_ABORT;
1659 	abort->header.flags = 0;
1660 	abort->abort_context = cm_abort->cm_frame->header.context;
1661 	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1662 	abort->abort_mfi_addr_hi = 0;
1663 	cm->cm_data = NULL;
1664 
1665 	sc->mfi_aen_cm->cm_aen_abort = 1;
1666 	mfi_mapcmd(sc, cm);
1667 	mfi_polled_command(sc, cm);
1668 	mtx_lock(&sc->mfi_io_lock);
1669 	mfi_release_command(cm);
1670 	mtx_unlock(&sc->mfi_io_lock);
1671 
1672 	while (sc->mfi_aen_cm != NULL) {
1673 		tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
1674 	}
1675 
1676 	return (0);
1677 }
1678 
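/*
 * Write a buffer to a logical drive using a polled command, suitable for
 * contexts where interrupts are not available (e.g. kernel core dumps).
 */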
1679 int
1680 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1681 {
1682 	struct mfi_command *cm;
1683 	struct mfi_io_frame *io;
1684 	int error;
1685 
1686 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1687 		return (EBUSY);
1688 
1689 	io = &cm->cm_frame->io;
1690 	io->header.cmd = MFI_CMD_LD_WRITE;
1691 	io->header.target_id = id;
1692 	io->header.timeout = 0;
1693 	io->header.flags = 0;
1694 	io->header.sense_len = MFI_SENSE_LEN;
1695 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1696 	io->sense_addr_lo = cm->cm_sense_busaddr;
1697 	io->sense_addr_hi = 0;
1698 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1699 	io->lba_lo = lba & 0xffffffff;
1700 	cm->cm_data = virt;
1701 	cm->cm_len = len;
1702 	cm->cm_sg = &io->sgl;
1703 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1704 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1705 
1706 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1707 		mfi_release_command(cm);
1708 		return (error);
1709 	}
1710 
1711 	error = mfi_polled_command(sc, cm);
1712 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1713 	    BUS_DMASYNC_POSTWRITE);
1714 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1715 	mfi_release_command(cm);
1716 
1717 	return (error);
1718 }
1719 
1720 static int
1721 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1722 {
1723 	struct mfi_softc *sc;
1724 
1725 	sc = dev->si_drv1;
1726 	sc->mfi_flags |= MFI_FLAGS_OPEN;
1727 
1728 	return (0);
1729 }
1730 
1731 static int
1732 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1733 {
1734 	struct mfi_softc *sc;
1735 	struct mfi_aen *mfi_aen_entry;
1736 
1737 	sc = dev->si_drv1;
1738 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1739 
1740 	TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1741 		if (mfi_aen_entry->p == curproc) {
1742 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1743 			    aen_link);
1744 			free(mfi_aen_entry, M_MFIBUF);
1745 		}
1746 	}
1747 	return (0);
1748 }
1749 
1750 static int
1751 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1752 {
1753 	struct mfi_softc *sc;
1754 	union mfi_statrequest *ms;
1755 	int error;
1756 
1757 	sc = dev->si_drv1;
1758 	error = 0;
1759 
1760 	switch (cmd) {
1761 	case MFIIO_STATS:
1762 		ms = (union mfi_statrequest *)arg;
1763 		switch (ms->ms_item) {
1764 		case MFIQ_FREE:
1765 		case MFIQ_BIO:
1766 		case MFIQ_READY:
1767 		case MFIQ_BUSY:
1768 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1769 			    sizeof(struct mfi_qstat));
1770 			break;
1771 		default:
1772 			error = ENOIOCTL;
1773 			break;
1774 		}
1775 		break;
1776 	case 0xc1144d01: /* Firmware Linux ioctl shim */
1777 		{
1778 			devclass_t devclass;
1779 			struct mfi_linux_ioc_packet l_ioc;
1780 			int adapter;
1781 
1782 			devclass = devclass_find("mfi");
1783 			if (devclass == NULL)
1784 				return (ENOENT);
1785 
1786 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
1787 			if (error)
1788 				return (error);
1789 			adapter = l_ioc.lioc_adapter_no;
1790 			sc = devclass_get_softc(devclass, adapter);
1791 			if (sc == NULL)
1792 				return (ENOENT);
1793 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1794 			    cmd, arg, flag, td));
1795 			break;
1796 		}
1797 	case 0x400c4d03: /* AEN Linux ioctl shim */
1798 		{
1799 			devclass_t devclass;
1800 			struct mfi_linux_ioc_aen l_aen;
1801 			int adapter;
1802 
1803 			devclass = devclass_find("mfi");
1804 			if (devclass == NULL)
1805 				return (ENOENT);
1806 
1807 			error = copyin(arg, &l_aen, sizeof(l_aen));
1808 			if (error)
1809 				return (error);
1810 			adapter = l_aen.laen_adapter_no;
1811 			sc = devclass_get_softc(devclass, adapter);
1812 			if (sc == NULL)
1813 				return (ENOENT);
1814 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1815 			    cmd, arg, flag, td));
1816 			break;
1817 		}
1818 	default:
1819 		error = ENOENT;
1820 		break;
1821 	}
1822 
1823 	return (error);
1824 }
1825 
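/*
 * Back end for the Linux megaraid_sas ioctl shim: copy in the user's
 * frame and scatter/gather buffers, run the command polled, and copy the
 * data, sense information, and command status back out to user space.
 */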
1826 static int
1827 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1828 {
1829 	struct mfi_softc *sc;
1830 	struct mfi_linux_ioc_packet l_ioc;
1831 	struct mfi_linux_ioc_aen l_aen;
1832 	struct mfi_command *cm = NULL;
1833 	struct mfi_aen *mfi_aen_entry;
1834 	uint32_t *sense_ptr;
1835 	uint32_t context;
1836 	uint8_t *data = NULL, *temp;
1837 	int i;
1838 	int error;
1839 
1840 	sc = dev->si_drv1;
1841 	error = 0;
1842 	switch (cmd) {
1843 	case 0xc1144d01: /* Firmware Linux ioctl shim */
1844 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
1845 		if (error != 0)
1846 			return (error);
1847 
1848 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
1849 			return (EINVAL);
1850 		}
1851 
1852 		mtx_lock(&sc->mfi_io_lock);
1853 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1854 			mtx_unlock(&sc->mfi_io_lock);
1855 			return (EBUSY);
1856 		}
1857 		mtx_unlock(&sc->mfi_io_lock);
1858 
1859 		/*
1860 		 * save off original context since copying from user
1861 		 * will clobber some data
1862 		 */
1863 		context = cm->cm_frame->header.context;
1864 
1865 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
1866 		      l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
1867 		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
1868 		cm->cm_sg =
1869 		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
1870 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1871 			| MFI_CMD_POLLED;
1872 		cm->cm_len = cm->cm_frame->header.data_len;
1873 		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1874 					    M_WAITOK | M_ZERO);
1875 
1876 		/* restore header context */
1877 		cm->cm_frame->header.context = context;
1878 
1879 		temp = data;
1880 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1881 			error = copyin(l_ioc.lioc_sgl[i].iov_base,
1882 			       temp,
1883 			       l_ioc.lioc_sgl[i].iov_len);
1884 			if (error != 0) {
1885 				device_printf(sc->mfi_dev,
1886 				    "Copy in failed\n");
1887 				goto out;
1888 			}
1889 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1890 		}
1891 
1892 		if (l_ioc.lioc_sense_len) {
1893 			sense_ptr =
1894 			    (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
1895 			*sense_ptr = cm->cm_sense_busaddr;
1896 		}
1897 
1898 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1899 			device_printf(sc->mfi_dev,
1900 			    "Command buffer map failed\n");
1901 			goto out;
1902 		}
1903 
1904 		if ((error = mfi_polled_command(sc, cm)) != 0) {
1905 			device_printf(sc->mfi_dev,
1906 			    "Polled command failed\n");
1907 			goto out;
1908 		}
1909 
1910 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1911 				BUS_DMASYNC_POSTREAD);
1912 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1913 
1914 		temp = data;
1915 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1916 			error = copyout(temp,
1917 				l_ioc.lioc_sgl[i].iov_base,
1918 				l_ioc.lioc_sgl[i].iov_len);
1919 			if (error != 0) {
1920 				device_printf(sc->mfi_dev,
1921 				    "Copy out failed\n");
1922 				goto out;
1923 			}
1924 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1925 		}
1926 
1927 		if (l_ioc.lioc_sense_len) {
1928 			/* copy out sense */
1929 			sense_ptr = (void *)
1930 			    &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
1931 			temp = 0;
1932 			temp += cm->cm_sense_busaddr;
1933 			error = copyout(temp, sense_ptr,
1934 			    l_ioc.lioc_sense_len);
1935 			if (error != 0) {
1936 				device_printf(sc->mfi_dev,
1937 				    "Copy out failed\n");
1938 				goto out;
1939 			}
1940 		}
1941 
1942 		error = copyout(&cm->cm_frame->header.cmd_status,
1943 			&((struct mfi_linux_ioc_packet*)arg)
1944 			->lioc_frame.hdr.cmd_status,
1945 			1);
1946 		if (error != 0) {
1947 			device_printf(sc->mfi_dev,
1948 				      "Copy out failed\n");
1949 			goto out;
1950 		}
1951 
1952 out:
1953 		if (data)
1954 			free(data, M_MFIBUF);
1955 		if (cm) {
1956 			mtx_lock(&sc->mfi_io_lock);
1957 			mfi_release_command(cm);
1958 			mtx_unlock(&sc->mfi_io_lock);
1959 		}
1960 
1961 		return (error);
1962 	case 0x400c4d03: /* AEN Linux ioctl shim */
1963 		error = copyin(arg, &l_aen, sizeof(l_aen));
1964 		if (error != 0)
1965 			return (error);
1966 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
1967 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
1968 		    M_WAITOK);
1969 		if (mfi_aen_entry != NULL) {
1970 			mfi_aen_entry->p = curproc;
1971 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
1972 			    aen_link);
1973 		}
1974 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
1975 		    l_aen.laen_class_locale);
1976 
1977 		if (error != 0) {
1978 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1979 			    aen_link);
1980 			free(mfi_aen_entry, M_MFIBUF);
1981 		}
1982 
1983 		return (error);
1984 	default:
1985 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
1986 		error = ENOENT;
1987 		break;
1988 	}
1989 
1990 	return (error);
1991 }
1992 
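/*
 * poll(2) handler for the control device.  Readable once an AEN has
 * fired; returns POLLERR if no AEN command is outstanding.
 */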
1993 static int
1994 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
1995 {
1996 	struct mfi_softc *sc;
1997 	int revents = 0;
1998 
1999 	sc = dev->si_drv1;
2000 
2001 	if (poll_events & (POLLIN | POLLRDNORM)) {
2002 		if (sc->mfi_aen_triggered != 0)
2003 			revents |= poll_events & (POLLIN | POLLRDNORM);
2004 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2005 			revents |= POLLERR;
2006 		}
2007 	}
2008 
2009 	if (revents == 0) {
2010 		if (poll_events & (POLLIN | POLLRDNORM)) {
2011 			sc->mfi_poll_waiting = 1;
2012 			selrecord(td, &sc->mfi_select);
2013 			sc->mfi_poll_waiting = 0;
2014 		}
2015 	}
2016 
2017 	return revents;
2018 }
2019