xref: /freebsd/sys/dev/mfi/mfi.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
55 
56 #include "opt_mfi.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/poll.h>
64 #include <sys/selinfo.h>
65 #include <sys/bus.h>
66 #include <sys/conf.h>
67 #include <sys/eventhandler.h>
68 #include <sys/rman.h>
69 #include <sys/bus_dma.h>
70 #include <sys/bio.h>
71 #include <sys/ioccom.h>
72 #include <sys/uio.h>
73 #include <sys/proc.h>
74 #include <sys/signalvar.h>
75 
76 #include <machine/bus.h>
77 #include <machine/resource.h>
78 
79 #include <dev/mfi/mfireg.h>
80 #include <dev/mfi/mfi_ioctl.h>
81 #include <dev/mfi/mfivar.h>
82 
83 static int	mfi_alloc_commands(struct mfi_softc *);
84 static int	mfi_comms_init(struct mfi_softc *);
85 static int	mfi_wait_command(struct mfi_softc *, struct mfi_command *);
86 static int	mfi_get_controller_info(struct mfi_softc *);
87 static int	mfi_get_log_state(struct mfi_softc *,
88 		    struct mfi_evt_log_state **);
89 static int	mfi_get_entry(struct mfi_softc *, int);
90 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
91 		    uint32_t, void **, size_t);
92 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
93 static void	mfi_startup(void *arg);
94 static void	mfi_intr(void *arg);
95 static void	mfi_ldprobe(struct mfi_softc *sc);
96 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
97 static void	mfi_aen_complete(struct mfi_command *);
98 static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
102 static void	mfi_bio_complete(struct mfi_command *);
103 static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
104 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
105 static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
106 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
107 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
108 static void	mfi_timeout(void *);
109 static void 	mfi_enable_intr_xscale(struct mfi_softc *sc);
110 static void 	mfi_enable_intr_ppc(struct mfi_softc *sc);
111 static int32_t 	mfi_read_fw_status_xscale(struct mfi_softc *sc);
112 static int32_t 	mfi_read_fw_status_ppc(struct mfi_softc *sc);
113 static int 	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
114 static int 	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
115 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
116 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
117 
118 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
119 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
120 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
121 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
122             0, "event message locale");
123 
124 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
125 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
126 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
127           0, "event message class");
128 
129 /* Management interface */
130 static d_open_t		mfi_open;
131 static d_close_t	mfi_close;
132 static d_ioctl_t	mfi_ioctl;
133 static d_poll_t		mfi_poll;
134 
135 static struct cdevsw mfi_cdevsw = {
136 	.d_version = 	D_VERSION,
137 	.d_flags =	0,
138 	.d_open = 	mfi_open,
139 	.d_close =	mfi_close,
140 	.d_ioctl =	mfi_ioctl,
141 	.d_poll =	mfi_poll,
142 	.d_name =	"mfi",
143 };
144 
145 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
146 
147 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
148 
149 static void
150 mfi_enable_intr_xscale(struct mfi_softc *sc)
151 {
152 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
153 }
154 
155 static void
156 mfi_enable_intr_ppc(struct mfi_softc *sc)
157 {
158 	MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
159 	MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
160 }
161 
162 static int32_t
163 mfi_read_fw_status_xscale(struct mfi_softc *sc)
164 {
165 	return MFI_READ4(sc, MFI_OMSG0);
166 }
167 
168 static int32_t
169 mfi_read_fw_status_ppc(struct mfi_softc *sc)
170 {
171 	return MFI_READ4(sc, MFI_OSP0);
172 }
173 
174 static int
175 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
176 {
177 	int32_t status;
178 
179 	status = MFI_READ4(sc, MFI_OSTS);
180 	if ((status & MFI_OSTS_INTR_VALID) == 0)
181 		return 1;
182 
183 	MFI_WRITE4(sc, MFI_OSTS, status);
184 	return 0;
185 }
186 
187 static int
188 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
189 {
190 	int32_t status;
191 
192 	status = MFI_READ4(sc, MFI_OSTS);
193 	if (!status)
194 		return 1;
195 
196 	MFI_WRITE4(sc, MFI_ODCR0, status);
197 	return 0;
198 }
199 
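/*
 * Hand a command frame to the controller by writing its bus address to
 * the inbound queue port.  Frames are 64-byte aligned, so the low-order
 * bits are free to carry the extra-frame count hint; see the comment in
 * mfi_send_frame() for the encoding.
 */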
200 static void
201 mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
202 {
203 	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
204 }
205 
206 static void
207 mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
208 {
209 	MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
210 }
211 
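/*
 * Poll the firmware status register until the controller reports READY,
 * nudging it out of intermediate states (handshake, operational, etc.)
 * where needed.  Each state gets max_wait seconds, checked in 100ms
 * steps; a FAULT or an unrecognized state returns ENXIO.
 */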
212 static int
213 mfi_transition_firmware(struct mfi_softc *sc)
214 {
215 	int32_t fw_state, cur_state;
216 	int max_wait, i;
217 
218 	fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
219 	while (fw_state != MFI_FWSTATE_READY) {
220 		if (bootverbose)
221 			device_printf(sc->mfi_dev, "Waiting for firmware to "
222 			    "become ready\n");
223 		cur_state = fw_state;
224 		switch (fw_state) {
225 		case MFI_FWSTATE_FAULT:
226 			device_printf(sc->mfi_dev, "Firmware fault\n");
227 			return (ENXIO);
228 		case MFI_FWSTATE_WAIT_HANDSHAKE:
229 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
230 			max_wait = 2;
231 			break;
232 		case MFI_FWSTATE_OPERATIONAL:
233 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
234 			max_wait = 10;
235 			break;
236 		case MFI_FWSTATE_UNDEFINED:
237 		case MFI_FWSTATE_BB_INIT:
238 			max_wait = 2;
239 			break;
240 		case MFI_FWSTATE_FW_INIT:
241 		case MFI_FWSTATE_DEVICE_SCAN:
242 		case MFI_FWSTATE_FLUSH_CACHE:
243 			max_wait = 20;
244 			break;
245 		default:
246 			device_printf(sc->mfi_dev, "Unknown firmware state %d\n",
247 			    fw_state);
248 			return (ENXIO);
249 		}
250 		for (i = 0; i < (max_wait * 10); i++) {
251 			fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
252 			if (fw_state == cur_state)
253 				DELAY(100000);
254 			else
255 				break;
256 		}
257 		if (fw_state == cur_state) {
258 			device_printf(sc->mfi_dev, "firmware stuck in state "
259 			    "%#x\n", fw_state);
260 			return (ENXIO);
261 		}
262 	}
263 	return (0);
264 }
265 
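/*
 * busdma callback that records the bus address of a single-segment
 * mapping; used when loading the comms area, frame pool, and sense
 * buffers below.
 */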
266 static void
267 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
268 {
269 	uint32_t *addr;
270 
271 	addr = arg;
272 	*addr = segs[0].ds_addr;
273 }
274 
275 int
276 mfi_attach(struct mfi_softc *sc)
277 {
278 	uint32_t status;
279 	int error, commsz, framessz, sensesz;
280 	int frames, unit, max_fw_sge;
281 	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver 2.00\n");
282 
283 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
284 	sx_init(&sc->mfi_config_lock, "MFI config");
285 	TAILQ_INIT(&sc->mfi_ld_tqh);
286 	TAILQ_INIT(&sc->mfi_aen_pids);
287 	TAILQ_INIT(&sc->mfi_cam_ccbq);
288 
289 	mfi_initq_free(sc);
290 	mfi_initq_ready(sc);
291 	mfi_initq_busy(sc);
292 	mfi_initq_bio(sc);
293 
294 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
295 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
296 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
297 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
298 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
299 	}
300 	else {
301 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
302 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
303 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
304 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
305 	}
306 
307 
308 	/* Before we get too far, see if the firmware is working */
309 	if ((error = mfi_transition_firmware(sc)) != 0) {
310 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
311 		    "error %d\n", error);
312 		return (ENXIO);
313 	}
314 
315 	/*
316 	 * Get information needed for sizing the contiguous memory for the
317 	 * frame pool.  Size down the sgl parameter since we know that
318 	 * we will never need more than what's required for MAXPHYS.
319 	 * It would be nice if these constants were available at runtime
320 	 * instead of compile time.
321 	 */
322 	status = sc->mfi_read_fw_status(sc);
323 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
324 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
325 	sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
326 
327 	/*
328 	 * Create the dma tag for data buffers.  Used both for block I/O
329 	 * and for various internal data queries.
330 	 */
331 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
332 				1, 0,			/* algnmnt, boundary */
333 				BUS_SPACE_MAXADDR,	/* lowaddr */
334 				BUS_SPACE_MAXADDR,	/* highaddr */
335 				NULL, NULL,		/* filter, filterarg */
336 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
337 				sc->mfi_max_sge,	/* nsegments */
338 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
339 				BUS_DMA_ALLOCNOW,	/* flags */
340 				busdma_lock_mutex,	/* lockfunc */
341 				&sc->mfi_io_lock,	/* lockfuncarg */
342 				&sc->mfi_buffer_dmat)) {
343 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
344 		return (ENOMEM);
345 	}
346 
347 	/*
348 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
349 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
350 	 * entry, so the calculated size here will be 1 more than
351 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
352 	 */
353 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
354 	    sizeof(struct mfi_hwcomms);
355 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
356 				1, 0,			/* algnmnt, boundary */
357 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
358 				BUS_SPACE_MAXADDR,	/* highaddr */
359 				NULL, NULL,		/* filter, filterarg */
360 				commsz,			/* maxsize */
361 				1,			/* nsegments */
362 				commsz,			/* maxsegsize */
363 				0,			/* flags */
364 				NULL, NULL,		/* lockfunc, lockarg */
365 				&sc->mfi_comms_dmat)) {
366 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
367 		return (ENOMEM);
368 	}
369 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
370 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
371 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
372 		return (ENOMEM);
373 	}
374 	bzero(sc->mfi_comms, commsz);
375 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
376 	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
377 
378 	/*
379 	 * Allocate DMA memory for the command frames.  Keep them in the
380 	 * lower 4GB for efficiency.  Calculate the size of the commands at
381 	 * the same time; each command is one 64 byte frame plus a set of
382 	 * additional frames for holding sg lists or other data.
383 	 * The assumption here is that the SG list will start at the second
384 	 * frame and not use the unused bytes in the first frame.  While this
385 	 * isn't technically correct, it simplifies the calculation and allows
386 	 * for command frames that might be larger than an mfi_io_frame.
387 	 */
388 	if (sizeof(bus_addr_t) == 8) {
389 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
390 		sc->mfi_flags |= MFI_FLAGS_SG64;
391 	} else {
392 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
393 	}
394 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
395 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
396 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
397 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
398 				64, 0,			/* algnmnt, boundary */
399 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
400 				BUS_SPACE_MAXADDR,	/* highaddr */
401 				NULL, NULL,		/* filter, filterarg */
402 				framessz,		/* maxsize */
403 				1,			/* nsegments */
404 				framessz,		/* maxsegsize */
405 				0,			/* flags */
406 				NULL, NULL,		/* lockfunc, lockarg */
407 				&sc->mfi_frames_dmat)) {
408 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
409 		return (ENOMEM);
410 	}
411 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
412 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
413 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
414 		return (ENOMEM);
415 	}
416 	bzero(sc->mfi_frames, framessz);
417 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
418 	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr, 0);
419 
420 	/*
421 	 * Allocate DMA memory for the frame sense data.  Keep it in the
422 	 * lower 4GB for efficiency.
423 	 */
424 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
425 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
426 				4, 0,			/* algnmnt, boundary */
427 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
428 				BUS_SPACE_MAXADDR,	/* highaddr */
429 				NULL, NULL,		/* filter, filterarg */
430 				sensesz,		/* maxsize */
431 				1,			/* nsegments */
432 				sensesz,		/* maxsegsize */
433 				0,			/* flags */
434 				NULL, NULL,		/* lockfunc, lockarg */
435 				&sc->mfi_sense_dmat)) {
436 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
437 		return (ENOMEM);
438 	}
439 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
440 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
441 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
442 		return (ENOMEM);
443 	}
444 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
445 	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
446 
447 	if ((error = mfi_alloc_commands(sc)) != 0)
448 		return (error);
449 
450 	if ((error = mfi_comms_init(sc)) != 0)
451 		return (error);
452 
453 	if ((error = mfi_get_controller_info(sc)) != 0)
454 		return (error);
455 
456 	mtx_lock(&sc->mfi_io_lock);
457 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
458 		mtx_unlock(&sc->mfi_io_lock);
459 		return (error);
460 	}
461 	mtx_unlock(&sc->mfi_io_lock);
462 
463 	/*
464 	 * Set up the interrupt handler.  XXX This should happen in
465 	 * mfi_pci.c
466 	 */
467 	sc->mfi_irq_rid = 0;
468 	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
469 	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
470 		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
471 		return (EINVAL);
472 	}
473 	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
474 	    NULL, mfi_intr, sc, &sc->mfi_intr)) {
475 		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
476 		return (EINVAL);
477 	}
478 
479 	/* Register a config hook to probe the bus for arrays */
480 	sc->mfi_ich.ich_func = mfi_startup;
481 	sc->mfi_ich.ich_arg = sc;
482 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
483 		device_printf(sc->mfi_dev, "Cannot establish configuration "
484 		    "hook\n");
485 		return (EINVAL);
486 	}
487 
488 	/*
489 	 * Register a shutdown handler.
490 	 */
491 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
492 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
493 		device_printf(sc->mfi_dev, "Warning: shutdown event "
494 		    "registration failed\n");
495 	}
496 
497 	/*
498 	 * Create the control device for doing management
499 	 */
500 	unit = device_get_unit(sc->mfi_dev);
501 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
502 	    0640, "mfi%d", unit);
503 	if (unit == 0)
504 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
505 	if (sc->mfi_cdev != NULL)
506 		sc->mfi_cdev->si_drv1 = sc;
507 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
508 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
509 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
510 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
511 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
512 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
513 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
514 	    &sc->mfi_keep_deleted_volumes, 0,
515 	    "Don't detach the mfid device for a busy volume that is deleted");
516 
517 	device_add_child(sc->mfi_dev, "mfip", -1);
518 	bus_generic_attach(sc->mfi_dev);
519 
520 	/* Start the timeout watchdog */
521 	callout_init(&sc->mfi_watchdog_callout, 1);
522 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
523 	    mfi_timeout, sc);
524 
525 	return (0);
526 }
527 
528 static int
529 mfi_alloc_commands(struct mfi_softc *sc)
530 {
531 	struct mfi_command *cm;
532 	int i, ncmds;
533 
534 	/*
535 	 * XXX Should we allocate all the commands up front, or allocate on
536 	 * demand later like 'aac' does?
537 	 */
538 	ncmds = sc->mfi_max_fw_cmds;
539 	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
540 	    M_WAITOK | M_ZERO);
541 
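	/*
	 * Carve each command's frame and sense buffer out of the contiguous
	 * pools allocated in mfi_attach().  The frame's context field is set
	 * to the command index so completions can be matched back up in
	 * mfi_intr().
	 */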
542 	for (i = 0; i < ncmds; i++) {
543 		cm = &sc->mfi_commands[i];
544 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
545 		    sc->mfi_cmd_size * i);
546 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
547 		    sc->mfi_cmd_size * i;
548 		cm->cm_frame->header.context = i;
549 	cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
550 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
551 		cm->cm_sc = sc;
552 		cm->cm_index = i;
553 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
554 		    &cm->cm_dmamap) == 0)
555 			mfi_release_command(cm);
556 		else
557 			break;
558 		sc->mfi_total_cmds++;
559 	}
560 
561 	return (0);
562 }
563 
564 void
565 mfi_release_command(struct mfi_command *cm)
566 {
567 	struct mfi_frame_header *hdr;
568 	uint32_t *hdr_data;
569 
570 	/*
571 	 * Zero out the important fields of the frame, but make sure the
572 	 * context field is preserved.  For efficiency, handle the fields
573 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
574 	 */
575 	hdr = &cm->cm_frame->header;
576 	if (hdr->sg_count) {
577 		cm->cm_sg->sg32[0].len = 0;
578 		cm->cm_sg->sg32[0].addr = 0;
579 	}
580 
581 	hdr_data = (uint32_t *)cm->cm_frame;
582 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
583 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
584 	hdr_data[4] = 0;	/* flags, timeout */
585 	hdr_data[5] = 0;	/* data_len */
586 
587 	cm->cm_extra_frames = 0;
588 	cm->cm_flags = 0;
589 	cm->cm_complete = NULL;
590 	cm->cm_private = NULL;
591 	cm->cm_data = NULL;
592 	cm->cm_sg = 0;
593 	cm->cm_total_frame_size = 0;
594 
595 	mfi_enqueue_free(cm);
596 }
597 
598 static int
599 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
600     void **bufp, size_t bufsize)
601 {
602 	struct mfi_command *cm;
603 	struct mfi_dcmd_frame *dcmd;
604 	void *buf = NULL;
605 
606 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
607 
608 	cm = mfi_dequeue_free(sc);
609 	if (cm == NULL)
610 		return (EBUSY);
611 
612 	if ((bufsize > 0) && (bufp != NULL)) {
613 		if (*bufp == NULL) {
614 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
615 			if (buf == NULL) {
616 				mfi_release_command(cm);
617 				return (ENOMEM);
618 			}
619 			*bufp = buf;
620 		} else {
621 			buf = *bufp;
622 		}
623 	}
624 
625 	dcmd = &cm->cm_frame->dcmd;
626 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
627 	dcmd->header.cmd = MFI_CMD_DCMD;
628 	dcmd->header.timeout = 0;
629 	dcmd->header.flags = 0;
630 	dcmd->header.data_len = bufsize;
631 	dcmd->opcode = opcode;
632 	cm->cm_sg = &dcmd->sgl;
633 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
634 	cm->cm_flags = 0;
635 	cm->cm_data = buf;
636 	cm->cm_private = buf;
637 	cm->cm_len = bufsize;
638 
639 	*cmp = cm;
640 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
641 		*bufp = buf;
642 	return (0);
643 }
644 
645 static int
646 mfi_comms_init(struct mfi_softc *sc)
647 {
648 	struct mfi_command *cm;
649 	struct mfi_init_frame *init;
650 	struct mfi_init_qinfo *qinfo;
651 	int error;
652 
653 	mtx_lock(&sc->mfi_io_lock);
654 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}
656 
657 	/*
658 	 * Abuse the SG list area of the frame to hold the init_qinfo
659 	 * object;
660 	 */
661 	init = &cm->cm_frame->init;
662 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
663 
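	/*
	 * Describe the shared reply queue to the firmware: hw_reply_q is the
	 * ring itself, while hw_pi and hw_ci are the producer and consumer
	 * indices that mfi_intr() walks as completions arrive.
	 */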
664 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
665 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
666 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
667 	    offsetof(struct mfi_hwcomms, hw_reply_q);
668 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
669 	    offsetof(struct mfi_hwcomms, hw_pi);
670 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
671 	    offsetof(struct mfi_hwcomms, hw_ci);
672 
673 	init->header.cmd = MFI_CMD_INIT;
674 	init->header.data_len = sizeof(struct mfi_init_qinfo);
675 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
676 	cm->cm_data = NULL;
677 	cm->cm_flags = MFI_CMD_POLLED;
678 
679 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
680 		device_printf(sc->mfi_dev, "failed to send init command\n");
681 		mtx_unlock(&sc->mfi_io_lock);
682 		return (error);
683 	}
684 	mfi_release_command(cm);
685 	mtx_unlock(&sc->mfi_io_lock);
686 
687 	return (0);
688 }
689 
690 static int
691 mfi_get_controller_info(struct mfi_softc *sc)
692 {
693 	struct mfi_command *cm = NULL;
694 	struct mfi_ctrl_info *ci = NULL;
695 	uint32_t max_sectors_1, max_sectors_2;
696 	int error;
697 
698 	mtx_lock(&sc->mfi_io_lock);
699 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
700 	    (void **)&ci, sizeof(*ci));
701 	if (error)
702 		goto out;
703 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
704 
705 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
706 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
707 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
708 		    MFI_SECTOR_LEN;
709 		error = 0;
710 		goto out;
711 	}
712 
713 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
714 	    BUS_DMASYNC_POSTREAD);
715 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
716 
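	/*
	 * The maximum I/O size is the smaller of the stripe-based limit and
	 * the controller's advertised maximum request size, in sectors.
	 */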
717 	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
718 	max_sectors_2 = ci->max_request_size;
719 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
720 
721 out:
722 	if (ci)
723 		free(ci, M_MFIBUF);
724 	if (cm)
725 		mfi_release_command(cm);
726 	mtx_unlock(&sc->mfi_io_lock);
727 	return (error);
728 }
729 
730 static int
731 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
732 {
733 	struct mfi_command *cm = NULL;
734 	int error;
735 
736 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
737 	    (void **)log_state, sizeof(**log_state));
738 	if (error)
739 		goto out;
740 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
741 
742 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
743 		device_printf(sc->mfi_dev, "Failed to get log state\n");
744 		goto out;
745 	}
746 
747 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
748 	    BUS_DMASYNC_POSTREAD);
749 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
750 
751 out:
752 	if (cm)
753 		mfi_release_command(cm);
754 
755 	return (error);
756 }
757 
758 static int
759 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
760 {
761 	struct mfi_evt_log_state *log_state = NULL;
762 	union mfi_evt class_locale;
763 	int error = 0;
764 	uint32_t seq;
765 
766 	class_locale.members.reserved = 0;
767 	class_locale.members.locale = mfi_event_locale;
768 	class_locale.members.class  = mfi_event_class;
769 
770 	if (seq_start == 0) {
771 		error = mfi_get_log_state(sc, &log_state);
772 		if (error) {
773 			if (log_state)
774 				free(log_state, M_MFIBUF);
775 			return (error);
776 		}
777 		/* The message log is a circular buffer */
778 		for (seq = log_state->shutdown_seq_num;
779 		     seq != log_state->newest_seq_num; seq++) {
780 			mfi_get_entry(sc, seq);
781 		}
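		/* Pick up the newest entry, which the loop stops short of. */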
782 		mfi_get_entry(sc, seq);
783 	} else
784 		seq = seq_start;
785 	mfi_aen_register(sc, seq, class_locale.word);
786 	free(log_state, M_MFIBUF);
787 
788 	return 0;
789 }
790 
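/*
 * Queue a command and sleep until mfi_complete() wakes us up; the caller
 * must hold mfi_io_lock.
 */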
791 static int
792 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
793 {
794 
795 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
796 	cm->cm_complete = NULL;
797 
798 
799 	/*
800 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
801 	 * and return 0 to it as status
802 	 */
803 	if (cm->cm_frame->dcmd.opcode == 0) {
804 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
805 		cm->cm_error = 0;
806 		return (cm->cm_error);
807 	}
808 	mfi_enqueue_ready(cm);
809 	mfi_startio(sc);
810 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
811 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
812 	return (cm->cm_error);
813 }
814 
815 void
816 mfi_free(struct mfi_softc *sc)
817 {
818 	struct mfi_command *cm;
819 	int i;
820 
821 	callout_drain(&sc->mfi_watchdog_callout);
822 
823 	if (sc->mfi_cdev != NULL)
824 		destroy_dev(sc->mfi_cdev);
825 
826 	if (sc->mfi_total_cmds != 0) {
827 		for (i = 0; i < sc->mfi_total_cmds; i++) {
828 			cm = &sc->mfi_commands[i];
829 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
830 		}
831 		free(sc->mfi_commands, M_MFIBUF);
832 	}
833 
834 	if (sc->mfi_intr)
835 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
836 	if (sc->mfi_irq != NULL)
837 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
838 		    sc->mfi_irq);
839 
840 	if (sc->mfi_sense_busaddr != 0)
841 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
842 	if (sc->mfi_sense != NULL)
843 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
844 		    sc->mfi_sense_dmamap);
845 	if (sc->mfi_sense_dmat != NULL)
846 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
847 
848 	if (sc->mfi_frames_busaddr != 0)
849 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
850 	if (sc->mfi_frames != NULL)
851 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
852 		    sc->mfi_frames_dmamap);
853 	if (sc->mfi_frames_dmat != NULL)
854 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
855 
856 	if (sc->mfi_comms_busaddr != 0)
857 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
858 	if (sc->mfi_comms != NULL)
859 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
860 		    sc->mfi_comms_dmamap);
861 	if (sc->mfi_comms_dmat != NULL)
862 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
863 
864 	if (sc->mfi_buffer_dmat != NULL)
865 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
866 	if (sc->mfi_parent_dmat != NULL)
867 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
868 
869 	if (mtx_initialized(&sc->mfi_io_lock)) {
870 		mtx_destroy(&sc->mfi_io_lock);
871 		sx_destroy(&sc->mfi_config_lock);
872 	}
873 
874 	return;
875 }
876 
877 static void
878 mfi_startup(void *arg)
879 {
880 	struct mfi_softc *sc;
881 
882 	sc = (struct mfi_softc *)arg;
883 
884 	config_intrhook_disestablish(&sc->mfi_ich);
885 
886 	sc->mfi_enable_intr(sc);
887 	sx_xlock(&sc->mfi_config_lock);
888 	mtx_lock(&sc->mfi_io_lock);
889 	mfi_ldprobe(sc);
890 	mtx_unlock(&sc->mfi_io_lock);
891 	sx_xunlock(&sc->mfi_config_lock);
892 }
893 
894 static void
895 mfi_intr(void *arg)
896 {
897 	struct mfi_softc *sc;
898 	struct mfi_command *cm;
899 	uint32_t pi, ci, context;
900 
901 	sc = (struct mfi_softc *)arg;
902 
903 	if (sc->mfi_check_clear_intr(sc))
904 		return;
905 
906 	pi = sc->mfi_comms->hw_pi;
907 	ci = sc->mfi_comms->hw_ci;
908 	mtx_lock(&sc->mfi_io_lock);
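	/*
	 * Walk the reply queue from the consumer index to the producer
	 * index; each entry holds the context (command index) of a completed
	 * frame.  The ring has mfi_max_fw_cmds + 1 slots, hence the wrap
	 * below.
	 */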
909 	while (ci != pi) {
910 		context = sc->mfi_comms->hw_reply_q[ci];
911 		if (context < sc->mfi_max_fw_cmds) {
912 			cm = &sc->mfi_commands[context];
913 			mfi_remove_busy(cm);
914 			cm->cm_error = 0;
915 			mfi_complete(sc, cm);
916 		}
917 		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
918 			ci = 0;
919 		}
920 	}
921 
922 	sc->mfi_comms->hw_ci = ci;
923 
924 	/* Give deferred I/O a chance to run */
925 	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
926 		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
927 	mfi_startio(sc);
928 	mtx_unlock(&sc->mfi_io_lock);
929 
930 	return;
931 }
932 
933 int
934 mfi_shutdown(struct mfi_softc *sc)
935 {
936 	struct mfi_dcmd_frame *dcmd;
937 	struct mfi_command *cm;
938 	int error;
939 
940 	mtx_lock(&sc->mfi_io_lock);
941 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
942 	if (error) {
943 		mtx_unlock(&sc->mfi_io_lock);
944 		return (error);
945 	}
946 
947 	if (sc->mfi_aen_cm != NULL)
948 		mfi_abort(sc, sc->mfi_aen_cm);
949 
950 	dcmd = &cm->cm_frame->dcmd;
951 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
952 	cm->cm_flags = MFI_CMD_POLLED;
953 	cm->cm_data = NULL;
954 
955 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
956 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
957 	}
958 
959 	mfi_release_command(cm);
960 	mtx_unlock(&sc->mfi_io_lock);
961 	return (error);
962 }
963 
964 static void
965 mfi_ldprobe(struct mfi_softc *sc)
966 {
967 	struct mfi_frame_header *hdr;
968 	struct mfi_command *cm = NULL;
969 	struct mfi_ld_list *list = NULL;
970 	struct mfi_disk *ld;
971 	int error, i;
972 
973 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
974 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
975 
976 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
977 	    (void **)&list, sizeof(*list));
978 	if (error)
979 		goto out;
980 
981 	cm->cm_flags = MFI_CMD_DATAIN;
982 	if (mfi_wait_command(sc, cm) != 0) {
983 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
984 		goto out;
985 	}
986 
987 	hdr = &cm->cm_frame->header;
988 	if (hdr->cmd_status != MFI_STAT_OK) {
989 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
990 		    hdr->cmd_status);
991 		goto out;
992 	}
993 
994 	for (i = 0; i < list->ld_count; i++) {
995 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
996 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
997 				goto skip_add;
998 		}
999 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1000 	skip_add:;
1001 	}
1002 out:
1003 	if (list)
1004 		free(list, M_MFIBUF);
1005 	if (cm)
1006 		mfi_release_command(cm);
1007 
1008 	return;
1009 }
1010 
1011 static void
1012 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1013 {
1014 	switch (detail->arg_type) {
1015 	case MR_EVT_ARGS_NONE:
1016 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
1017 		    detail->seq,
1018 		    detail->time,
1019 		    detail->class.members.locale,
1020 		    detail->class.members.class,
1021 		    detail->description
1022 		    );
1023 		break;
1024 	case MR_EVT_ARGS_CDB_SENSE:
1025 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D "
1026 		    "Sense %*D: %s\n",
1027 		    detail->seq,
1028 		    detail->time,
1029 		    detail->class.members.locale,
1030 		    detail->class.members.class,
1031 		    detail->args.cdb_sense.pd.device_id,
1032 		    detail->args.cdb_sense.pd.enclosure_index,
1033 		    detail->args.cdb_sense.pd.slot_number,
1034 		    detail->args.cdb_sense.cdb_len,
1035 		    detail->args.cdb_sense.cdb,
1036 		    ":",
1037 		    detail->args.cdb_sense.sense_len,
1038 		    detail->args.cdb_sense.sense,
1039 		    ":",
1040 		    detail->description
1041 		    );
1042 		break;
1043 	case MR_EVT_ARGS_LD:
1044 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1045 		    "event: %s\n",
1046 		    detail->seq,
1047 		    detail->time,
1048 		    detail->class.members.locale,
1049 		    detail->class.members.class,
1050 		    detail->args.ld.ld_index,
1051 		    detail->args.ld.target_id,
1052 		    detail->description
1053 		    );
1054 		break;
1055 	case MR_EVT_ARGS_LD_COUNT:
1056 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1057 		    "count %lld: %s\n",
1058 		    detail->seq,
1059 		    detail->time,
1060 		    detail->class.members.locale,
1061 		    detail->class.members.class,
1062 		    detail->args.ld_count.ld.ld_index,
1063 		    detail->args.ld_count.ld.target_id,
1064 		    (long long)detail->args.ld_count.count,
1065 		    detail->description
1066 		    );
1067 		break;
1068 	case MR_EVT_ARGS_LD_LBA:
1069 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1070 		    "lba %lld: %s\n",
1071 		    detail->seq,
1072 		    detail->time,
1073 		    detail->class.members.locale,
1074 		    detail->class.members.class,
1075 		    detail->args.ld_lba.ld.ld_index,
1076 		    detail->args.ld_lba.ld.target_id,
1077 		    (long long)detail->args.ld_lba.lba,
1078 		    detail->description
1079 		    );
1080 		break;
1081 	case MR_EVT_ARGS_LD_OWNER:
1082 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1083 		    "owner changed: prior %d, new %d: %s\n",
1084 		    detail->seq,
1085 		    detail->time,
1086 		    detail->class.members.locale,
1087 		    detail->class.members.class,
1088 		    detail->args.ld_owner.ld.ld_index,
1089 		    detail->args.ld_owner.ld.target_id,
1090 		    detail->args.ld_owner.pre_owner,
1091 		    detail->args.ld_owner.new_owner,
1092 		    detail->description
1093 		    );
1094 		break;
1095 	case MR_EVT_ARGS_LD_LBA_PD_LBA:
1096 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1097 		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
1098 		    detail->seq,
1099 		    detail->time,
1100 		    detail->class.members.locale,
1101 		    detail->class.members.class,
1102 		    detail->args.ld_lba_pd_lba.ld.ld_index,
1103 		    detail->args.ld_lba_pd_lba.ld.target_id,
1104 		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
1105 		    detail->args.ld_lba_pd_lba.pd.device_id,
1106 		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
1107 		    detail->args.ld_lba_pd_lba.pd.slot_number,
1108 		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
1109 		    detail->description
1110 		    );
1111 		break;
1112 	case MR_EVT_ARGS_LD_PROG:
1113 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1114 		    "progress %d%% in %ds: %s\n",
1115 		    detail->seq,
1116 		    detail->time,
1117 		    detail->class.members.locale,
1118 		    detail->class.members.class,
1119 		    detail->args.ld_prog.ld.ld_index,
1120 		    detail->args.ld_prog.ld.target_id,
1121 		    detail->args.ld_prog.prog.progress/655,
1122 		    detail->args.ld_prog.prog.elapsed_seconds,
1123 		    detail->description
1124 		    );
1125 		break;
1126 	case MR_EVT_ARGS_LD_STATE:
1127 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1128 		    "state prior %d new %d: %s\n",
1129 		    detail->seq,
1130 		    detail->time,
1131 		    detail->class.members.locale,
1132 		    detail->class.members.class,
1133 		    detail->args.ld_state.ld.ld_index,
1134 		    detail->args.ld_state.ld.target_id,
1135 		    detail->args.ld_state.prev_state,
1136 		    detail->args.ld_state.new_state,
1137 		    detail->description
1138 		    );
1139 		break;
1140 	case MR_EVT_ARGS_LD_STRIP:
1141 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1142 		    "strip %lld: %s\n",
1143 		    detail->seq,
1144 		    detail->time,
1145 		    detail->class.members.locale,
1146 		    detail->class.members.class,
1147 		    detail->args.ld_strip.ld.ld_index,
1148 		    detail->args.ld_strip.ld.target_id,
1149 		    (long long)detail->args.ld_strip.strip,
1150 		    detail->description
1151 		    );
1152 		break;
1153 	case MR_EVT_ARGS_PD:
1154 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1155 		    "event: %s\n",
1156 		    detail->seq,
1157 		    detail->time,
1158 		    detail->class.members.locale,
1159 		    detail->class.members.class,
1160 		    detail->args.pd.device_id,
1161 		    detail->args.pd.enclosure_index,
1162 		    detail->args.pd.slot_number,
1163 		    detail->description
1164 		    );
1165 		break;
1166 	case MR_EVT_ARGS_PD_ERR:
1167 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1168 		    "err %d: %s\n",
1169 		    detail->seq,
1170 		    detail->time,
1171 		    detail->class.members.locale,
1172 		    detail->class.members.class,
1173 		    detail->args.pd_err.pd.device_id,
1174 		    detail->args.pd_err.pd.enclosure_index,
1175 		    detail->args.pd_err.pd.slot_number,
1176 		    detail->args.pd_err.err,
1177 		    detail->description
1178 		    );
1179 		break;
1180 	case MR_EVT_ARGS_PD_LBA:
1181 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1182 		    "lba %lld: %s\n",
1183 		    detail->seq,
1184 		    detail->time,
1185 		    detail->class.members.locale,
1186 		    detail->class.members.class,
1187 		    detail->args.pd_lba.pd.device_id,
1188 		    detail->args.pd_lba.pd.enclosure_index,
1189 		    detail->args.pd_lba.pd.slot_number,
1190 		    (long long)detail->args.pd_lba.lba,
1191 		    detail->description
1192 		    );
1193 		break;
1194 	case MR_EVT_ARGS_PD_LBA_LD:
1195 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1196 		    "lba %lld VD %02d/%d: %s\n",
1197 		    detail->seq,
1198 		    detail->time,
1199 		    detail->class.members.locale,
1200 		    detail->class.members.class,
1201 		    detail->args.pd_lba_ld.pd.device_id,
1202 		    detail->args.pd_lba_ld.pd.enclosure_index,
1203 		    detail->args.pd_lba_ld.pd.slot_number,
1204 		    (long long)detail->args.pd_lba.lba,
1205 		    detail->args.pd_lba_ld.ld.ld_index,
1206 		    detail->args.pd_lba_ld.ld.target_id,
1207 		    detail->description
1208 		    );
1209 		break;
1210 	case MR_EVT_ARGS_PD_PROG:
1211 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1212 		    "progress %d%% seconds %ds: %s\n",
1213 		    detail->seq,
1214 		    detail->time,
1215 		    detail->class.members.locale,
1216 		    detail->class.members.class,
1217 		    detail->args.pd_prog.pd.device_id,
1218 		    detail->args.pd_prog.pd.enclosure_index,
1219 		    detail->args.pd_prog.pd.slot_number,
1220 		    detail->args.pd_prog.prog.progress/655,
1221 		    detail->args.pd_prog.prog.elapsed_seconds,
1222 		    detail->description
1223 		    );
1224 		break;
1225 	case MR_EVT_ARGS_PD_STATE:
1226 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1227 		    "state prior %d new %d: %s\n",
1228 		    detail->seq,
1229 		    detail->time,
1230 		    detail->class.members.locale,
1231 		    detail->class.members.class,
1232 		    detail->args.pd_prog.pd.device_id,
1233 		    detail->args.pd_prog.pd.enclosure_index,
1234 		    detail->args.pd_prog.pd.slot_number,
1235 		    detail->args.pd_state.prev_state,
1236 		    detail->args.pd_state.new_state,
1237 		    detail->description
1238 		    );
1239 		break;
1240 	case MR_EVT_ARGS_PCI:
1241 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x%04x 0x%04x "
1242 		    "0x%04x 0x%04x: %s\n",
1243 		    detail->seq,
1244 		    detail->time,
1245 		    detail->class.members.locale,
1246 		    detail->class.members.class,
1247 		    detail->args.pci.venderId,
1248 		    detail->args.pci.deviceId,
1249 		    detail->args.pci.subVenderId,
1250 		    detail->args.pci.subDeviceId,
1251 		    detail->description
1252 		    );
1253 		break;
1254 	case MR_EVT_ARGS_RATE:
1255 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1256 		    detail->seq,
1257 		    detail->time,
1258 		    detail->class.members.locale,
1259 		    detail->class.members.class,
1260 		    detail->args.rate,
1261 		    detail->description
1262 		    );
1263 		break;
1264 	case MR_EVT_ARGS_TIME:
1265 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1266 		    "elapsed %ds: %s\n",
1267 		    detail->seq,
1268 		    detail->time,
1269 		    detail->class.members.locale,
1270 		    detail->class.members.class,
1271 		    detail->args.time.rtc,
1272 		    detail->args.time.elapsedSeconds,
1273 		    detail->description
1274 		    );
1275 		break;
1276 	case MR_EVT_ARGS_ECC:
1277 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1278 		    detail->seq,
1279 		    detail->time,
1280 		    detail->class.members.locale,
1281 		    detail->class.members.class,
1282 		    detail->args.ecc.ecar,
1283 		    detail->args.ecc.elog,
1284 		    detail->args.ecc.str,
1285 		    detail->description
1286 		    );
1287 		break;
1288 	default:
1289 		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1290 		    detail->seq,
1291 		    detail->time,
1292 		    detail->class.members.locale,
1293 		    detail->class.members.class,
1294 		    detail->arg_type, detail->description
1295 		    );
1296 	}
1297 }
1298 
1299 static int
1300 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1301 {
1302 	struct mfi_command *cm;
1303 	struct mfi_dcmd_frame *dcmd;
1304 	union mfi_evt current_aen, prior_aen;
1305 	struct mfi_evt_detail *ed = NULL;
1306 	int error = 0;
1307 
1308 	current_aen.word = locale;
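	/*
	 * If an AEN command is already outstanding and its class/locale
	 * filter already covers the one being requested, there is nothing to
	 * do.  Otherwise abort the old command so a new one can be
	 * registered below.
	 */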
1309 	if (sc->mfi_aen_cm != NULL) {
1310 		prior_aen.word =
1311 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1312 		if (prior_aen.members.class <= current_aen.members.class &&
1313 		    !((prior_aen.members.locale & current_aen.members.locale)
1314 		    ^current_aen.members.locale)) {
1315 			return (0);
1316 		} else {
1317 			prior_aen.members.locale |= current_aen.members.locale;
1318 			if (prior_aen.members.class
1319 			    < current_aen.members.class)
1320 				current_aen.members.class =
1321 				    prior_aen.members.class;
1322 			mfi_abort(sc, sc->mfi_aen_cm);
1323 		}
1324 	}
1325 
1326 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1327 	    (void **)&ed, sizeof(*ed));
1328 	if (error) {
1329 		goto out;
1330 	}
1331 
1332 	dcmd = &cm->cm_frame->dcmd;
1333 	((uint32_t *)&dcmd->mbox)[0] = seq;
1334 	((uint32_t *)&dcmd->mbox)[1] = locale;
1335 	cm->cm_flags = MFI_CMD_DATAIN;
1336 	cm->cm_complete = mfi_aen_complete;
1337 
1338 	sc->mfi_aen_cm = cm;
1339 
1340 	mfi_enqueue_ready(cm);
1341 	mfi_startio(sc);
1342 
1343 out:
1344 	return (error);
1345 }
1346 
1347 static void
1348 mfi_aen_complete(struct mfi_command *cm)
1349 {
1350 	struct mfi_frame_header *hdr;
1351 	struct mfi_softc *sc;
1352 	struct mfi_evt_detail *detail;
1353 	struct mfi_aen *mfi_aen_entry, *tmp;
1354 	int seq = 0, aborted = 0;
1355 
1356 	sc = cm->cm_sc;
1357 	hdr = &cm->cm_frame->header;
1358 
1359 	if (sc->mfi_aen_cm == NULL)
1360 		return;
1361 
1362 	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1363 		sc->mfi_aen_cm->cm_aen_abort = 0;
1364 		aborted = 1;
1365 	} else {
1366 		sc->mfi_aen_triggered = 1;
1367 		if (sc->mfi_poll_waiting) {
1368 			sc->mfi_poll_waiting = 0;
1369 			selwakeup(&sc->mfi_select);
1370 		}
1371 		detail = cm->cm_data;
1372 		/*
1373 		 * XXX If this function is too expensive or is recursive, then
1374 		 * events should be put onto a queue and processed later.
1375 		 */
1376 		mtx_unlock(&sc->mfi_io_lock);
1377 		mfi_decode_evt(sc, detail);
1378 		mtx_lock(&sc->mfi_io_lock);
1379 		seq = detail->seq + 1;
1380 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1381 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1382 			    aen_link);
1383 			PROC_LOCK(mfi_aen_entry->p);
1384 			psignal(mfi_aen_entry->p, SIGIO);
1385 			PROC_UNLOCK(mfi_aen_entry->p);
1386 			free(mfi_aen_entry, M_MFIBUF);
1387 		}
1388 	}
1389 
1390 	free(cm->cm_data, M_MFIBUF);
1391 	sc->mfi_aen_cm = NULL;
1392 	wakeup(&sc->mfi_aen_cm);
1393 	mfi_release_command(cm);
1394 
1395 	/* set it up again so the driver can catch more events */
1396 	if (!aborted) {
1397 		mfi_aen_setup(sc, seq);
1398 	}
1399 }
1400 
1401 /* Only do one event for now so we can easily iterate through them */
1402 #define MAX_EVENTS 1
1403 static int
1404 mfi_get_entry(struct mfi_softc *sc, int seq)
1405 {
1406 	struct mfi_command *cm;
1407 	struct mfi_dcmd_frame *dcmd;
1408 	struct mfi_evt_list *el;
1409 	int error;
1410 	int i;
1411 	int size;
1412 
1413 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1414 		return (EBUSY);
1415 	}
1416 
1417 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1418 		* (MAX_EVENTS - 1);
1419 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1420 	if (el == NULL) {
1421 		mfi_release_command(cm);
1422 		return (ENOMEM);
1423 	}
1424 
1425 	dcmd = &cm->cm_frame->dcmd;
1426 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1427 	dcmd->header.cmd = MFI_CMD_DCMD;
1428 	dcmd->header.timeout = 0;
1429 	dcmd->header.data_len = size;
1430 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1431 	((uint32_t *)&dcmd->mbox)[0] = seq;
1432 	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1433 	cm->cm_sg = &dcmd->sgl;
1434 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1435 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1436 	cm->cm_data = el;
1437 	cm->cm_len = size;
1438 
1439 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1440 		device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1443 		free(el, M_MFIBUF);
1444 		mfi_release_command(cm);
1445 		return (0);
1446 	}
1447 
1448 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1449 	    BUS_DMASYNC_POSTREAD);
1450 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1451 
1452 	if (dcmd->header.cmd_status != MFI_STAT_NOT_FOUND) {
1453 		for (i = 0; i < el->count; i++) {
1454 			if (seq + i == el->event[i].seq)
1455 				mfi_decode_evt(sc, &el->event[i]);
1456 		}
1457 	}
1458 
1459 	free(cm->cm_data, M_MFIBUF);
1460 	mfi_release_command(cm);
1461 	return (0);
1462 }
1463 
1464 static int
1465 mfi_add_ld(struct mfi_softc *sc, int id)
1466 {
1467 	struct mfi_command *cm;
1468 	struct mfi_dcmd_frame *dcmd = NULL;
1469 	struct mfi_ld_info *ld_info = NULL;
1470 	int error;
1471 
1472 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1473 
1474 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1475 	    (void **)&ld_info, sizeof(*ld_info));
1476 	if (error) {
1477 		device_printf(sc->mfi_dev,
1478 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1479 		if (ld_info)
1480 			free(ld_info, M_MFIBUF);
1481 		return (error);
1482 	}
1483 	cm->cm_flags = MFI_CMD_DATAIN;
1484 	dcmd = &cm->cm_frame->dcmd;
1485 	dcmd->mbox[0] = id;
1486 	if (mfi_wait_command(sc, cm) != 0) {
1487 		device_printf(sc->mfi_dev,
1488 		    "Failed to get logical drive: %d\n", id);
1489 		free(ld_info, M_MFIBUF);
1490 		return (0);
1491 	}
1492 
1493 	mfi_add_ld_complete(cm);
1494 	return (0);
1495 }
1496 
1497 static void
1498 mfi_add_ld_complete(struct mfi_command *cm)
1499 {
1500 	struct mfi_frame_header *hdr;
1501 	struct mfi_ld_info *ld_info;
1502 	struct mfi_softc *sc;
1503 	device_t child;
1504 
1505 	sc = cm->cm_sc;
1506 	hdr = &cm->cm_frame->header;
1507 	ld_info = cm->cm_private;
1508 
1509 	if (hdr->cmd_status != MFI_STAT_OK) {
1510 		free(ld_info, M_MFIBUF);
1511 		mfi_release_command(cm);
1512 		return;
1513 	}
1514 	mfi_release_command(cm);
1515 
1516 	mtx_unlock(&sc->mfi_io_lock);
1517 	mtx_lock(&Giant);
1518 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1519 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1520 		free(ld_info, M_MFIBUF);
1521 		mtx_unlock(&Giant);
1522 		mtx_lock(&sc->mfi_io_lock);
1523 		return;
1524 	}
1525 
1526 	device_set_ivars(child, ld_info);
1527 	device_set_desc(child, "MFI Logical Disk");
1528 	bus_generic_attach(sc->mfi_dev);
1529 	mtx_unlock(&Giant);
1530 	mtx_lock(&sc->mfi_io_lock);
1531 }
1532 
1533 static struct mfi_command *
1534 mfi_bio_command(struct mfi_softc *sc)
1535 {
1536 	struct mfi_io_frame *io;
1537 	struct mfi_command *cm;
1538 	struct bio *bio;
1539 	int flags, blkcount;
1540 
1541 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1542 		return (NULL);
1543 
1544 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1545 		mfi_release_command(cm);
1546 		return (NULL);
1547 	}
1548 
1549 	io = &cm->cm_frame->io;
1550 	switch (bio->bio_cmd & 0x03) {
1551 	case BIO_READ:
1552 		io->header.cmd = MFI_CMD_LD_READ;
1553 		flags = MFI_CMD_DATAIN;
1554 		break;
1555 	case BIO_WRITE:
1556 		io->header.cmd = MFI_CMD_LD_WRITE;
1557 		flags = MFI_CMD_DATAOUT;
1558 		break;
1559 	default:
1560 		panic("Invalid bio command");
1561 	}
1562 
1563 	/* Cheat with the sector length to avoid a non-constant division */
1564 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1565 	io->header.target_id = (uintptr_t)bio->bio_driver1;
1566 	io->header.timeout = 0;
1567 	io->header.flags = 0;
1568 	io->header.sense_len = MFI_SENSE_LEN;
1569 	io->header.data_len = blkcount;
1570 	io->sense_addr_lo = cm->cm_sense_busaddr;
1571 	io->sense_addr_hi = 0;
1572 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1573 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1574 	cm->cm_complete = mfi_bio_complete;
1575 	cm->cm_private = bio;
1576 	cm->cm_data = bio->bio_data;
1577 	cm->cm_len = bio->bio_bcount;
1578 	cm->cm_sg = &io->sgl;
1579 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1580 	cm->cm_flags = flags;
1581 	return (cm);
1582 }
1583 
1584 static void
1585 mfi_bio_complete(struct mfi_command *cm)
1586 {
1587 	struct bio *bio;
1588 	struct mfi_frame_header *hdr;
1589 	struct mfi_softc *sc;
1590 
1591 	bio = cm->cm_private;
1592 	hdr = &cm->cm_frame->header;
1593 	sc = cm->cm_sc;
1594 
1595 	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1596 		bio->bio_flags |= BIO_ERROR;
1597 		bio->bio_error = EIO;
1598 		device_printf(sc->mfi_dev, "I/O error, status= %d "
1599 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1600 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1601 	}
1602 
1603 	mfi_release_command(cm);
1604 	mfi_disk_complete(bio);
1605 }
1606 
1607 void
1608 mfi_startio(struct mfi_softc *sc)
1609 {
1610 	struct mfi_command *cm;
1611 	struct ccb_hdr *ccbh;
1612 
1613 	for (;;) {
1614 		/* Don't bother if we're short on resources */
1615 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1616 			break;
1617 
1618 		/* Try a command that has already been prepared */
1619 		cm = mfi_dequeue_ready(sc);
1620 
1621 		if (cm == NULL) {
1622 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1623 				cm = sc->mfi_cam_start(ccbh);
1624 		}
1625 
1626 		/* Nope, so look for work on the bioq */
1627 		if (cm == NULL)
1628 			cm = mfi_bio_command(sc);
1629 
1630 		/* No work available, so exit */
1631 		if (cm == NULL)
1632 			break;
1633 
1634 		/* Send the command to the controller */
1635 		if (mfi_mapcmd(sc, cm) != 0) {
1636 			mfi_requeue_ready(cm);
1637 			break;
1638 		}
1639 	}
1640 }
1641 
1642 static int
1643 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1644 {
1645 	int error, polled;
1646 
1647 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1648 
1649 	if (cm->cm_data != NULL) {
1650 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1651 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1652 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1653 		if (error == EINPROGRESS) {
1654 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1655 			return (0);
1656 		}
1657 	} else {
1658 		error = mfi_send_frame(sc, cm);
1659 	}
1660 
1661 	return (error);
1662 }
1663 
1664 static void
1665 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1666 {
1667 	struct mfi_frame_header *hdr;
1668 	struct mfi_command *cm;
1669 	union mfi_sgl *sgl;
1670 	struct mfi_softc *sc;
1671 	int i, dir;
1672 
1673 	cm = (struct mfi_command *)arg;
1674 	sc = cm->cm_sc;
1675 	hdr = &cm->cm_frame->header;
1676 	sgl = cm->cm_sg;
1677 
1678 	if (error) {
1679 		printf("error %d in callback\n", error);
1680 		cm->cm_error = error;
1681 		mfi_complete(sc, cm);
1682 		return;
1683 	}
1684 
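	/*
	 * Copy the DMA segments into the frame's scatter/gather list, using
	 * 64-bit entries when MFI_FLAGS_SG64 was set at attach time.
	 */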
1685 	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1686 		for (i = 0; i < nsegs; i++) {
1687 			sgl->sg32[i].addr = segs[i].ds_addr;
1688 			sgl->sg32[i].len = segs[i].ds_len;
1689 		}
1690 	} else {
1691 		for (i = 0; i < nsegs; i++) {
1692 			sgl->sg64[i].addr = segs[i].ds_addr;
1693 			sgl->sg64[i].len = segs[i].ds_len;
1694 		}
1695 		hdr->flags |= MFI_FRAME_SGL64;
1696 	}
1697 	hdr->sg_count = nsegs;
1698 
1699 	dir = 0;
1700 	if (cm->cm_flags & MFI_CMD_DATAIN) {
1701 		dir |= BUS_DMASYNC_PREREAD;
1702 		hdr->flags |= MFI_FRAME_DIR_READ;
1703 	}
1704 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
1705 		dir |= BUS_DMASYNC_PREWRITE;
1706 		hdr->flags |= MFI_FRAME_DIR_WRITE;
1707 	}
1708 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1709 	cm->cm_flags |= MFI_CMD_MAPPED;
1710 
1711 	/*
1712 	 * Instead of calculating the total number of frames in the
1713 	 * compound frame, it's already assumed that there will be at
1714 	 * least 1 frame, so don't compensate for the modulo of the
1715 	 * following division.
1716 	 */
1717 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
1718 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1719 
1720 	mfi_send_frame(sc, cm);
1721 
1722 	return;
1723 }
1724 
1725 static int
1726 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1727 {
1728 	struct mfi_frame_header *hdr;
1729 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1730 
1731 	hdr = &cm->cm_frame->header;
1732 
1733 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1734 		cm->cm_timestamp = time_uptime;
1735 		mfi_enqueue_busy(cm);
1736 	} else {
1737 		hdr->cmd_status = 0xff;
1738 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1739 	}
1740 
1741 	/*
1742 	 * The bus address of the command is aligned on a 64 byte boundary,
1743 	 * leaving the low 6 bits as zero.  For whatever reason, the
1744 	 * hardware wants the address shifted right by three, leaving just
1745 	 * 3 zero bits.  These three bits are then used as a prefetching
1746 	 * hint for the hardware to predict how many frames need to be
1747 	 * fetched across the bus.  If a command has more than 8 frames
1748 	 * then the 3 bits are set to 0x7 and the firmware uses other
1749 	 * information in the command to determine the total amount to fetch.
1750 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1751 	 * is enough for both 32bit and 64bit systems.
1752 	 */
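	/*
	 * Illustrative encoding only (the real register write is hidden
	 * behind sc->mfi_issue_cmd): with a 64-byte-aligned busaddr,
	 * (busaddr >> 3) leaves the low three bits clear, and OR-ing in
	 * the clamped cm_extra_frames supplies the prefetch hint
	 * described above.
	 */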
1753 	if (cm->cm_extra_frames > 7)
1754 		cm->cm_extra_frames = 7;
1755 
1756 	sc->mfi_issue_cmd(sc,cm->cm_frame_busaddr,cm->cm_extra_frames);
1757 
1758 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1759 		return (0);
1760 
1761 	/* This is a polled command, so busy-wait for it to complete. */
1762 	while (hdr->cmd_status == 0xff) {
1763 		DELAY(1000);
1764 		tm -= 1;
1765 		if (tm <= 0)
1766 			break;
1767 	}
1768 
1769 	if (hdr->cmd_status == 0xff) {
1770 		device_printf(sc->mfi_dev, "Frame %p timed out, "
1771 			      "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1772 		return (ETIMEDOUT);
1773 	}
1774 
1775 	return (0);
1776 }
1777 
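/*
 * Finish a command: sync and unload its DMA map if one was set up, mark
 * it completed, and either run its completion handler or wake up the
 * thread sleeping on it.
 */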
1778 static void
1779 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1780 {
1781 	int dir;
1782 
1783 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1784 		dir = 0;
1785 		if (cm->cm_flags & MFI_CMD_DATAIN)
1786 			dir |= BUS_DMASYNC_POSTREAD;
1787 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1788 			dir |= BUS_DMASYNC_POSTWRITE;
1789 
1790 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1791 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1792 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1793 	}
1794 
1795 	cm->cm_flags |= MFI_CMD_COMPLETED;
1796 
1797 	if (cm->cm_complete != NULL)
1798 		cm->cm_complete(cm);
1799 	else
1800 		wakeup(cm);
1801 }
1802 
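/*
 * Issue a polled MFI_CMD_ABORT frame against a previously issued
 * command.  As currently used the target is the outstanding AEN
 * command, so afterwards wait (in 5 second intervals, up to 5 times)
 * for that command to drain.
 */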
1803 static int
1804 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1805 {
1806 	struct mfi_command *cm;
1807 	struct mfi_abort_frame *abort;
1808 	int i = 0;
1809 
1810 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1811 
1812 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1813 		return (EBUSY);
1814 	}
1815 
1816 	abort = &cm->cm_frame->abort;
1817 	abort->header.cmd = MFI_CMD_ABORT;
1818 	abort->header.flags = 0;
1819 	abort->abort_context = cm_abort->cm_frame->header.context;
1820 	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1821 	abort->abort_mfi_addr_hi = 0;
1822 	cm->cm_data = NULL;
1823 	cm->cm_flags = MFI_CMD_POLLED;
1824 
1825 	sc->mfi_aen_cm->cm_aen_abort = 1;
1826 	mfi_mapcmd(sc, cm);
1827 	mfi_release_command(cm);
1828 
1829 	while (i < 5 && sc->mfi_aen_cm != NULL) {
1830 		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
1831 		i++;
1832 	}
1833 
1834 	return (0);
1835 }
1836 
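/*
 * Write a buffer to a logical disk using a polled LD_WRITE frame; this
 * is the non-sleeping path used for kernel crash dumps.
 */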
1837 int
1838 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1839 {
1840 	struct mfi_command *cm;
1841 	struct mfi_io_frame *io;
1842 	int error;
1843 
1844 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1845 		return (EBUSY);
1846 
1847 	io = &cm->cm_frame->io;
1848 	io->header.cmd = MFI_CMD_LD_WRITE;
1849 	io->header.target_id = id;
1850 	io->header.timeout = 0;
1851 	io->header.flags = 0;
1852 	io->header.sense_len = MFI_SENSE_LEN;
1853 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1854 	io->sense_addr_lo = cm->cm_sense_busaddr;
1855 	io->sense_addr_hi = 0;
1856 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1857 	io->lba_lo = lba & 0xffffffff;
1858 	cm->cm_data = virt;
1859 	cm->cm_len = len;
1860 	cm->cm_sg = &io->sgl;
1861 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1862 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1863 
1864 	error = mfi_mapcmd(sc, cm);
1865 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1866 	    BUS_DMASYNC_POSTWRITE);
1867 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1868 	mfi_release_command(cm);
1869 
1870 	return (error);
1871 }
1872 
1873 static int
1874 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1875 {
1876 	struct mfi_softc *sc;
1877 	int error;
1878 
1879 	sc = dev->si_drv1;
1880 
1881 	mtx_lock(&sc->mfi_io_lock);
1882 	if (sc->mfi_detaching)
1883 		error = ENXIO;
1884 	else {
1885 		sc->mfi_flags |= MFI_FLAGS_OPEN;
1886 		error = 0;
1887 	}
1888 	mtx_unlock(&sc->mfi_io_lock);
1889 
1890 	return (error);
1891 }
1892 
1893 static int
1894 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1895 {
1896 	struct mfi_softc *sc;
1897 	struct mfi_aen *mfi_aen_entry, *tmp;
1898 
1899 	sc = dev->si_drv1;
1900 
1901 	mtx_lock(&sc->mfi_io_lock);
1902 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1903 
1904 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1905 		if (mfi_aen_entry->p == curproc) {
1906 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1907 			    aen_link);
1908 			free(mfi_aen_entry, M_MFIBUF);
1909 		}
1910 	}
1911 	mtx_unlock(&sc->mfi_io_lock);
1912 	return (0);
1913 }
1914 
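/*
 * Serialize DCMDs that modify the array configuration by taking the
 * config sx lock.  Returns non-zero when the lock was taken so the
 * caller can pass the result to mfi_config_unlock() later.
 */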
1915 static int
1916 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
1917 {
1918 
1919 	switch (opcode) {
1920 	case MFI_DCMD_LD_DELETE:
1921 	case MFI_DCMD_CFG_ADD:
1922 	case MFI_DCMD_CFG_CLEAR:
1923 		sx_xlock(&sc->mfi_config_lock);
1924 		return (1);
1925 	default:
1926 		return (0);
1927 	}
1928 }
1929 
1930 static void
1931 mfi_config_unlock(struct mfi_softc *sc, int locked)
1932 {
1933 
1934 	if (locked)
1935 		sx_xunlock(&sc->mfi_config_lock);
1936 }
1937 
1938 /* Perform pre-issue checks on commands from userland and possibly veto them. */
1939 static int
1940 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
1941 {
1942 	struct mfi_disk *ld, *ld2;
1943 	int error;
1944 
1945 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1946 	error = 0;
1947 	switch (cm->cm_frame->dcmd.opcode) {
1948 	case MFI_DCMD_LD_DELETE:
1949 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1950 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1951 				break;
1952 		}
1953 		if (ld == NULL)
1954 			error = ENOENT;
1955 		else
1956 			error = mfi_disk_disable(ld);
1957 		break;
1958 	case MFI_DCMD_CFG_CLEAR:
1959 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1960 			error = mfi_disk_disable(ld);
1961 			if (error)
1962 				break;
1963 		}
1964 		if (error) {
1965 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
1966 				if (ld2 == ld)
1967 					break;
1968 				mfi_disk_enable(ld2);
1969 			}
1970 		}
1971 		break;
1972 	default:
1973 		break;
1974 	}
1975 	return (error);
1976 }
1977 
1978 /* Perform post-issue checks on commands from userland. */
1979 static void
1980 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
1981 {
1982 	struct mfi_disk *ld, *ldn;
1983 
1984 	switch (cm->cm_frame->dcmd.opcode) {
1985 	case MFI_DCMD_LD_DELETE:
1986 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1987 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1988 				break;
1989 		}
1990 		KASSERT(ld != NULL, ("volume disappeared"));
1991 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
1992 			mtx_unlock(&sc->mfi_io_lock);
1993 			mtx_lock(&Giant);
1994 			device_delete_child(sc->mfi_dev, ld->ld_dev);
1995 			mtx_unlock(&Giant);
1996 			mtx_lock(&sc->mfi_io_lock);
1997 		} else
1998 			mfi_disk_enable(ld);
1999 		break;
2000 	case MFI_DCMD_CFG_CLEAR:
2001 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2002 			mtx_unlock(&sc->mfi_io_lock);
2003 			mtx_lock(&Giant);
2004 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2005 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2006 			}
2007 			mtx_unlock(&Giant);
2008 			mtx_lock(&sc->mfi_io_lock);
2009 		} else {
2010 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2011 				mfi_disk_enable(ld);
2012 		}
2013 		break;
2014 	case MFI_DCMD_CFG_ADD:
2015 		mfi_ldprobe(sc);
2016 		break;
2017 	}
2018 }
2019 
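/*
 * Native ioctl entry point: queue statistics, logical disk queries,
 * pass-through MFI commands from userland (with config locking plus
 * pre- and post-issue checks), AEN registration, and redirection of
 * the Linux ioctl shims to mfi_linux_ioctl_int().
 */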
2020 static int
2021 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
2022 {
2023 	struct mfi_softc *sc;
2024 	union mfi_statrequest *ms;
2025 	struct mfi_ioc_packet *ioc;
2026 	struct mfi_ioc_aen *aen;
2027 	struct mfi_command *cm = NULL;
2028 	uint32_t context;
2029 	uint8_t *sense_ptr;
2030 	uint8_t *data = NULL, *temp;
2031 	int i;
2032 	int error, locked;
2033 
2034 	sc = dev->si_drv1;
2035 	error = 0;
2036 
2037 	switch (cmd) {
2038 	case MFIIO_STATS:
2039 		ms = (union mfi_statrequest *)arg;
2040 		switch (ms->ms_item) {
2041 		case MFIQ_FREE:
2042 		case MFIQ_BIO:
2043 		case MFIQ_READY:
2044 		case MFIQ_BUSY:
2045 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2046 			    sizeof(struct mfi_qstat));
2047 			break;
2048 		default:
2049 			error = ENOIOCTL;
2050 			break;
2051 		}
2052 		break;
2053 	case MFIIO_QUERY_DISK:
2054 	{
2055 		struct mfi_query_disk *qd;
2056 		struct mfi_disk *ld;
2057 
2058 		qd = (struct mfi_query_disk *)arg;
2059 		mtx_lock(&sc->mfi_io_lock);
2060 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2061 			if (ld->ld_id == qd->array_id)
2062 				break;
2063 		}
2064 		if (ld == NULL) {
2065 			qd->present = 0;
2066 			mtx_unlock(&sc->mfi_io_lock);
2067 			return (0);
2068 		}
2069 		qd->present = 1;
2070 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2071 			qd->open = 1;
2072 		bzero(qd->devname, SPECNAMELEN + 1);
2073 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2074 		mtx_unlock(&sc->mfi_io_lock);
2075 		break;
2076 	}
2077 	case MFI_CMD:
2078 		ioc = (struct mfi_ioc_packet *)arg;
2079 
2080 		mtx_lock(&sc->mfi_io_lock);
2081 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2082 			mtx_unlock(&sc->mfi_io_lock);
2083 			return (EBUSY);
2084 		}
2085 		mtx_unlock(&sc->mfi_io_lock);
2086 		locked = 0;
2087 
2088 		/*
2089 		 * save off original context since copying from user
2090 		 * will clobber some data
2091 		 */
2092 		context = cm->cm_frame->header.context;
2093 
2094 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2095 		      ioc->mfi_sgl_off); /* Linux can do 2 frames? */
2096 		cm->cm_total_frame_size = ioc->mfi_sgl_off;
2097 		cm->cm_sg =
2098 		    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2099 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2100 		cm->cm_len = cm->cm_frame->header.data_len;
2101 		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2102 					    M_WAITOK | M_ZERO);
2103 		if (cm->cm_data == NULL) {
2104 			device_printf(sc->mfi_dev, "Malloc failed\n");
2105 			goto out;
2106 		}
2107 
2108 		/* restore header context */
2109 		cm->cm_frame->header.context = context;
2110 
2111 		temp = data;
2112 		for (i = 0; i < ioc->mfi_sge_count; i++) {
2113 			error = copyin(ioc->mfi_sgl[i].iov_base,
2114 			       temp,
2115 			       ioc->mfi_sgl[i].iov_len);
2116 			if (error != 0) {
2117 				device_printf(sc->mfi_dev,
2118 				    "Copy in failed\n");
2119 				goto out;
2120 			}
2121 			temp = &temp[ioc->mfi_sgl[i].iov_len];
2122 		}
2123 
2124 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2125 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2126 
2127 		mtx_lock(&sc->mfi_io_lock);
2128 		error = mfi_check_command_pre(sc, cm);
2129 		if (error) {
2130 			mtx_unlock(&sc->mfi_io_lock);
2131 			goto out;
2132 		}
2133 
2134 		if ((error = mfi_wait_command(sc, cm)) != 0) {
2135 			device_printf(sc->mfi_dev,
2136 			    "Controller command failed\n");
2137 			mtx_unlock(&sc->mfi_io_lock);
2138 			goto out;
2139 		}
2140 
2141 		mfi_check_command_post(sc, cm);
2142 		mtx_unlock(&sc->mfi_io_lock);
2143 
2144 		temp = data;
2145 		for (i = 0; i < ioc->mfi_sge_count; i++) {
2146 			error = copyout(temp,
2147 				ioc->mfi_sgl[i].iov_base,
2148 				ioc->mfi_sgl[i].iov_len);
2149 			if (error != 0) {
2150 				device_printf(sc->mfi_dev,
2151 				    "Copy out failed\n");
2152 				goto out;
2153 			}
2154 			temp = &temp[ioc->mfi_sgl[i].iov_len];
2155 		}
2156 
2157 		if (ioc->mfi_sense_len) {
2158 			/* copy out sense */
2159 			sense_ptr = &((struct mfi_ioc_packet*)arg)
2160 			    ->mfi_frame.raw[0];
2161 			error = copyout(cm->cm_sense, sense_ptr,
2162 			    ioc->mfi_sense_len);
2163 			if (error != 0) {
2164 				device_printf(sc->mfi_dev,
2165 				    "Copy out failed\n");
2166 				goto out;
2167 			}
2168 		}
2169 
2170 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2171 out:
2172 		mfi_config_unlock(sc, locked);
2173 		if (data)
2174 			free(data, M_MFIBUF);
2175 		if (cm) {
2176 			mtx_lock(&sc->mfi_io_lock);
2177 			mfi_release_command(cm);
2178 			mtx_unlock(&sc->mfi_io_lock);
2179 		}
2180 
2181 		break;
2182 	case MFI_SET_AEN:
2183 		aen = (struct mfi_ioc_aen *)arg;
2184 		error = mfi_aen_register(sc, aen->aen_seq_num,
2185 		    aen->aen_class_locale);
2186 
2187 		break;
2188 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2189 		{
2190 			devclass_t devclass;
2191 			struct mfi_linux_ioc_packet l_ioc;
2192 			int adapter;
2193 
2194 			devclass = devclass_find("mfi");
2195 			if (devclass == NULL)
2196 				return (ENOENT);
2197 
2198 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
2199 			if (error)
2200 				return (error);
2201 			adapter = l_ioc.lioc_adapter_no;
2202 			sc = devclass_get_softc(devclass, adapter);
2203 			if (sc == NULL)
2204 				return (ENOENT);
2205 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
2206 			    cmd, arg, flag, td));
2207 			break;
2208 		}
2209 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2210 		{
2211 			devclass_t devclass;
2212 			struct mfi_linux_ioc_aen l_aen;
2213 			int adapter;
2214 
2215 			devclass = devclass_find("mfi");
2216 			if (devclass == NULL)
2217 				return (ENOENT);
2218 
2219 			error = copyin(arg, &l_aen, sizeof(l_aen));
2220 			if (error)
2221 				return (error);
2222 			adapter = l_aen.laen_adapter_no;
2223 			sc = devclass_get_softc(devclass, adapter);
2224 			if (sc == NULL)
2225 				return (ENOENT);
2226 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
2227 			    cmd, arg, flag, td));
2228 			break;
2229 		}
2230 	default:
2231 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2232 		error = ENOENT;
2233 		break;
2234 	}
2235 
2236 	return (error);
2237 }
2238 
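/*
 * Linux compatibility ioctl shim: the same pass-through and AEN
 * registration as the native path, but using the lioc structures and
 * user pointers supplied by the Linux emulation layer.
 */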
2239 static int
2240 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
2241 {
2242 	struct mfi_softc *sc;
2243 	struct mfi_linux_ioc_packet l_ioc;
2244 	struct mfi_linux_ioc_aen l_aen;
2245 	struct mfi_command *cm = NULL;
2246 	struct mfi_aen *mfi_aen_entry;
2247 	uint8_t *sense_ptr;
2248 	uint32_t context;
2249 	uint8_t *data = NULL, *temp;
2250 	void *temp_convert;
2251 	int i;
2252 	int error, locked;
2253 
2254 	sc = dev->si_drv1;
2255 	error = 0;
2256 	switch (cmd) {
2257 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2258 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
2259 		if (error != 0)
2260 			return (error);
2261 
2262 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2263 			return (EINVAL);
2264 		}
2265 
2266 		mtx_lock(&sc->mfi_io_lock);
2267 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2268 			mtx_unlock(&sc->mfi_io_lock);
2269 			return (EBUSY);
2270 		}
2271 		mtx_unlock(&sc->mfi_io_lock);
2272 		locked = 0;
2273 
2274 		/*
2275 		 * save off original context since copying from user
2276 		 * will clobber some data
2277 		 */
2278 		context = cm->cm_frame->header.context;
2279 
2280 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2281 		      l_ioc.lioc_sgl_off); /* Linux can do 2 frames? */
2282 		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
2283 		cm->cm_sg =
2284 		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
2285 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2286 		cm->cm_len = cm->cm_frame->header.data_len;
2287 		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2288 					    M_WAITOK | M_ZERO);
2289 
2290 		/* restore header context */
2291 		cm->cm_frame->header.context = context;
2292 
2293 		temp = data;
2294 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2295 			temp_convert =
2296 			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
2297 			error = copyin(temp_convert,
2298 			       temp,
2299 			       l_ioc.lioc_sgl[i].iov_len);
2300 			if (error != 0) {
2301 				device_printf(sc->mfi_dev,
2302 				    "Copy in failed\n");
2303 				goto out;
2304 			}
2305 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2306 		}
2307 
2308 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2309 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2310 
2311 		mtx_lock(&sc->mfi_io_lock);
2312 		error = mfi_check_command_pre(sc, cm);
2313 		if (error) {
2314 			mtx_unlock(&sc->mfi_io_lock);
2315 			goto out;
2316 		}
2317 
2318 		if ((error = mfi_wait_command(sc, cm)) != 0) {
2319 			device_printf(sc->mfi_dev,
2320 			    "Controller command failed\n");
2321 			mtx_unlock(&sc->mfi_io_lock);
2322 			goto out;
2323 		}
2324 
2325 		mfi_check_command_post(sc, cm);
2326 		mtx_unlock(&sc->mfi_io_lock);
2327 
2328 		temp = data;
2329 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2330 			temp_convert =
2331 			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
2332 			error = copyout(temp,
2333 				temp_convert,
2334 				l_ioc.lioc_sgl[i].iov_len);
2335 			if (error != 0) {
2336 				device_printf(sc->mfi_dev,
2337 				    "Copy out failed\n");
2338 				goto out;
2339 			}
2340 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2341 		}
2342 
2343 		if (l_ioc.lioc_sense_len) {
2344 			/* copy out sense */
2345 			sense_ptr = &((struct mfi_linux_ioc_packet*)arg)
2346 			    ->lioc_frame.raw[0];
2347 			error = copyout(cm->cm_sense, sense_ptr,
2348 			    l_ioc.lioc_sense_len);
2349 			if (error != 0) {
2350 				device_printf(sc->mfi_dev,
2351 				    "Copy out failed\n");
2352 				goto out;
2353 			}
2354 		}
2355 
2356 		error = copyout(&cm->cm_frame->header.cmd_status,
2357 			&((struct mfi_linux_ioc_packet*)arg)
2358 			->lioc_frame.hdr.cmd_status,
2359 			1);
2360 		if (error != 0) {
2361 			device_printf(sc->mfi_dev,
2362 				      "Copy out failed\n");
2363 			goto out;
2364 		}
2365 
2366 out:
2367 		mfi_config_unlock(sc, locked);
2368 		if (data)
2369 			free(data, M_MFIBUF);
2370 		if (cm) {
2371 			mtx_lock(&sc->mfi_io_lock);
2372 			mfi_release_command(cm);
2373 			mtx_unlock(&sc->mfi_io_lock);
2374 		}
2375 
2376 		return (error);
2377 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2378 		error = copyin(arg, &l_aen, sizeof(l_aen));
2379 		if (error != 0)
2380 			return (error);
2381 		printf("Registering AEN for pid %d\n", curproc->p_pid);
2382 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
2383 		    M_WAITOK);
2384 		mtx_lock(&sc->mfi_io_lock);
2385 		if (mfi_aen_entry != NULL) {
2386 			mfi_aen_entry->p = curproc;
2387 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
2388 			    aen_link);
2389 		}
2390 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
2391 		    l_aen.laen_class_locale);
2392 
2393 		if (error != 0) {
2394 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2395 			    aen_link);
2396 			free(mfi_aen_entry, M_MFIBUF);
2397 		}
2398 		mtx_unlock(&sc->mfi_io_lock);
2399 
2400 		return (error);
2401 	default:
2402 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2403 		error = ENOENT;
2404 		break;
2405 	}
2406 
2407 	return (error);
2408 }
2409 
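/*
 * poll(2) handler for the control device: report readability once an
 * AEN has triggered, or record the thread with selrecord() so it can
 * be woken when one arrives.
 */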
2410 static int
2411 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2412 {
2413 	struct mfi_softc *sc;
2414 	int revents = 0;
2415 
2416 	sc = dev->si_drv1;
2417 
2418 	if (poll_events & (POLLIN | POLLRDNORM)) {
2419 		if (sc->mfi_aen_triggered != 0) {
2420 			revents |= poll_events & (POLLIN | POLLRDNORM);
2421 			sc->mfi_aen_triggered = 0;
2422 		}
2423 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2424 			revents |= POLLERR;
2425 		}
2426 	}
2427 
2428 	if (revents == 0) {
2429 		if (poll_events & (POLLIN | POLLRDNORM)) {
2430 			sc->mfi_poll_waiting = 1;
2431 			selrecord(td, &sc->mfi_select);
2432 		}
2433 	}
2434 
2435 	return (revents);
2436 }
2437 
2438 
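/*
 * Debugging aid: walk every mfi instance and print the busy commands
 * that have been outstanding longer than MFI_CMD_TIMEOUT.  Only
 * reachable through the disabled hook at the end of mfi_timeout().
 */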
2439 static void
2440 mfi_dump_all(void)
2441 {
2442 	struct mfi_softc *sc;
2443 	struct mfi_command *cm;
2444 	devclass_t dc;
2445 	time_t deadline;
2446 	int timedout;
2447 	int i;
2448 
2449 	dc = devclass_find("mfi");
2450 	if (dc == NULL) {
2451 		printf("No mfi dev class\n");
2452 		return;
2453 	}
2454 
2455 	for (i = 0; ; i++) {
2456 		sc = devclass_get_softc(dc, i);
2457 		if (sc == NULL)
2458 			break;
2459 		device_printf(sc->mfi_dev, "Dumping\n\n");
2460 		timedout = 0;
2461 		deadline = time_uptime - MFI_CMD_TIMEOUT;
2462 		mtx_lock(&sc->mfi_io_lock);
2463 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2464 			if (cm->cm_timestamp < deadline) {
2465 				device_printf(sc->mfi_dev,
2466 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2467 				    (int)(time_uptime - cm->cm_timestamp));
2468 				MFI_PRINT_CMD(cm);
2469 				timedout++;
2470 			}
2471 		}
2472 
2473 #if 0
2474 		if (timedout)
2475 			MFI_DUMP_CMDS(SC);
2476 #endif
2477 
2478 		mtx_unlock(&sc->mfi_io_lock);
2479 	}
2480 
2481 	return;
2482 }
2483 
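/*
 * Watchdog callout: report busy commands (other than the long-lived
 * AEN command) that have been pending longer than MFI_CMD_TIMEOUT,
 * then reschedule itself.
 */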
2484 static void
2485 mfi_timeout(void *data)
2486 {
2487 	struct mfi_softc *sc = (struct mfi_softc *)data;
2488 	struct mfi_command *cm;
2489 	time_t deadline;
2490 	int timedout = 0;
2491 
2492 	deadline = time_uptime - MFI_CMD_TIMEOUT;
2493 	mtx_lock(&sc->mfi_io_lock);
2494 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2495 		if (sc->mfi_aen_cm == cm)
2496 			continue;
2497 		if (cm->cm_timestamp < deadline) {
2498 			device_printf(sc->mfi_dev,
2499 			    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2500 			    (int)(time_uptime - cm->cm_timestamp));
2501 			MFI_PRINT_CMD(cm);
2502 			MFI_VALIDATE_CMD(sc, cm);
2503 			timedout++;
2504 		}
2505 	}
2506 
2507 #if 0
2508 	if (timedout)
2509 		MFI_DUMP_CMDS(SC);
2510 #endif
2511 
2512 	mtx_unlock(&sc->mfi_io_lock);
2513 
2514 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
2515 	    mfi_timeout, sc);
2516 
2517 	if (0)
2518 		mfi_dump_all();
2519 	return;
2520 }
2521