xref: /freebsd/sys/dev/mfi/mfi.c (revision a3cbca537ef1d8ac03a693cd51d98fb8087acc8d)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
55 
56 #include "opt_compat.h"
57 #include "opt_mfi.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
64 #include <sys/poll.h>
65 #include <sys/selinfo.h>
66 #include <sys/bus.h>
67 #include <sys/conf.h>
68 #include <sys/eventhandler.h>
69 #include <sys/rman.h>
70 #include <sys/bus_dma.h>
71 #include <sys/bio.h>
72 #include <sys/ioccom.h>
73 #include <sys/uio.h>
74 #include <sys/proc.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
78 
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81 
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
87 
88 static int	mfi_alloc_commands(struct mfi_softc *);
89 static int	mfi_comms_init(struct mfi_softc *);
90 static int	mfi_get_controller_info(struct mfi_softc *);
91 static int	mfi_get_log_state(struct mfi_softc *,
92 		    struct mfi_evt_log_state **);
93 static int	mfi_parse_entries(struct mfi_softc *, int, int);
94 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void	mfi_startup(void *arg);
96 static void	mfi_intr(void *arg);
97 static void	mfi_ldprobe(struct mfi_softc *sc);
98 static void	mfi_syspdprobe(struct mfi_softc *sc);
99 static void	mfi_handle_evt(void *context, int pending);
100 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void	mfi_aen_complete(struct mfi_command *);
102 static int	mfi_add_ld(struct mfi_softc *sc, int);
103 static void	mfi_add_ld_complete(struct mfi_command *);
104 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void	mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
107 static void	mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
110 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void	mfi_timeout(void *);
115 static int	mfi_user_command(struct mfi_softc *,
116 		    struct mfi_ioc_passthru *);
117 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124 		    uint32_t frame_cnt);
125 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126 		    uint32_t frame_cnt);
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
136            0, "event message locale");
137 
138 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
139 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
140            0, "event message class");
141 
142 static int	mfi_max_cmds = 128;
143 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
144 	   0, "Max commands limit (-1 = controller limit)");
145 
146 static int	mfi_detect_jbod_change = 1;
147 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
148 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
149 
150 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
151 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
152 	   &mfi_polled_cmd_timeout, 0,
153 	   "Polled command timeout - used for firmware flash, etc. (in seconds)");
154 
155 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
157 	   0, "Command timeout (in seconds)");
158 
159 /* Management interface */
160 static d_open_t		mfi_open;
161 static d_close_t	mfi_close;
162 static d_ioctl_t	mfi_ioctl;
163 static d_poll_t		mfi_poll;
164 
165 static struct cdevsw mfi_cdevsw = {
166 	.d_version = 	D_VERSION,
167 	.d_flags =	0,
168 	.d_open = 	mfi_open,
169 	.d_close =	mfi_close,
170 	.d_ioctl =	mfi_ioctl,
171 	.d_poll =	mfi_poll,
172 	.d_name =	"mfi",
173 };
174 
175 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
176 
177 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
178 struct mfi_skinny_dma_info mfi_skinny;
179 
180 static void
181 mfi_enable_intr_xscale(struct mfi_softc *sc)
182 {
183 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
184 }
185 
186 static void
187 mfi_enable_intr_ppc(struct mfi_softc *sc)
188 {
189 	if (sc->mfi_flags & MFI_FLAGS_1078) {
190 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
191 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
192 	}
193 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
194 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
195 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
196 	}
197 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
198 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
199 	}
200 }
201 
202 static int32_t
203 mfi_read_fw_status_xscale(struct mfi_softc *sc)
204 {
205 	return MFI_READ4(sc, MFI_OMSG0);
206 }
207 
208 static int32_t
209 mfi_read_fw_status_ppc(struct mfi_softc *sc)
210 {
211 	return MFI_READ4(sc, MFI_OSP0);
212 }
213 
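/*
 * Interrupt claim helpers: these return 0 when the interrupt was valid
 * and has been acknowledged, or 1 when it was not raised by this
 * controller, in which case mfi_intr() returns without scanning the
 * reply queue.
 */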
214 static int
215 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
216 {
217 	int32_t status;
218 
219 	status = MFI_READ4(sc, MFI_OSTS);
220 	if ((status & MFI_OSTS_INTR_VALID) == 0)
221 		return 1;
222 
223 	MFI_WRITE4(sc, MFI_OSTS, status);
224 	return 0;
225 }
226 
227 static int
228 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
229 {
230 	int32_t status;
231 
232 	status = MFI_READ4(sc, MFI_OSTS);
233 	if (sc->mfi_flags & MFI_FLAGS_1078) {
234 		if (!(status & MFI_1078_RM)) {
235 			return 1;
236 		}
237 	}
238 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
239 		if (!(status & MFI_GEN2_RM)) {
240 			return 1;
241 		}
242 	}
243 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
244 		if (!(status & MFI_SKINNY_RM)) {
245 			return 1;
246 		}
247 	}
248 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
249 		MFI_WRITE4(sc, MFI_OSTS, status);
250 	else
251 		MFI_WRITE4(sc, MFI_ODCR0, status);
252 	return 0;
253 }
254 
255 static void
256 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
257 {
258 	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
259 }
260 
261 static void
262 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
263 {
264 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
265 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
266 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
267 	} else {
268 		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
269 	}
270 }
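
/*
 * Illustrative encoding note: command frames are 64-byte aligned, so the
 * low bits of a frame's bus address are free to carry metadata.  On
 * xscale the address is shifted right by 3 and the frame count occupies
 * the freed low bits; on ppc/skinny the count is shifted left by one and
 * bit 0 is always set, e.g. a command with two extra frames at bus
 * address 0x1000 is posted as 0x1000 | (2 << 1) | 1 = 0x1005.
 */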
271 
272 int
273 mfi_transition_firmware(struct mfi_softc *sc)
274 {
275 	uint32_t fw_state, cur_state;
276 	int max_wait, i;
277 	uint32_t cur_abs_reg_val = 0;
278 	uint32_t prev_abs_reg_val = 0;
279 
280 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
281 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
282 	while (fw_state != MFI_FWSTATE_READY) {
283 		if (bootverbose)
284 			device_printf(sc->mfi_dev, "Waiting for firmware to "
285 			    "become ready\n");
286 		cur_state = fw_state;
287 		switch (fw_state) {
288 		case MFI_FWSTATE_FAULT:
289 			device_printf(sc->mfi_dev, "Firmware fault\n");
290 			return (ENXIO);
291 		case MFI_FWSTATE_WAIT_HANDSHAKE:
292 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
294 			else
295 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
296 			max_wait = MFI_RESET_WAIT_TIME;
297 			break;
298 		case MFI_FWSTATE_OPERATIONAL:
299 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
300 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
301 			else
302 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
303 			max_wait = MFI_RESET_WAIT_TIME;
304 			break;
305 		case MFI_FWSTATE_UNDEFINED:
306 		case MFI_FWSTATE_BB_INIT:
307 			max_wait = MFI_RESET_WAIT_TIME;
308 			break;
309 		case MFI_FWSTATE_FW_INIT_2:
310 			max_wait = MFI_RESET_WAIT_TIME;
311 			break;
312 		case MFI_FWSTATE_FW_INIT:
313 		case MFI_FWSTATE_FLUSH_CACHE:
314 			max_wait = MFI_RESET_WAIT_TIME;
315 			break;
316 		case MFI_FWSTATE_DEVICE_SCAN:
317 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
318 			prev_abs_reg_val = cur_abs_reg_val;
319 			break;
320 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
321 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
322 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
323 			else
324 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
325 			max_wait = MFI_RESET_WAIT_TIME;
326 			break;
327 		default:
328 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
329 			    fw_state);
330 			return (ENXIO);
331 		}
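		/*
		 * Poll the firmware state roughly every 100ms; max_wait * 10
		 * iterations of DELAY(100000) allows up to max_wait seconds
		 * in the current state before it is declared stuck.
		 */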
332 		for (i = 0; i < (max_wait * 10); i++) {
333 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
334 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
335 			if (fw_state == cur_state)
336 				DELAY(100000);
337 			else
338 				break;
339 		}
340 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
341 			/* Check the device scanning progress */
342 			if (prev_abs_reg_val != cur_abs_reg_val) {
343 				continue;
344 			}
345 		}
346 		if (fw_state == cur_state) {
347 			device_printf(sc->mfi_dev, "Firmware stuck in state "
348 			    "%#x\n", fw_state);
349 			return (ENXIO);
350 		}
351 	}
352 	return (0);
353 }
354 
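/*
 * Single-segment bus_dmamap_load() callback: every tag loaded through
 * this helper is created with nsegments == 1, so only segs[0].ds_addr
 * is recorded; the error argument is not examined here.
 */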
355 static void
356 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
357 {
358 	bus_addr_t *addr;
359 
360 	addr = arg;
361 	*addr = segs[0].ds_addr;
362 }
363 
364 
365 int
366 mfi_attach(struct mfi_softc *sc)
367 {
368 	uint32_t status;
369 	int error, commsz, framessz, sensesz;
370 	int frames, unit, max_fw_sge, max_fw_cmds;
371 	uint32_t tb_mem_size = 0;
372 
373 	if (sc == NULL)
374 		return EINVAL;
375 
376 	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver %s\n",
377 	    MEGASAS_VERSION);
378 
379 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
380 	sx_init(&sc->mfi_config_lock, "MFI config");
381 	TAILQ_INIT(&sc->mfi_ld_tqh);
382 	TAILQ_INIT(&sc->mfi_syspd_tqh);
383 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
384 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
385 	TAILQ_INIT(&sc->mfi_evt_queue);
386 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
387 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
388 	TAILQ_INIT(&sc->mfi_aen_pids);
389 	TAILQ_INIT(&sc->mfi_cam_ccbq);
390 
391 	mfi_initq_free(sc);
392 	mfi_initq_ready(sc);
393 	mfi_initq_busy(sc);
394 	mfi_initq_bio(sc);
395 
396 	sc->adpreset = 0;
397 	sc->last_seq_num = 0;
398 	sc->disableOnlineCtrlReset = 1;
399 	sc->issuepend_done = 1;
400 	sc->hw_crit_error = 0;
401 
402 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
403 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
404 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
405 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
406 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
407 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
408 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
409 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
410 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
411 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
412 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
413 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
414 		sc->mfi_tbolt = 1;
415 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
416 	} else {
417 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
418 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
419 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
420 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
421 	}
422 
424 	/* Before we get too far, see if the firmware is working */
425 	if ((error = mfi_transition_firmware(sc)) != 0) {
426 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
427 		    "error %d\n", error);
428 		return (ENXIO);
429 	}
430 
431 	/* Start: LSIP200113393 */
432 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
433 				1, 0,			/* algnmnt, boundary */
434 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
435 				BUS_SPACE_MAXADDR,	/* highaddr */
436 				NULL, NULL,		/* filter, filterarg */
437 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
438 				1,			/* nsegments */
439 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
440 				0,			/* flags */
441 				NULL, NULL,		/* lockfunc, lockarg */
442 				&sc->verbuf_h_dmat)) {
443 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
444 		return (ENOMEM);
445 	}
446 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
447 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
448 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
449 		return (ENOMEM);
450 	}
451 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
452 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
453 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
454 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
455 	/* End: LSIP200113393 */
456 
457 	/*
458 	 * Get information needed for sizing the contiguous memory for the
459 	 * frame pool.  Size down the sgl parameter since we know that
460 	 * we will never need more than what's required for MAXPHYS.
461 	 * It would be nice if these constants were available at runtime
462 	 * instead of compile time.
463 	 */
464 	status = sc->mfi_read_fw_status(sc);
465 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
466 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
467 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
468 		    max_fw_cmds, mfi_max_cmds);
469 		sc->mfi_max_fw_cmds = mfi_max_cmds;
470 	} else {
471 		sc->mfi_max_fw_cmds = max_fw_cmds;
472 	}
473 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
474 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
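	/*
	 * For example, assuming 4KB pages and a 128KB MFI_MAXPHYS, the
	 * (MFI_MAXPHYS / PAGE_SIZE) + 1 term is 33, enough S/G entries for
	 * a maximal transfer even when the buffer is not page aligned.
	 */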
475 
476 	/* ThunderBolt Support get the contiguous memory */
477 
478 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
479 		mfi_tbolt_init_globals(sc);
480 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
481 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
482 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
483 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
484 
485 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
486 				1, 0,			/* algnmnt, boundary */
487 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
488 				BUS_SPACE_MAXADDR,	/* highaddr */
489 				NULL, NULL,		/* filter, filterarg */
490 				tb_mem_size,		/* maxsize */
491 				1,			/* nsegments */
492 				tb_mem_size,		/* maxsegsize */
493 				0,			/* flags */
494 				NULL, NULL,		/* lockfunc, lockarg */
495 				&sc->mfi_tb_dmat)) {
496 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
497 			return (ENOMEM);
498 		}
499 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
500 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
501 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
502 			return (ENOMEM);
503 		}
504 		bzero(sc->request_message_pool, tb_mem_size);
505 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
506 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
507 
508 		/* For ThunderBolt memory init */
509 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
510 				0x100, 0,		/* algnmnt, boundary */
511 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
512 				BUS_SPACE_MAXADDR,	/* highaddr */
513 				NULL, NULL,		/* filter, filterarg */
514 				MFI_FRAME_SIZE,		/* maxsize */
515 				1,			/* nsegments */
516 				MFI_FRAME_SIZE,		/* maxsegsize */
517 				0,			/* flags */
518 				NULL, NULL,		/* lockfunc, lockarg */
519 				&sc->mfi_tb_init_dmat)) {
520 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
521 			return (ENOMEM);
522 		}
523 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
524 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
525 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
526 			return (ENOMEM);
527 		}
528 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
529 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
530 		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
531 		    &sc->mfi_tb_init_busaddr, 0);
532 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
533 		    tb_mem_size)) {
534 			device_printf(sc->mfi_dev,
535 			    "Thunderbolt pool preparation error\n");
536 			return (ENOMEM);
537 		}
538 
539 		/*
540 		 * Allocate a separate DMA mapping for the MPI2 IOC Init
541 		 * descriptor, kept distinct from the request and reply
542 		 * descriptor allocations above to avoid confusion later.
543 		 */
544 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
545 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
546 				1, 0,			/* algnmnt, boundary */
547 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
548 				BUS_SPACE_MAXADDR,	/* highaddr */
549 				NULL, NULL,		/* filter, filterarg */
550 				tb_mem_size,		/* maxsize */
551 				1,			/* nsegments */
552 				tb_mem_size,		/* maxsegsize */
553 				0,			/* flags */
554 				NULL, NULL,		/* lockfunc, lockarg */
555 				&sc->mfi_tb_ioc_init_dmat)) {
556 			device_printf(sc->mfi_dev,
557 			    "Cannot allocate comms DMA tag\n");
558 			return (ENOMEM);
559 		}
560 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
561 		    (void **)&sc->mfi_tb_ioc_init_desc,
562 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
563 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
564 			return (ENOMEM);
565 		}
566 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
567 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
568 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
569 		    &sc->mfi_tb_ioc_init_busaddr, 0);
570 	}
571 	/*
572 	 * Create the dma tag for data buffers.  Used both for block I/O
573 	 * and for various internal data queries.
574 	 */
575 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
576 				1, 0,			/* algnmnt, boundary */
577 				BUS_SPACE_MAXADDR,	/* lowaddr */
578 				BUS_SPACE_MAXADDR,	/* highaddr */
579 				NULL, NULL,		/* filter, filterarg */
580 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
581 				sc->mfi_max_sge,	/* nsegments */
582 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
583 				BUS_DMA_ALLOCNOW,	/* flags */
584 				busdma_lock_mutex,	/* lockfunc */
585 				&sc->mfi_io_lock,	/* lockfuncarg */
586 				&sc->mfi_buffer_dmat)) {
587 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
588 		return (ENOMEM);
589 	}
590 
591 	/*
592 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
593 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
594 	 * entry, so the calculated size here will be 1 more than
595 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
596 	 */
597 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
598 	    sizeof(struct mfi_hwcomms);
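	/*
	 * For example, with mfi_max_fw_cmds == 128 this is 4 * 128 bytes of
	 * reply queue plus sizeof(struct mfi_hwcomms), giving 129 reply
	 * slots once the entry embedded in mfi_hwcomms is counted.
	 */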
599 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
600 				1, 0,			/* algnmnt, boundary */
601 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
602 				BUS_SPACE_MAXADDR,	/* highaddr */
603 				NULL, NULL,		/* filter, filterarg */
604 				commsz,			/* maxsize */
605 				1,			/* nsegments */
606 				commsz,			/* maxsegsize */
607 				0,			/* flags */
608 				NULL, NULL,		/* lockfunc, lockarg */
609 				&sc->mfi_comms_dmat)) {
610 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
611 		return (ENOMEM);
612 	}
613 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
614 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
615 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
616 		return (ENOMEM);
617 	}
618 	bzero(sc->mfi_comms, commsz);
619 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
620 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
621 	/*
622 	 * Allocate DMA memory for the command frames.  Keep them in the
623 	 * lower 4GB for efficiency.  Calculate the size of the commands at
624 	 * the same time; each command is one 64 byte frame plus a set of
625 	 * additional frames for holding sg lists or other data.
626 	 * The assumption here is that the SG list will start at the second
627 	 * frame and not use the unused bytes in the first frame.  While this
628 	 * isn't technically correct, it simplifies the calculation and allows
629 	 * for command frames that might be larger than an mfi_io_frame.
630 	 */
631 	if (sizeof(bus_addr_t) == 8) {
632 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
633 		sc->mfi_flags |= MFI_FLAGS_SG64;
634 	} else {
635 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
636 	}
637 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
638 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
639 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
640 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
641 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
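	/*
	 * Worked example (assuming 4KB pages, a 128KB MFI_MAXPHYS and
	 * 8-byte mfi_sg32 entries): mfi_max_sge is 33, so frames =
	 * (8 * 33 - 1) / 64 + 2 = 6 and mfi_cmd_size is 384 bytes per
	 * command.
	 */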
642 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
643 				64, 0,			/* algnmnt, boundary */
644 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
645 				BUS_SPACE_MAXADDR,	/* highaddr */
646 				NULL, NULL,		/* filter, filterarg */
647 				framessz,		/* maxsize */
648 				1,			/* nsegments */
649 				framessz,		/* maxsegsize */
650 				0,			/* flags */
651 				NULL, NULL,		/* lockfunc, lockarg */
652 				&sc->mfi_frames_dmat)) {
653 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
654 		return (ENOMEM);
655 	}
656 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
657 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
658 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
659 		return (ENOMEM);
660 	}
661 	bzero(sc->mfi_frames, framessz);
662 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
663 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
664 	/*
665 	 * Allocate DMA memory for the frame sense data.  Keep them in the
666 	 * lower 4GB for efficiency
667 	 */
668 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
669 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
670 				4, 0,			/* algnmnt, boundary */
671 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
672 				BUS_SPACE_MAXADDR,	/* highaddr */
673 				NULL, NULL,		/* filter, filterarg */
674 				sensesz,		/* maxsize */
675 				1,			/* nsegments */
676 				sensesz,		/* maxsegsize */
677 				0,			/* flags */
678 				NULL, NULL,		/* lockfunc, lockarg */
679 				&sc->mfi_sense_dmat)) {
680 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
681 		return (ENOMEM);
682 	}
683 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
684 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
685 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
686 		return (ENOMEM);
687 	}
688 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
689 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
690 	if ((error = mfi_alloc_commands(sc)) != 0)
691 		return (error);
692 
693 	/* Before moving the FW to the operational state, check whether
694 	 * host memory is required by the FW or not.
695 	 */
696 
697 	/* ThunderBolt MFI_IOC2 INIT */
698 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
699 		sc->mfi_disable_intr(sc);
700 		mtx_lock(&sc->mfi_io_lock);
701 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
702 			device_printf(sc->mfi_dev,
703 			    "TB Init has failed with error %d\n", error);
704 			mtx_unlock(&sc->mfi_io_lock);
705 			return (error);
706 		}
707 		mtx_unlock(&sc->mfi_io_lock);
708 
709 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
710 			return (error);
711 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
712 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
713 		    &sc->mfi_intr)) {
714 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
715 			return (EINVAL);
716 		}
717 		sc->mfi_intr_ptr = mfi_intr_tbolt;
718 		sc->mfi_enable_intr(sc);
719 	} else {
720 		if ((error = mfi_comms_init(sc)) != 0)
721 			return (error);
722 
723 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
724 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
725 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
726 			return (EINVAL);
727 		}
728 		sc->mfi_intr_ptr = mfi_intr;
729 		sc->mfi_enable_intr(sc);
730 	}
731 	if ((error = mfi_get_controller_info(sc)) != 0)
732 		return (error);
733 	sc->disableOnlineCtrlReset = 0;
734 
735 	/* Register a config hook to probe the bus for arrays */
736 	sc->mfi_ich.ich_func = mfi_startup;
737 	sc->mfi_ich.ich_arg = sc;
738 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
739 		device_printf(sc->mfi_dev, "Cannot establish configuration "
740 		    "hook\n");
741 		return (EINVAL);
742 	}
743 	mtx_lock(&sc->mfi_io_lock);
744 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
745 		mtx_unlock(&sc->mfi_io_lock);
746 		return (error);
747 	}
748 	mtx_unlock(&sc->mfi_io_lock);
749 
750 	/*
751 	 * Register a shutdown handler.
752 	 */
753 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
754 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
755 		device_printf(sc->mfi_dev, "Warning: shutdown event "
756 		    "registration failed\n");
757 	}
758 
759 	/*
760 	 * Create the control device for doing management
761 	 */
762 	unit = device_get_unit(sc->mfi_dev);
763 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
764 	    0640, "mfi%d", unit);
765 	if (unit == 0)
766 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
767 	if (sc->mfi_cdev != NULL)
768 		sc->mfi_cdev->si_drv1 = sc;
769 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
770 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
771 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
772 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
773 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
774 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
775 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
776 	    &sc->mfi_keep_deleted_volumes, 0,
777 	    "Don't detach the mfid device for a busy volume that is deleted");
778 
779 	device_add_child(sc->mfi_dev, "mfip", -1);
780 	bus_generic_attach(sc->mfi_dev);
781 
782 	/* Start the timeout watchdog */
783 	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
784 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
785 	    mfi_timeout, sc);
786 
787 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
788 		mtx_lock(&sc->mfi_io_lock);
789 		mfi_tbolt_sync_map_info(sc);
790 		mtx_unlock(&sc->mfi_io_lock);
791 	}
792 
793 	return (0);
794 }
795 
796 static int
797 mfi_alloc_commands(struct mfi_softc *sc)
798 {
799 	struct mfi_command *cm;
800 	int i, j;
801 
802 	/*
803 	 * XXX Should we allocate all the commands up front, or allocate on
804 	 * demand later like 'aac' does?
805 	 */
806 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
807 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
808 
809 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
810 		cm = &sc->mfi_commands[i];
811 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
812 		    sc->mfi_cmd_size * i);
813 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
814 		    sc->mfi_cmd_size * i;
815 		cm->cm_frame->header.context = i;
816 		cm->cm_sense = &sc->mfi_sense[i];
817 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
818 		cm->cm_sc = sc;
819 		cm->cm_index = i;
820 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
821 		    &cm->cm_dmamap) == 0) {
822 			mtx_lock(&sc->mfi_io_lock);
823 			mfi_release_command(cm);
824 			mtx_unlock(&sc->mfi_io_lock);
825 		} else {
826 			device_printf(sc->mfi_dev, "Failed to allocate %d "
827 			    "command blocks, only allocated %d\n",
828 			    sc->mfi_max_fw_cmds, i);
829 			for (j = 0; j < i; j++) {
830 				cm = &sc->mfi_commands[j];
831 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
832 				    cm->cm_dmamap);
833 			}
834 			free(sc->mfi_commands, M_MFIBUF);
835 			sc->mfi_commands = NULL;
836 
837 			return (ENOMEM);
838 		}
839 	}
840 
841 	return (0);
842 }
843 
844 void
845 mfi_release_command(struct mfi_command *cm)
846 {
847 	struct mfi_frame_header *hdr;
848 	uint32_t *hdr_data;
849 
850 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
851 
852 	/*
853 	 * Zero out the important fields of the frame, but make sure the
854 	 * context field is preserved.  For efficiency, handle the fields
855 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
856 	 */
857 	hdr = &cm->cm_frame->header;
858 	if (cm->cm_data != NULL && hdr->sg_count) {
859 		cm->cm_sg->sg32[0].len = 0;
860 		cm->cm_sg->sg32[0].addr = 0;
861 	}
862 
863 	/*
864 	 * The command may be on other queues, e.g. the busy queue, depending
865 	 * on the flow of a previous call to mfi_mapcmd, so ensure it is
866 	 * dequeued properly.
867 	 */
868 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
869 		mfi_remove_busy(cm);
870 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
871 		mfi_remove_ready(cm);
872 
873 	/* We're not expecting it to be on any other queue but check */
874 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
875 		panic("Command %p is still on another queue, flags = %#x",
876 		    cm, cm->cm_flags);
877 	}
878 
879 	/* tbolt cleanup */
880 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
881 		mfi_tbolt_return_cmd(cm->cm_sc,
882 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
883 		    cm);
884 	}
885 
886 	hdr_data = (uint32_t *)cm->cm_frame;
887 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
888 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
889 	hdr_data[4] = 0;	/* flags, timeout */
890 	hdr_data[5] = 0;	/* data_len */
891 
892 	cm->cm_extra_frames = 0;
893 	cm->cm_flags = 0;
894 	cm->cm_complete = NULL;
895 	cm->cm_private = NULL;
896 	cm->cm_data = NULL;
897 	cm->cm_sg = NULL;
898 	cm->cm_total_frame_size = 0;
899 	cm->retry_for_fw_reset = 0;
900 
901 	mfi_enqueue_free(cm);
902 }
903 
904 int
905 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
906     uint32_t opcode, void **bufp, size_t bufsize)
907 {
908 	struct mfi_command *cm;
909 	struct mfi_dcmd_frame *dcmd;
910 	void *buf = NULL;
911 	uint32_t context = 0;
912 
913 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
914 
915 	cm = mfi_dequeue_free(sc);
916 	if (cm == NULL)
917 		return (EBUSY);
918 
919 	/* Zero out the MFI frame */
920 	context = cm->cm_frame->header.context;
921 	bzero(cm->cm_frame, sizeof(union mfi_frame));
922 	cm->cm_frame->header.context = context;
923 
924 	if ((bufsize > 0) && (bufp != NULL)) {
925 		if (*bufp == NULL) {
926 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
927 			if (buf == NULL) {
928 				mfi_release_command(cm);
929 				return (ENOMEM);
930 			}
931 			*bufp = buf;
932 		} else {
933 			buf = *bufp;
934 		}
935 	}
936 
937 	dcmd =  &cm->cm_frame->dcmd;
938 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
939 	dcmd->header.cmd = MFI_CMD_DCMD;
940 	dcmd->header.timeout = 0;
941 	dcmd->header.flags = 0;
942 	dcmd->header.data_len = bufsize;
943 	dcmd->header.scsi_status = 0;
944 	dcmd->opcode = opcode;
945 	cm->cm_sg = &dcmd->sgl;
946 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
947 	cm->cm_flags = 0;
948 	cm->cm_data = buf;
949 	cm->cm_private = buf;
950 	cm->cm_len = bufsize;
951 
952 	*cmp = cm;
953 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
954 		*bufp = buf;
955 	return (0);
956 }
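
/*
 * Typical mfi_dcmd_command() usage sketch (illustrative): with
 * mfi_io_lock held, a caller builds the DCMD here, sets cm_flags (e.g.
 * MFI_CMD_DATAIN | MFI_CMD_POLLED) and submits the command with
 * mfi_mapcmd(), as mfi_get_controller_info() does below.
 */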
957 
958 static int
959 mfi_comms_init(struct mfi_softc *sc)
960 {
961 	struct mfi_command *cm;
962 	struct mfi_init_frame *init;
963 	struct mfi_init_qinfo *qinfo;
964 	int error;
965 	uint32_t context = 0;
966 
967 	mtx_lock(&sc->mfi_io_lock);
968 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
969 		mtx_unlock(&sc->mfi_io_lock);
970 		return (EBUSY);
971 	}
972 
973 	/* Zero out the MFI frame */
974 	context = cm->cm_frame->header.context;
975 	bzero(cm->cm_frame, sizeof(union mfi_frame));
976 	cm->cm_frame->header.context = context;
977 
978 	/*
979 	 * Abuse the SG list area of the frame to hold the init_qinfo
980 	 * object.
981 	 */
982 	init = &cm->cm_frame->init;
983 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
984 
985 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
986 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
987 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
988 	    offsetof(struct mfi_hwcomms, hw_reply_q);
989 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
990 	    offsetof(struct mfi_hwcomms, hw_pi);
991 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
992 	    offsetof(struct mfi_hwcomms, hw_ci);
993 
994 	init->header.cmd = MFI_CMD_INIT;
995 	init->header.data_len = sizeof(struct mfi_init_qinfo);
996 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
997 	cm->cm_data = NULL;
998 	cm->cm_flags = MFI_CMD_POLLED;
999 
1000 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1001 		device_printf(sc->mfi_dev, "failed to send init command\n");
1002 	mfi_release_command(cm);
1003 	mtx_unlock(&sc->mfi_io_lock);
1004 
1005 	return (error);
1006 }
1007 
1008 static int
1009 mfi_get_controller_info(struct mfi_softc *sc)
1010 {
1011 	struct mfi_command *cm = NULL;
1012 	struct mfi_ctrl_info *ci = NULL;
1013 	uint32_t max_sectors_1, max_sectors_2;
1014 	int error;
1015 
1016 	mtx_lock(&sc->mfi_io_lock);
1017 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1018 	    (void **)&ci, sizeof(*ci));
1019 	if (error)
1020 		goto out;
1021 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1022 
1023 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1024 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1025 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1026 		    MFI_SECTOR_LEN;
1027 		error = 0;
1028 		goto out;
1029 	}
1030 
1031 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1032 	    BUS_DMASYNC_POSTREAD);
1033 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1034 
1035 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1036 	max_sectors_2 = ci->max_request_size;
1037 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1038 	sc->disableOnlineCtrlReset =
1039 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1040 
1041 out:
1042 	if (ci)
1043 		free(ci, M_MFIBUF);
1044 	if (cm)
1045 		mfi_release_command(cm);
1046 	mtx_unlock(&sc->mfi_io_lock);
1047 	return (error);
1048 }
1049 
1050 static int
1051 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1052 {
1053 	struct mfi_command *cm = NULL;
1054 	int error;
1055 
1056 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1057 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1058 	    (void **)log_state, sizeof(**log_state));
1059 	if (error)
1060 		goto out;
1061 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1062 
1063 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1064 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1065 		goto out;
1066 	}
1067 
1068 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1069 	    BUS_DMASYNC_POSTREAD);
1070 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1071 
1072 out:
1073 	if (cm)
1074 		mfi_release_command(cm);
1075 
1076 	return (error);
1077 }
1078 
1079 int
1080 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1081 {
1082 	struct mfi_evt_log_state *log_state = NULL;
1083 	union mfi_evt class_locale;
1084 	int error = 0;
1085 	uint32_t seq;
1086 
1087 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1088 
1089 	class_locale.members.reserved = 0;
1090 	class_locale.members.locale = mfi_event_locale;
1091 	class_locale.members.evt_class  = mfi_event_class;
1092 
1093 	if (seq_start == 0) {
1094 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1095 			goto out;
1096 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1097 
1098 		/*
1099 		 * Walk through any events that fired since the last
1100 		 * shutdown.
1101 		 */
1102 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1103 		    log_state->newest_seq_num)) != 0)
1104 			goto out;
1105 		seq = log_state->newest_seq_num;
1106 	} else
1107 		seq = seq_start;
1108 	error = mfi_aen_register(sc, seq, class_locale.word);
1109 out:
1110 	free(log_state, M_MFIBUF);
1111 
1112 	return (error);
1113 }
1114 
1115 int
1116 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1117 {
1118 
1119 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1120 	cm->cm_complete = NULL;
1121 
1122 	/*
1123 	 * MegaCli can issue a DCMD of 0.  In this case, do nothing
1124 	 * and return 0 to it as the status.
1125 	 */
1126 	if (cm->cm_frame->dcmd.opcode == 0) {
1127 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1128 		cm->cm_error = 0;
1129 		return (cm->cm_error);
1130 	}
1131 	mfi_enqueue_ready(cm);
1132 	mfi_startio(sc);
1133 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1134 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1135 	return (cm->cm_error);
1136 }
1137 
1138 void
1139 mfi_free(struct mfi_softc *sc)
1140 {
1141 	struct mfi_command *cm;
1142 	int i;
1143 
1144 	callout_drain(&sc->mfi_watchdog_callout);
1145 
1146 	if (sc->mfi_cdev != NULL)
1147 		destroy_dev(sc->mfi_cdev);
1148 
1149 	if (sc->mfi_commands != NULL) {
1150 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1151 			cm = &sc->mfi_commands[i];
1152 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1153 		}
1154 		free(sc->mfi_commands, M_MFIBUF);
1155 		sc->mfi_commands = NULL;
1156 	}
1157 
1158 	if (sc->mfi_intr)
1159 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1160 	if (sc->mfi_irq != NULL)
1161 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1162 		    sc->mfi_irq);
1163 
1164 	if (sc->mfi_sense_busaddr != 0)
1165 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1166 	if (sc->mfi_sense != NULL)
1167 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1168 		    sc->mfi_sense_dmamap);
1169 	if (sc->mfi_sense_dmat != NULL)
1170 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1171 
1172 	if (sc->mfi_frames_busaddr != 0)
1173 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1174 	if (sc->mfi_frames != NULL)
1175 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1176 		    sc->mfi_frames_dmamap);
1177 	if (sc->mfi_frames_dmat != NULL)
1178 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1179 
1180 	if (sc->mfi_comms_busaddr != 0)
1181 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1182 	if (sc->mfi_comms != NULL)
1183 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1184 		    sc->mfi_comms_dmamap);
1185 	if (sc->mfi_comms_dmat != NULL)
1186 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1187 
1188 	/* ThunderBolt contiguous memory free here */
1189 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1190 		if (sc->mfi_tb_busaddr != 0)
1191 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1192 		if (sc->request_message_pool != NULL)
1193 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1194 			    sc->mfi_tb_dmamap);
1195 		if (sc->mfi_tb_dmat != NULL)
1196 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1197 
1198 		/* Version buffer memory free */
1199 		/* Start LSIP200113393 */
1200 		if (sc->verbuf_h_busaddr != 0)
1201 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1202 		if (sc->verbuf != NULL)
1203 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1204 			    sc->verbuf_h_dmamap);
1205 		if (sc->verbuf_h_dmat != NULL)
1206 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1207 
1208 		/* End LSIP200113393 */
1209 		/* ThunderBolt INIT packet memory Free */
1210 		if (sc->mfi_tb_init_busaddr != 0)
1211 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1212 			    sc->mfi_tb_init_dmamap);
1213 		if (sc->mfi_tb_init != NULL)
1214 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1215 			    sc->mfi_tb_init_dmamap);
1216 		if (sc->mfi_tb_init_dmat != NULL)
1217 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1218 
1219 		/* ThunderBolt IOC Init Desc memory free here */
1220 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1221 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1222 			    sc->mfi_tb_ioc_init_dmamap);
1223 		if (sc->mfi_tb_ioc_init_desc != NULL)
1224 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1225 			    sc->mfi_tb_ioc_init_desc,
1226 			    sc->mfi_tb_ioc_init_dmamap);
1227 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1228 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1229 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1230 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1231 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1232 					free(sc->mfi_cmd_pool_tbolt[i],
1233 					    M_MFIBUF);
1234 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1235 				}
1236 			}
1237 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1238 			sc->mfi_cmd_pool_tbolt = NULL;
1239 		}
1240 		if (sc->request_desc_pool != NULL) {
1241 			free(sc->request_desc_pool, M_MFIBUF);
1242 			sc->request_desc_pool = NULL;
1243 		}
1244 	}
1245 	if (sc->mfi_buffer_dmat != NULL)
1246 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1247 	if (sc->mfi_parent_dmat != NULL)
1248 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1249 
1250 	if (mtx_initialized(&sc->mfi_io_lock)) {
1251 		mtx_destroy(&sc->mfi_io_lock);
1252 		sx_destroy(&sc->mfi_config_lock);
1253 	}
1254 
1255 	return;
1256 }
1257 
1258 static void
1259 mfi_startup(void *arg)
1260 {
1261 	struct mfi_softc *sc;
1262 
1263 	sc = (struct mfi_softc *)arg;
1264 
1265 	config_intrhook_disestablish(&sc->mfi_ich);
1266 
1267 	sc->mfi_enable_intr(sc);
1268 	sx_xlock(&sc->mfi_config_lock);
1269 	mtx_lock(&sc->mfi_io_lock);
1270 	mfi_ldprobe(sc);
1271 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1272 	    mfi_syspdprobe(sc);
1273 	mtx_unlock(&sc->mfi_io_lock);
1274 	sx_xunlock(&sc->mfi_config_lock);
1275 }
1276 
1277 static void
1278 mfi_intr(void *arg)
1279 {
1280 	struct mfi_softc *sc;
1281 	struct mfi_command *cm;
1282 	uint32_t pi, ci, context;
1283 
1284 	sc = (struct mfi_softc *)arg;
1285 
1286 	if (sc->mfi_check_clear_intr(sc))
1287 		return;
1288 
1289 restart:
1290 	pi = sc->mfi_comms->hw_pi;
1291 	ci = sc->mfi_comms->hw_ci;
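	/*
	 * The reply queue holds mfi_max_fw_cmds + 1 entries (see the comms
	 * allocation in mfi_attach()), so both indexes wrap at that bound.
	 */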
1292 	mtx_lock(&sc->mfi_io_lock);
1293 	while (ci != pi) {
1294 		context = sc->mfi_comms->hw_reply_q[ci];
1295 		if (context < sc->mfi_max_fw_cmds) {
1296 			cm = &sc->mfi_commands[context];
1297 			mfi_remove_busy(cm);
1298 			cm->cm_error = 0;
1299 			mfi_complete(sc, cm);
1300 		}
1301 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1302 			ci = 0;
1303 	}
1304 
1305 	sc->mfi_comms->hw_ci = ci;
1306 
1307 	/* Give deferred I/O a chance to run */
1308 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1309 	mfi_startio(sc);
1310 	mtx_unlock(&sc->mfi_io_lock);
1311 
1312 	/*
1313 	 * Dummy read to flush the bus; this ensures that the indexes are up
1314 	 * to date.  Restart processing if more commands have come in.
1315 	 */
1316 	(void)sc->mfi_read_fw_status(sc);
1317 	if (pi != sc->mfi_comms->hw_pi)
1318 		goto restart;
1319 
1320 	return;
1321 }
1322 
1323 int
1324 mfi_shutdown(struct mfi_softc *sc)
1325 {
1326 	struct mfi_dcmd_frame *dcmd;
1327 	struct mfi_command *cm;
1328 	int error;
1329 
1331 	if (sc->mfi_aen_cm != NULL) {
1332 		sc->cm_aen_abort = 1;
1333 		mfi_abort(sc, &sc->mfi_aen_cm);
1334 	}
1335 
1336 	if (sc->mfi_map_sync_cm != NULL) {
1337 		sc->cm_map_abort = 1;
1338 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1339 	}
1340 
1341 	mtx_lock(&sc->mfi_io_lock);
1342 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1343 	if (error) {
1344 		mtx_unlock(&sc->mfi_io_lock);
1345 		return (error);
1346 	}
1347 
1348 	dcmd = &cm->cm_frame->dcmd;
1349 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1350 	cm->cm_flags = MFI_CMD_POLLED;
1351 	cm->cm_data = NULL;
1352 
1353 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1354 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1355 
1356 	mfi_release_command(cm);
1357 	mtx_unlock(&sc->mfi_io_lock);
1358 	return (error);
1359 }
1360 
1361 static void
1362 mfi_syspdprobe(struct mfi_softc *sc)
1363 {
1364 	struct mfi_frame_header *hdr;
1365 	struct mfi_command *cm = NULL;
1366 	struct mfi_pd_list *pdlist = NULL;
1367 	struct mfi_system_pd *syspd, *tmp;
1368 	struct mfi_system_pending *syspd_pend;
1369 	int error, i, found;
1370 
1371 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1372 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1373 	/* Add SYSTEM PD's */
1374 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1375 	    (void **)&pdlist, sizeof(*pdlist));
1376 	if (error) {
1377 		device_printf(sc->mfi_dev,
1378 		    "Error while forming SYSTEM PD list\n");
1379 		goto out;
1380 	}
1381 
1382 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1383 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1384 	cm->cm_frame->dcmd.mbox[1] = 0;
1385 	if (mfi_mapcmd(sc, cm) != 0) {
1386 		device_printf(sc->mfi_dev,
1387 		    "Failed to get syspd device listing\n");
1388 		goto out;
1389 	}
1390 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1391 	    BUS_DMASYNC_POSTREAD);
1392 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1393 	hdr = &cm->cm_frame->header;
1394 	if (hdr->cmd_status != MFI_STAT_OK) {
1395 		device_printf(sc->mfi_dev,
1396 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1397 		goto out;
1398 	}
1399 	/* Get each PD and add it to the system */
1400 	for (i = 0; i < pdlist->count; i++) {
1401 		if (pdlist->addr[i].device_id ==
1402 		    pdlist->addr[i].encl_device_id)
1403 			continue;
1404 		found = 0;
1405 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1406 			if (syspd->pd_id == pdlist->addr[i].device_id)
1407 				found = 1;
1408 		}
1409 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1410 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1411 				found = 1;
1412 		}
1413 		if (found == 0)
1414 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1415 	}
1416 	/* Delete SYSPD's whose state has been changed */
1417 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1418 		found = 0;
1419 		for (i = 0; i < pdlist->count; i++) {
1420 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1421 				found = 1;
1422 				break;
1423 			}
1424 		}
1425 		if (found == 0) {
1426 			device_printf(sc->mfi_dev, "Deleting missing syspd %d\n", syspd->pd_id);
1427 			mtx_unlock(&sc->mfi_io_lock);
1428 			mtx_lock(&Giant);
1429 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1430 			mtx_unlock(&Giant);
1431 			mtx_lock(&sc->mfi_io_lock);
1432 		}
1433 	}
1434 out:
1435 	if (pdlist)
1436 	    free(pdlist, M_MFIBUF);
1437 	if (cm)
1438 	    mfi_release_command(cm);
1439 
1440 	return;
1441 }
1442 
1443 static void
1444 mfi_ldprobe(struct mfi_softc *sc)
1445 {
1446 	struct mfi_frame_header *hdr;
1447 	struct mfi_command *cm = NULL;
1448 	struct mfi_ld_list *list = NULL;
1449 	struct mfi_disk *ld;
1450 	struct mfi_disk_pending *ld_pend;
1451 	int error, i;
1452 
1453 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1454 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1455 
1456 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1457 	    (void **)&list, sizeof(*list));
1458 	if (error)
1459 		goto out;
1460 
1461 	cm->cm_flags = MFI_CMD_DATAIN;
1462 	if (mfi_wait_command(sc, cm) != 0) {
1463 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1464 		goto out;
1465 	}
1466 
1467 	hdr = &cm->cm_frame->header;
1468 	if (hdr->cmd_status != MFI_STAT_OK) {
1469 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1470 		    hdr->cmd_status);
1471 		goto out;
1472 	}
1473 
1474 	for (i = 0; i < list->ld_count; i++) {
1475 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1476 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1477 				goto skip_add;
1478 		}
1479 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1480 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1481 				goto skip_add;
1482 		}
1483 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1484 	skip_add:;
1485 	}
1486 out:
1487 	if (list)
1488 		free(list, M_MFIBUF);
1489 	if (cm)
1490 		mfi_release_command(cm);
1491 
1492 	return;
1493 }
1494 
1495 /*
1496  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1497  * the bits in 24-31 are all set, then it is the number of seconds since
1498  * boot.
1499  */
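/*
 * For example, 0xff00003c renders as "boot + 60s", while 0x0000003c
 * renders as "60s".
 */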
1500 static const char *
1501 format_timestamp(uint32_t timestamp)
1502 {
1503 	static char buffer[32];
1504 
1505 	if ((timestamp & 0xff000000) == 0xff000000)
1506 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1507 		    0x00ffffff);
1508 	else
1509 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1510 	return (buffer);
1511 }
1512 
1513 static const char *
1514 format_class(int8_t class)
1515 {
1516 	static char buffer[6];
1517 
1518 	switch (class) {
1519 	case MFI_EVT_CLASS_DEBUG:
1520 		return ("debug");
1521 	case MFI_EVT_CLASS_PROGRESS:
1522 		return ("progress");
1523 	case MFI_EVT_CLASS_INFO:
1524 		return ("info");
1525 	case MFI_EVT_CLASS_WARNING:
1526 		return ("WARN");
1527 	case MFI_EVT_CLASS_CRITICAL:
1528 		return ("CRIT");
1529 	case MFI_EVT_CLASS_FATAL:
1530 		return ("FATAL");
1531 	case MFI_EVT_CLASS_DEAD:
1532 		return ("DEAD");
1533 	default:
1534 		snprintf(buffer, sizeof(buffer), "%d", class);
1535 		return (buffer);
1536 	}
1537 }
1538 
1539 static void
1540 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1541 {
1542 	struct mfi_system_pd *syspd = NULL;
1543 
1544 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1545 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1546 	    format_class(detail->evt_class.members.evt_class),
1547 	    detail->description);
1548 
1549 	/* Don't act on old AENs or while shutting down */
1550 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1551 		return;
1552 
1553 	switch (detail->arg_type) {
1554 	case MR_EVT_ARGS_NONE:
1555 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1556 			device_printf(sc->mfi_dev, "HostBus scan raised\n");
1557 			if (mfi_detect_jbod_change) {
1558 				/*
1559 				 * Probe for new SYSPD's and Delete
1560 				 * invalid SYSPD's
1561 				 */
1562 				sx_xlock(&sc->mfi_config_lock);
1563 				mtx_lock(&sc->mfi_io_lock);
1564 				mfi_syspdprobe(sc);
1565 				mtx_unlock(&sc->mfi_io_lock);
1566 				sx_xunlock(&sc->mfi_config_lock);
1567 			}
1568 		}
1569 		break;
1570 	case MR_EVT_ARGS_LD_STATE:
1571 		/* At load time the driver reads all events starting from
1572 		 * the one logged after the last shutdown, so skip these
1573 		 * old events.
1574 		 */
1575 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1576 			/* Remove the LD */
1577 			struct mfi_disk *ld;
1578 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1579 				if (ld->ld_id ==
1580 				    detail->args.ld_state.ld.target_id)
1581 					break;
1582 			}
1583 			/*
1584 			 * Fix for kernel panics when an SSCD is removed:
1585 			 * KASSERT(ld != NULL, ("volume disappeared"));
1586 			 */
1587 			if (ld != NULL) {
1588 				mtx_lock(&Giant);
1589 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1590 				mtx_unlock(&Giant);
1591 			}
1592 		}
1593 		break;
1594 	case MR_EVT_ARGS_PD:
1595 		if (detail->code == MR_EVT_PD_REMOVED) {
1596 			if (mfi_detect_jbod_change) {
1597 				/*
1598 				 * If the removed device is a SYSPD then
1599 				 * delete it
1600 				 */
1601 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1602 				    pd_link) {
1603 					if (syspd->pd_id ==
1604 					    detail->args.pd.device_id) {
1605 						mtx_lock(&Giant);
1606 						device_delete_child(
1607 						    sc->mfi_dev,
1608 						    syspd->pd_dev);
1609 						mtx_unlock(&Giant);
1610 						break;
1611 					}
1612 				}
1613 			}
1614 		}
1615 		if (detail->code == MR_EVT_PD_INSERTED) {
1616 			if (mfi_detect_jbod_change) {
1617 				/* Probe for new SYSPD's */
1618 				sx_xlock(&sc->mfi_config_lock);
1619 				mtx_lock(&sc->mfi_io_lock);
1620 				mfi_syspdprobe(sc);
1621 				mtx_unlock(&sc->mfi_io_lock);
1622 				sx_xunlock(&sc->mfi_config_lock);
1623 			}
1624 		}
1625 		if (sc->mfi_cam_rescan_cb != NULL &&
1626 		    (detail->code == MR_EVT_PD_INSERTED ||
1627 		    detail->code == MR_EVT_PD_REMOVED)) {
1628 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1629 		}
1630 		break;
1631 	}
1632 }
1633 
1634 static void
1635 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1636 {
1637 	struct mfi_evt_queue_elm *elm;
1638 
1639 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1640 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1641 	if (elm == NULL)
1642 		return;
1643 	memcpy(&elm->detail, detail, sizeof(*detail));
1644 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1645 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1646 }
1647 
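/*
 * Drain queued events into a local list while holding mfi_io_lock, then
 * decode them unlocked; mfi_decode_evt() may need to sleep or take
 * Giant to delete child devices.
 */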
1648 static void
1649 mfi_handle_evt(void *context, int pending)
1650 {
1651 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1652 	struct mfi_softc *sc;
1653 	struct mfi_evt_queue_elm *elm;
1654 
1655 	sc = context;
1656 	TAILQ_INIT(&queue);
1657 	mtx_lock(&sc->mfi_io_lock);
1658 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1659 	mtx_unlock(&sc->mfi_io_lock);
1660 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1661 		TAILQ_REMOVE(&queue, elm, link);
1662 		mfi_decode_evt(sc, &elm->detail);
1663 		free(elm, M_MFIBUF);
1664 	}
1665 }
1666 
1667 static int
1668 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1669 {
1670 	struct mfi_command *cm;
1671 	struct mfi_dcmd_frame *dcmd;
1672 	union mfi_evt current_aen, prior_aen;
1673 	struct mfi_evt_detail *ed = NULL;
1674 	int error = 0;
1675 
1676 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1677 
1678 	current_aen.word = locale;
1679 	if (sc->mfi_aen_cm != NULL) {
1680 		prior_aen.word =
1681 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <=
		    current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
1685 			return (0);
1686 		} else {
1687 			prior_aen.members.locale |= current_aen.members.locale;
1688 			if (prior_aen.members.evt_class
1689 			    < current_aen.members.evt_class)
1690 				current_aen.members.evt_class =
1691 				    prior_aen.members.evt_class;
1692 			mfi_abort(sc, &sc->mfi_aen_cm);
1693 		}
1694 	}
1695 
1696 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1697 	    (void **)&ed, sizeof(*ed));
1698 	if (error)
1699 		goto out;
1700 
1701 	dcmd = &cm->cm_frame->dcmd;
1702 	((uint32_t *)&dcmd->mbox)[0] = seq;
1703 	((uint32_t *)&dcmd->mbox)[1] = locale;
1704 	cm->cm_flags = MFI_CMD_DATAIN;
1705 	cm->cm_complete = mfi_aen_complete;
1706 
1707 	sc->last_seq_num = seq;
1708 	sc->mfi_aen_cm = cm;
1709 
1710 	mfi_enqueue_ready(cm);
1711 	mfi_startio(sc);
1712 
1713 out:
1714 	return (error);
1715 }
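
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver; it can
 * be extracted to a userland file and run).  mfi_aen_register() above
 * skips re-registration when the pending AEN already covers the request:
 * the pending event class must be at least as verbose (numerically <=)
 * and the requested locale bits must be a subset of the pending ones.
 * The subset test uses the slightly opaque form !((prior & cur) ^ cur);
 * this sketch shows it behaves like the more familiar
 * (prior & cur) == cur.
 */
#include <assert.h>
#include <stdint.h>

static int
locale_covered(uint16_t prior, uint16_t cur)
{

	return (!((prior & cur) ^ cur));	/* same form as the driver */
}

int
main(void)
{

	assert(locale_covered(0x00ff, 0x0001));		/* 0x0001 is a subset */
	assert(!locale_covered(0x0001, 0x00ff));	/* 0x00ff is not */
	return (0);
}
#endif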
1716 
1717 static void
1718 mfi_aen_complete(struct mfi_command *cm)
1719 {
1720 	struct mfi_frame_header *hdr;
1721 	struct mfi_softc *sc;
1722 	struct mfi_evt_detail *detail;
1723 	struct mfi_aen *mfi_aen_entry, *tmp;
1724 	int seq = 0, aborted = 0;
1725 
1726 	sc = cm->cm_sc;
1727 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1728 
1729 	if (sc->mfi_aen_cm == NULL)
1730 		return;
1731 
1732 	hdr = &cm->cm_frame->header;
1733 
1734 	if (sc->cm_aen_abort ||
1735 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1736 		sc->cm_aen_abort = 0;
1737 		aborted = 1;
1738 	} else {
1739 		sc->mfi_aen_triggered = 1;
1740 		if (sc->mfi_poll_waiting) {
1741 			sc->mfi_poll_waiting = 0;
1742 			selwakeup(&sc->mfi_select);
1743 		}
1744 		detail = cm->cm_data;
1745 		mfi_queue_evt(sc, detail);
1746 		seq = detail->seq + 1;
1747 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1748 		    tmp) {
1749 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1750 			    aen_link);
1751 			PROC_LOCK(mfi_aen_entry->p);
1752 			kern_psignal(mfi_aen_entry->p, SIGIO);
1753 			PROC_UNLOCK(mfi_aen_entry->p);
1754 			free(mfi_aen_entry, M_MFIBUF);
1755 		}
1756 	}
1757 
1758 	free(cm->cm_data, M_MFIBUF);
1759 	wakeup(&sc->mfi_aen_cm);
1760 	sc->mfi_aen_cm = NULL;
1761 	mfi_release_command(cm);
1762 
1763 	/* set it up again so the driver can catch more events */
1764 	if (!aborted)
1765 		mfi_aen_setup(sc, seq);
1766 }
1767 
1768 #define MAX_EVENTS 15
1769 
1770 static int
1771 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1772 {
1773 	struct mfi_command *cm;
1774 	struct mfi_dcmd_frame *dcmd;
1775 	struct mfi_evt_list *el;
1776 	union mfi_evt class_locale;
1777 	int error, i, seq, size;
1778 
1779 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1780 
1781 	class_locale.members.reserved = 0;
1782 	class_locale.members.locale = mfi_event_locale;
1783 	class_locale.members.evt_class  = mfi_event_class;
1784 
1785 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1786 		* (MAX_EVENTS - 1);
1787 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1788 	if (el == NULL)
1789 		return (ENOMEM);
1790 
1791 	for (seq = start_seq;;) {
1792 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1793 			free(el, M_MFIBUF);
1794 			return (EBUSY);
1795 		}
1796 
1797 		dcmd = &cm->cm_frame->dcmd;
1798 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1799 		dcmd->header.cmd = MFI_CMD_DCMD;
1800 		dcmd->header.timeout = 0;
1801 		dcmd->header.data_len = size;
1802 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1803 		((uint32_t *)&dcmd->mbox)[0] = seq;
1804 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1805 		cm->cm_sg = &dcmd->sgl;
1806 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1807 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1808 		cm->cm_data = el;
1809 		cm->cm_len = size;
1810 
1811 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1812 			device_printf(sc->mfi_dev,
1813 			    "Failed to get controller entries\n");
1814 			mfi_release_command(cm);
1815 			break;
1816 		}
1817 
1818 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1819 		    BUS_DMASYNC_POSTREAD);
1820 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1821 
1822 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1823 			mfi_release_command(cm);
1824 			break;
1825 		}
1826 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1827 			device_printf(sc->mfi_dev,
1828 			    "Error %d fetching controller entries\n",
1829 			    dcmd->header.cmd_status);
1830 			mfi_release_command(cm);
1831 			error = EIO;
1832 			break;
1833 		}
1834 		mfi_release_command(cm);
1835 
1836 		for (i = 0; i < el->count; i++) {
1837 			/*
1838 			 * If this event is newer than 'stop_seq' then
1839 			 * break out of the loop.  Note that the log
1840 			 * is a circular buffer so we have to handle
1841 			 * the case that our stop point is earlier in
1842 			 * the buffer than our start point.
1843 			 */
1844 			if (el->event[i].seq >= stop_seq) {
1845 				if (start_seq <= stop_seq)
1846 					break;
1847 				else if (el->event[i].seq < start_seq)
1848 					break;
1849 			}
1850 			mfi_queue_evt(sc, &el->event[i]);
1851 		}
1852 		seq = el->event[el->count - 1].seq + 1;
1853 	}
1854 
1855 	free(el, M_MFIBUF);
1856 	return (error);
1857 }
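
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver; it can
 * be extracted to a userland file and run).  The stop test in the loop
 * above, restated as a predicate: an event ends the scan once it reaches
 * stop_seq, unless the circular log has wrapped (start_seq > stop_seq)
 * and the event still lies in the old tail at or beyond start_seq.
 */
#include <assert.h>
#include <stdint.h>

static int
past_stop(uint32_t seq, uint32_t start_seq, uint32_t stop_seq)
{

	if (seq < stop_seq)
		return (0);
	if (start_seq <= stop_seq)	/* no wrap: we are done */
		return (1);
	return (seq < start_seq);	/* wrapped: done only in [stop, start) */
}

int
main(void)
{

	assert(past_stop(10, 0, 10));	/* linear scan reaches stop */
	assert(!past_stop(90, 80, 20));	/* wrapped log, still in old tail */
	assert(past_stop(30, 80, 20));	/* wrapped log, inside [20, 80) */
	return (0);
}
#endif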
1858 
1859 static int
1860 mfi_add_ld(struct mfi_softc *sc, int id)
1861 {
1862 	struct mfi_command *cm;
1863 	struct mfi_dcmd_frame *dcmd = NULL;
1864 	struct mfi_ld_info *ld_info = NULL;
1865 	struct mfi_disk_pending *ld_pend;
1866 	int error;
1867 
1868 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1869 
1870 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1871 	if (ld_pend != NULL) {
1872 		ld_pend->ld_id = id;
1873 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1874 	}
1875 
1876 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1877 	    (void **)&ld_info, sizeof(*ld_info));
1878 	if (error) {
1879 		device_printf(sc->mfi_dev,
1880 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1881 		if (ld_info)
1882 			free(ld_info, M_MFIBUF);
1883 		return (error);
1884 	}
1885 	cm->cm_flags = MFI_CMD_DATAIN;
1886 	dcmd = &cm->cm_frame->dcmd;
1887 	dcmd->mbox[0] = id;
1888 	if (mfi_wait_command(sc, cm) != 0) {
1889 		device_printf(sc->mfi_dev,
1890 		    "Failed to get logical drive: %d\n", id);
1891 		free(ld_info, M_MFIBUF);
1892 		return (0);
1893 	}
1894 	if (ld_info->ld_config.params.isSSCD != 1)
1895 		mfi_add_ld_complete(cm);
1896 	else {
1897 		mfi_release_command(cm);
		if (ld_info)	/* For SSCD volumes, free ld_info here */
1899 			free(ld_info, M_MFIBUF);
1900 	}
1901 	return (0);
1902 }
1903 
1904 static void
1905 mfi_add_ld_complete(struct mfi_command *cm)
1906 {
1907 	struct mfi_frame_header *hdr;
1908 	struct mfi_ld_info *ld_info;
1909 	struct mfi_softc *sc;
1910 	device_t child;
1911 
1912 	sc = cm->cm_sc;
1913 	hdr = &cm->cm_frame->header;
1914 	ld_info = cm->cm_private;
1915 
1916 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1917 		free(ld_info, M_MFIBUF);
1918 		wakeup(&sc->mfi_map_sync_cm);
1919 		mfi_release_command(cm);
1920 		return;
1921 	}
1922 	wakeup(&sc->mfi_map_sync_cm);
1923 	mfi_release_command(cm);
1924 
1925 	mtx_unlock(&sc->mfi_io_lock);
1926 	mtx_lock(&Giant);
1927 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1928 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1929 		free(ld_info, M_MFIBUF);
1930 		mtx_unlock(&Giant);
1931 		mtx_lock(&sc->mfi_io_lock);
1932 		return;
1933 	}
1934 
1935 	device_set_ivars(child, ld_info);
1936 	device_set_desc(child, "MFI Logical Disk");
1937 	bus_generic_attach(sc->mfi_dev);
1938 	mtx_unlock(&Giant);
1939 	mtx_lock(&sc->mfi_io_lock);
1940 }
1941 
static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1943 {
1944 	struct mfi_command *cm;
1945 	struct mfi_dcmd_frame *dcmd = NULL;
1946 	struct mfi_pd_info *pd_info = NULL;
1947 	struct mfi_system_pending *syspd_pend;
1948 	int error;
1949 
1950 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1951 
1952 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1953 	if (syspd_pend != NULL) {
1954 		syspd_pend->pd_id = id;
1955 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1956 	}
1957 
1958 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
	    (void **)&pd_info, sizeof(*pd_info));
1960 	if (error) {
1961 		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1963 		    error);
1964 		if (pd_info)
1965 			free(pd_info, M_MFIBUF);
1966 		return (error);
1967 	}
1968 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1969 	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
1971 	dcmd->header.scsi_status = 0;
1972 	dcmd->header.pad0 = 0;
1973 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1974 		device_printf(sc->mfi_dev,
1975 		    "Failed to get physical drive info %d\n", id);
1976 		free(pd_info, M_MFIBUF);
1977 		mfi_release_command(cm);
1978 		return (error);
1979 	}
1980 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1981 	    BUS_DMASYNC_POSTREAD);
1982 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1983 	mfi_add_sys_pd_complete(cm);
1984 	return (0);
1985 }
1986 
1987 static void
1988 mfi_add_sys_pd_complete(struct mfi_command *cm)
1989 {
1990 	struct mfi_frame_header *hdr;
1991 	struct mfi_pd_info *pd_info;
1992 	struct mfi_softc *sc;
1993 	device_t child;
1994 
1995 	sc = cm->cm_sc;
1996 	hdr = &cm->cm_frame->header;
1997 	pd_info = cm->cm_private;
1998 
1999 	if (hdr->cmd_status != MFI_STAT_OK) {
2000 		free(pd_info, M_MFIBUF);
2001 		mfi_release_command(cm);
2002 		return;
2003 	}
2004 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2005 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2006 		    pd_info->ref.v.device_id);
2007 		free(pd_info, M_MFIBUF);
2008 		mfi_release_command(cm);
2009 		return;
2010 	}
2011 	mfi_release_command(cm);
2012 
2013 	mtx_unlock(&sc->mfi_io_lock);
2014 	mtx_lock(&Giant);
2015 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2016 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 		free(pd_info, M_MFIBUF);
2018 		mtx_unlock(&Giant);
2019 		mtx_lock(&sc->mfi_io_lock);
2020 		return;
2021 	}
2022 
2023 	device_set_ivars(child, pd_info);
2024 	device_set_desc(child, "MFI System PD");
2025 	bus_generic_attach(sc->mfi_dev);
2026 	mtx_unlock(&Giant);
2027 	mtx_lock(&sc->mfi_io_lock);
2028 }
2029 
2030 static struct mfi_command *
2031 mfi_bio_command(struct mfi_softc *sc)
2032 {
2033 	struct bio *bio;
2034 	struct mfi_command *cm = NULL;
2035 
	/* Reserve two commands to avoid starvation of ioctls. */
2037 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2038 		return (NULL);
2039 	}
2040 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2041 		return (NULL);
2042 	}
2043 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 		cm = mfi_build_syspdio(sc, bio);
2047 	}
	if (cm == NULL)
		mfi_enqueue_bio(sc, bio);
	return (cm);
2051 }
2052 
2053 /*
2054  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2055  */
2056 
2057 int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count,
    uint8_t *cdb)
2059 {
2060 	int cdb_len;
2061 
	if (((lba & 0x1fffff) == lba)
	    && ((block_count & 0xff) == block_count)
	    && (byte2 == 0)) {
		/* We can fit in a 6-byte CDB */
2066 		struct scsi_rw_6 *scsi_cmd;
2067 
2068 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 		scsi_ulto3b(lba, scsi_cmd->addr);
2071 		scsi_cmd->length = block_count & 0xff;
2072 		scsi_cmd->control = 0;
2073 		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
		/* Need a 10-byte CDB */
2076 		struct scsi_rw_10 *scsi_cmd;
2077 
2078 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 		scsi_cmd->byte2 = byte2;
2081 		scsi_ulto4b(lba, scsi_cmd->addr);
2082 		scsi_cmd->reserved = 0;
2083 		scsi_ulto2b(block_count, scsi_cmd->length);
2084 		scsi_cmd->control = 0;
2085 		cdb_len = sizeof(*scsi_cmd);
2086 	} else if (((block_count & 0xffffffff) == block_count) &&
2087 	    ((lba & 0xffffffff) == lba)) {
		/* Block count too big for a 10-byte CDB; use a 12-byte CDB */
2089 		struct scsi_rw_12 *scsi_cmd;
2090 
2091 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 		scsi_cmd->byte2 = byte2;
2094 		scsi_ulto4b(lba, scsi_cmd->addr);
2095 		scsi_cmd->reserved = 0;
2096 		scsi_ulto4b(block_count, scsi_cmd->length);
2097 		scsi_cmd->control = 0;
2098 		cdb_len = sizeof(*scsi_cmd);
2099 	} else {
		/*
		 * 16-byte CDB.  We'll only get here if the LBA doesn't
		 * fit in 32 bits.
		 */
2104 		struct scsi_rw_16 *scsi_cmd;
2105 
2106 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 		scsi_cmd->byte2 = byte2;
2109 		scsi_u64to8b(lba, scsi_cmd->addr);
2110 		scsi_cmd->reserved = 0;
2111 		scsi_ulto4b(block_count, scsi_cmd->length);
2112 		scsi_cmd->control = 0;
2113 		cdb_len = sizeof(*scsi_cmd);
2114 	}
2115 
	return (cdb_len);
2117 }
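
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver).  The
 * CDB sizes mfi_build_cdb() above is expected to pick, per the branches
 * documented in the function:
 *   - 6-byte CDB:  LBA fits in 21 bits, count in 8 bits, byte2 == 0
 *   - 10-byte CDB: LBA fits in 32 bits, count in 16 bits
 *   - 12-byte CDB: LBA fits in 32 bits, count needs more than 16 bits
 *   - 16-byte CDB: LBA does not fit in 32 bits
 */
#include <assert.h>
#include <stdint.h>

static void
cdb_len_examples(void)
{
	uint8_t cdb[16];

	assert(mfi_build_cdb(1, 0, 0x1fffff, 0xff, cdb) == 6);
	assert(mfi_build_cdb(1, 0, 0xffffffff, 0xffff, cdb) == 10);
	assert(mfi_build_cdb(1, 0, 0xffffffff, 0x10000, cdb) == 12);
	assert(mfi_build_cdb(1, 0, (uint64_t)1 << 32, 1, cdb) == 16);
}
#endif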
2118 
2119 extern char *unmapped_buf;
2120 
2121 static struct mfi_command *
2122 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2123 {
2124 	struct mfi_command *cm;
2125 	struct mfi_pass_frame *pass;
2126 	uint32_t context = 0;
2127 	int flags = 0, blkcount = 0, readop;
2128 	uint8_t cdb_len;
2129 
2130 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2131 
2132 	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);
2134 
2135 	/* Zero out the MFI frame */
2136 	context = cm->cm_frame->header.context;
2137 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2138 	cm->cm_frame->header.context = context;
2139 	pass = &cm->cm_frame->pass;
2140 	bzero(pass->cdb, 16);
2141 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2142 	switch (bio->bio_cmd & 0x03) {
2143 	case BIO_READ:
2144 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2145 		readop = 1;
2146 		break;
2147 	case BIO_WRITE:
2148 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2149 		readop = 0;
2150 		break;
2151 	default:
		/* TODO: handle BIO_DELETE */
2153 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2154 	}
2155 
2156 	/* Cheat with the sector length to avoid a non-constant division */
2157 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2158 	/* Fill the LBA and Transfer length in CDB */
2159 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2160 	    pass->cdb);
2161 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2162 	pass->header.lun_id = 0;
2163 	pass->header.timeout = 0;
2164 	pass->header.flags = 0;
2165 	pass->header.scsi_status = 0;
2166 	pass->header.sense_len = MFI_SENSE_LEN;
2167 	pass->header.data_len = bio->bio_bcount;
2168 	pass->header.cdb_len = cdb_len;
2169 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2170 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2171 	cm->cm_complete = mfi_bio_complete;
2172 	cm->cm_private = bio;
2173 	cm->cm_data = unmapped_buf;
2174 	cm->cm_len = bio->bio_bcount;
2175 	cm->cm_sg = &pass->sgl;
2176 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2177 	cm->cm_flags = flags;
2178 
2179 	return (cm);
2180 }
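
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver; it can
 * be extracted to a userland file and run).  The "cheat" comment above
 * refers to MFI_SECTOR_LEN being a compile-time constant, so the round-up
 * division compiles down to shifts instead of a runtime divide.  The 512
 * below is a stand-in for MFI_SECTOR_LEN and is an assumption of this
 * sketch.
 */
#include <assert.h>

#define SKETCH_SECTOR_LEN	512	/* stand-in for MFI_SECTOR_LEN */

static unsigned int
blk_roundup(unsigned int bcount)
{

	return ((bcount + SKETCH_SECTOR_LEN - 1) / SKETCH_SECTOR_LEN);
}

int
main(void)
{

	assert(blk_roundup(1) == 1);	/* a partial sector needs a block */
	assert(blk_roundup(512) == 1);
	assert(blk_roundup(513) == 2);
	return (0);
}
#endif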
2181 
2182 static struct mfi_command *
2183 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2184 {
2185 	struct mfi_io_frame *io;
2186 	struct mfi_command *cm;
2187 	int flags;
2188 	uint32_t blkcount;
2189 	uint32_t context = 0;
2190 
2191 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2192 
2193 	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);
2195 
2196 	/* Zero out the MFI frame */
2197 	context = cm->cm_frame->header.context;
2198 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2199 	cm->cm_frame->header.context = context;
2200 	io = &cm->cm_frame->io;
2201 	switch (bio->bio_cmd & 0x03) {
2202 	case BIO_READ:
2203 		io->header.cmd = MFI_CMD_LD_READ;
2204 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2205 		break;
2206 	case BIO_WRITE:
2207 		io->header.cmd = MFI_CMD_LD_WRITE;
2208 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2209 		break;
2210 	default:
		/* TODO: handle BIO_DELETE */
2212 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2213 	}
2214 
2215 	/* Cheat with the sector length to avoid a non-constant division */
2216 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2217 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2218 	io->header.timeout = 0;
2219 	io->header.flags = 0;
2220 	io->header.scsi_status = 0;
2221 	io->header.sense_len = MFI_SENSE_LEN;
2222 	io->header.data_len = blkcount;
2223 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2224 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2225 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2226 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2227 	cm->cm_complete = mfi_bio_complete;
2228 	cm->cm_private = bio;
2229 	cm->cm_data = unmapped_buf;
2230 	cm->cm_len = bio->bio_bcount;
2231 	cm->cm_sg = &io->sgl;
2232 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2233 	cm->cm_flags = flags;
2234 
2235 	return (cm);
2236 }
2237 
2238 static void
2239 mfi_bio_complete(struct mfi_command *cm)
2240 {
2241 	struct bio *bio;
2242 	struct mfi_frame_header *hdr;
2243 	struct mfi_softc *sc;
2244 
2245 	bio = cm->cm_private;
2246 	hdr = &cm->cm_frame->header;
2247 	sc = cm->cm_sc;
2248 
2249 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2250 		bio->bio_flags |= BIO_ERROR;
2251 		bio->bio_error = EIO;
2252 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2253 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2254 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2255 	} else if (cm->cm_error != 0) {
2256 		bio->bio_flags |= BIO_ERROR;
2257 		bio->bio_error = cm->cm_error;
2258 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2259 		    cm, cm->cm_error);
2260 	}
2261 
2262 	mfi_release_command(cm);
2263 	mfi_disk_complete(bio);
2264 }
2265 
2266 void
2267 mfi_startio(struct mfi_softc *sc)
2268 {
2269 	struct mfi_command *cm;
2270 	struct ccb_hdr *ccbh;
2271 
2272 	for (;;) {
2273 		/* Don't bother if we're short on resources */
2274 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2275 			break;
2276 
2277 		/* Try a command that has already been prepared */
2278 		cm = mfi_dequeue_ready(sc);
2279 
2280 		if (cm == NULL) {
2281 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2282 				cm = sc->mfi_cam_start(ccbh);
2283 		}
2284 
2285 		/* Nope, so look for work on the bioq */
2286 		if (cm == NULL)
2287 			cm = mfi_bio_command(sc);
2288 
2289 		/* No work available, so exit */
2290 		if (cm == NULL)
2291 			break;
2292 
2293 		/* Send the command to the controller */
2294 		if (mfi_mapcmd(sc, cm) != 0) {
2295 			device_printf(sc->mfi_dev, "Failed to startio\n");
2296 			mfi_requeue_ready(cm);
2297 			break;
2298 		}
2299 	}
2300 }
2301 
2302 int
2303 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2304 {
2305 	int error, polled;
2306 
2307 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2308 
	if ((cm->cm_data != NULL) &&
	    (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2310 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2311 		if (cm->cm_flags & MFI_CMD_CCB)
2312 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2313 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2314 			    polled);
2315 		else if (cm->cm_flags & MFI_CMD_BIO)
2316 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2317 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2318 			    polled);
2319 		else
2320 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2321 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2322 			    mfi_data_cb, cm, polled);
2323 		if (error == EINPROGRESS) {
2324 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2325 			return (0);
2326 		}
2327 	} else {
2328 		error = mfi_send_frame(sc, cm);
2329 	}
2330 
2331 	return (error);
2332 }
2333 
2334 static void
2335 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2336 {
2337 	struct mfi_frame_header *hdr;
2338 	struct mfi_command *cm;
2339 	union mfi_sgl *sgl;
2340 	struct mfi_softc *sc;
2341 	int i, j, first, dir;
2342 	int sge_size, locked;
2343 
2344 	cm = (struct mfi_command *)arg;
2345 	sc = cm->cm_sc;
2346 	hdr = &cm->cm_frame->header;
2347 	sgl = cm->cm_sg;
2348 
	/*
	 * We need to check if we have the lock: this is an async
	 * callback, so even though our caller mfi_mapcmd asserts that
	 * it holds the lock, there is no guarantee it hasn't been
	 * dropped if bus_dmamap_load returned before our completion.
	 */
2356 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2357 		mtx_lock(&sc->mfi_io_lock);
2358 
2359 	if (error) {
2360 		printf("error %d in callback\n", error);
2361 		cm->cm_error = error;
2362 		mfi_complete(sc, cm);
2363 		goto out;
2364 	}
	/*
	 * Use the IEEE SGL only for I/Os on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on sizeof(bus_addr_t).  Also calculate
	 * the total frame size based on the type of SGL used.
	 */
2371 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2372 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2373 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2374 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2375 		for (i = 0; i < nsegs; i++) {
2376 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2377 			sgl->sg_skinny[i].len = segs[i].ds_len;
2378 			sgl->sg_skinny[i].flag = 0;
2379 		}
2380 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2381 		sge_size = sizeof(struct mfi_sg_skinny);
2382 		hdr->sg_count = nsegs;
2383 	} else {
2384 		j = 0;
2385 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2386 			first = cm->cm_stp_len;
2387 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2388 				sgl->sg32[j].addr = segs[0].ds_addr;
2389 				sgl->sg32[j++].len = first;
2390 			} else {
2391 				sgl->sg64[j].addr = segs[0].ds_addr;
2392 				sgl->sg64[j++].len = first;
2393 			}
2394 		} else
2395 			first = 0;
2396 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2397 			for (i = 0; i < nsegs; i++) {
2398 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2399 				sgl->sg32[j++].len = segs[i].ds_len - first;
2400 				first = 0;
2401 			}
2402 		} else {
2403 			for (i = 0; i < nsegs; i++) {
2404 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2405 				sgl->sg64[j++].len = segs[i].ds_len - first;
2406 				first = 0;
2407 			}
2408 			hdr->flags |= MFI_FRAME_SGL64;
2409 		}
2410 		hdr->sg_count = j;
2411 		sge_size = sc->mfi_sge_size;
2412 	}
2413 
2414 	dir = 0;
2415 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2416 		dir |= BUS_DMASYNC_PREREAD;
2417 		hdr->flags |= MFI_FRAME_DIR_READ;
2418 	}
2419 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2420 		dir |= BUS_DMASYNC_PREWRITE;
2421 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2422 	}
2423 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2424 	cm->cm_flags |= MFI_CMD_MAPPED;
2425 
	/*
	 * The compound frame always contains at least one frame, so
	 * the division below yields the number of extra frames
	 * directly; subtracting one first accounts for the implicit
	 * base frame, and no round-up compensation is needed.
	 */
2432 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2433 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2434 
2435 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2436 		printf("error %d in callback from mfi_send_frame\n", error);
2437 		cm->cm_error = error;
2438 		mfi_complete(sc, cm);
2439 		goto out;
2440 	}
2441 
2442 out:
2443 	/* leave the lock in the state we found it */
2444 	if (locked == 0)
2445 		mtx_unlock(&sc->mfi_io_lock);
2446 
2447 	return;
2448 }
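
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver; it can
 * be extracted to a userland file and run).  The compound-frame sizing
 * at the end of mfi_data_cb() above (after the SGL flavor and sge_size
 * have been chosen): the base frame is implicit, so the extra-frame
 * count is ceil(total / MFI_FRAME_SIZE) - 1, written as
 * (total - 1) / MFI_FRAME_SIZE.  The frame size of 64 bytes below is an
 * assumption of this sketch.
 */
#include <assert.h>

#define SKETCH_FRAME_SIZE	64	/* stand-in for MFI_FRAME_SIZE */

static int
extra_frames(int total_frame_size)
{

	return ((total_frame_size - 1) / SKETCH_FRAME_SIZE);
}

int
main(void)
{

	assert(extra_frames(64) == 0);	/* fits in the base frame */
	assert(extra_frames(65) == 1);	/* spills into one extra frame */
	assert(extra_frames(128) == 1);
	return (0);
}
#endif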
2449 
2450 static int
2451 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2452 {
2453 	int error;
2454 
2455 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2456 
2457 	if (sc->MFA_enabled)
2458 		error = mfi_tbolt_send_frame(sc, cm);
2459 	else
2460 		error = mfi_std_send_frame(sc, cm);
2461 
2462 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2463 		mfi_remove_busy(cm);
2464 
2465 	return (error);
2466 }
2467 
2468 static int
2469 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2470 {
2471 	struct mfi_frame_header *hdr;
2472 	int tm = mfi_polled_cmd_timeout * 1000;
2473 
2474 	hdr = &cm->cm_frame->header;
2475 
2476 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2477 		cm->cm_timestamp = time_uptime;
2478 		mfi_enqueue_busy(cm);
2479 	} else {
2480 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2481 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2482 	}
2483 
2484 	/*
	 * The bus address of the command is aligned on a 64-byte boundary,
	 * leaving the low 6 bits zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
2489 	 * hint for the hardware to predict how many frames need to be
2490 	 * fetched across the bus.  If a command has more than 8 frames
2491 	 * then the 3 bits are set to 0x7 and the firmware uses other
2492 	 * information in the command to determine the total amount to fetch.
2493 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2494 	 * is enough for both 32bit and 64bit systems.
2495 	 */
2496 	if (cm->cm_extra_frames > 7)
2497 		cm->cm_extra_frames = 7;
2498 
2499 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2500 
2501 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2502 		return (0);
2503 
2504 	/* This is a polled command, so busy-wait for it to complete. */
2505 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2506 		DELAY(1000);
2507 		tm -= 1;
2508 		if (tm <= 0)
2509 			break;
2510 	}
2511 
2512 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2513 		device_printf(sc->mfi_dev, "Frame %p timed out "
2514 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2515 		return (ETIMEDOUT);
2516 	}
2517 
2518 	return (0);
2519 }
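
#if 0
/*
 * Illustrative sketch (guarded out, not compiled into the driver; it can
 * be extracted to a userland file and run).  The comment above describes
 * the addressing trick: a 64-byte-aligned frame address has 6 low zero
 * bits, and shifting right by 3 leaves 3 zero bits that carry the
 * frame-count prefetch hint, clamped at 7.  The actual register write
 * lives in the per-chip mfi_issue_cmd routine, so the OR encoding below
 * is an assumption of this sketch.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
encode_frame(uint32_t busaddr, int extra_frames)
{

	if (extra_frames > 7)
		extra_frames = 7;	/* mirrors the clamp above */
	return ((busaddr >> 3) | (uint32_t)extra_frames);
}

int
main(void)
{

	/* 64-byte aligned address: low 3 bits are free after the shift */
	assert(encode_frame(0x1000, 2) == ((0x1000 >> 3) | 2));
	assert((encode_frame(0x1000, 9) & 7) == 7);	/* clamped hint */
	return (0);
}
#endif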
2522 void
2523 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2524 {
2525 	int dir;
2526 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2527 
2528 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2529 		dir = 0;
2530 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2531 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2532 			dir |= BUS_DMASYNC_POSTREAD;
2533 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2534 			dir |= BUS_DMASYNC_POSTWRITE;
2535 
2536 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2537 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2538 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2539 	}
2540 
2541 	cm->cm_flags |= MFI_CMD_COMPLETED;
2542 
2543 	if (cm->cm_complete != NULL)
2544 		cm->cm_complete(cm);
2545 	else
2546 		wakeup(cm);
2547 }
2548 
2549 static int
2550 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2551 {
2552 	struct mfi_command *cm;
2553 	struct mfi_abort_frame *abort;
2554 	int i = 0, error;
2555 	uint32_t context = 0;
2556 
2557 	mtx_lock(&sc->mfi_io_lock);
2558 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2559 		mtx_unlock(&sc->mfi_io_lock);
2560 		return (EBUSY);
2561 	}
2562 
2563 	/* Zero out the MFI frame */
2564 	context = cm->cm_frame->header.context;
2565 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2566 	cm->cm_frame->header.context = context;
2567 
2568 	abort = &cm->cm_frame->abort;
2569 	abort->header.cmd = MFI_CMD_ABORT;
2570 	abort->header.flags = 0;
2571 	abort->header.scsi_status = 0;
2572 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2573 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2574 	abort->abort_mfi_addr_hi =
2575 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2576 	cm->cm_data = NULL;
2577 	cm->cm_flags = MFI_CMD_POLLED;
2578 
2579 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2580 		device_printf(sc->mfi_dev, "failed to abort command\n");
2581 	mfi_release_command(cm);
2582 
2583 	mtx_unlock(&sc->mfi_io_lock);
	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2587 		i++;
2588 	}
2589 	if (*cm_abort != NULL) {
		/* Force a completion if the command didn't abort */
2591 		mtx_lock(&sc->mfi_io_lock);
2592 		(*cm_abort)->cm_complete(*cm_abort);
2593 		mtx_unlock(&sc->mfi_io_lock);
2594 	}
2595 
2596 	return (error);
2597 }
2598 
2599 int
2600 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
2602 {
2603 	struct mfi_command *cm;
2604 	struct mfi_io_frame *io;
2605 	int error;
2606 	uint32_t context = 0;
2607 
2608 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2609 		return (EBUSY);
2610 
2611 	/* Zero out the MFI frame */
2612 	context = cm->cm_frame->header.context;
2613 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2614 	cm->cm_frame->header.context = context;
2615 
2616 	io = &cm->cm_frame->io;
2617 	io->header.cmd = MFI_CMD_LD_WRITE;
2618 	io->header.target_id = id;
2619 	io->header.timeout = 0;
2620 	io->header.flags = 0;
2621 	io->header.scsi_status = 0;
2622 	io->header.sense_len = MFI_SENSE_LEN;
2623 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2624 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2625 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2626 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2627 	io->lba_lo = lba & 0xffffffff;
2628 	cm->cm_data = virt;
2629 	cm->cm_len = len;
2630 	cm->cm_sg = &io->sgl;
2631 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2632 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2633 
2634 	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2636 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2637 	    BUS_DMASYNC_POSTWRITE);
2638 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2639 	mfi_release_command(cm);
2640 
2641 	return (error);
2642 }
2643 
2644 int
2645 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2646     int len)
2647 {
2648 	struct mfi_command *cm;
2649 	struct mfi_pass_frame *pass;
2650 	int error, readop, cdb_len;
2651 	uint32_t blkcount;
2652 
2653 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2654 		return (EBUSY);
2655 
2656 	pass = &cm->cm_frame->pass;
2657 	bzero(pass->cdb, 16);
2658 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2659 
2660 	readop = 0;
2661 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2662 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2663 	pass->header.target_id = id;
2664 	pass->header.timeout = 0;
2665 	pass->header.flags = 0;
2666 	pass->header.scsi_status = 0;
2667 	pass->header.sense_len = MFI_SENSE_LEN;
2668 	pass->header.data_len = len;
2669 	pass->header.cdb_len = cdb_len;
2670 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2671 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2672 	cm->cm_data = virt;
2673 	cm->cm_len = len;
2674 	cm->cm_sg = &pass->sgl;
2675 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2676 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2677 
2678 	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2680 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2681 	    BUS_DMASYNC_POSTWRITE);
2682 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2683 	mfi_release_command(cm);
2684 
2685 	return (error);
2686 }
2687 
2688 static int
2689 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2690 {
2691 	struct mfi_softc *sc;
2692 	int error;
2693 
2694 	sc = dev->si_drv1;
2695 
2696 	mtx_lock(&sc->mfi_io_lock);
2697 	if (sc->mfi_detaching)
2698 		error = ENXIO;
2699 	else {
2700 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2701 		error = 0;
2702 	}
2703 	mtx_unlock(&sc->mfi_io_lock);
2704 
2705 	return (error);
2706 }
2707 
2708 static int
2709 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2710 {
2711 	struct mfi_softc *sc;
2712 	struct mfi_aen *mfi_aen_entry, *tmp;
2713 
2714 	sc = dev->si_drv1;
2715 
2716 	mtx_lock(&sc->mfi_io_lock);
2717 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2718 
2719 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2720 		if (mfi_aen_entry->p == curproc) {
2721 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2722 			    aen_link);
2723 			free(mfi_aen_entry, M_MFIBUF);
2724 		}
2725 	}
2726 	mtx_unlock(&sc->mfi_io_lock);
2727 	return (0);
2728 }
2729 
2730 static int
2731 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2732 {
2733 
2734 	switch (opcode) {
2735 	case MFI_DCMD_LD_DELETE:
2736 	case MFI_DCMD_CFG_ADD:
2737 	case MFI_DCMD_CFG_CLEAR:
2738 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2739 		sx_xlock(&sc->mfi_config_lock);
2740 		return (1);
2741 	default:
2742 		return (0);
2743 	}
2744 }
2745 
2746 static void
2747 mfi_config_unlock(struct mfi_softc *sc, int locked)
2748 {
2749 
2750 	if (locked)
2751 		sx_xunlock(&sc->mfi_config_lock);
2752 }
2753 
2754 /*
2755  * Perform pre-issue checks on commands from userland and possibly veto
2756  * them.
2757  */
2758 static int
2759 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2760 {
2761 	struct mfi_disk *ld, *ld2;
2762 	int error;
2763 	struct mfi_system_pd *syspd = NULL;
2764 	uint16_t syspd_id;
2765 	uint16_t *mbox;
2766 
2767 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2768 	error = 0;
2769 	switch (cm->cm_frame->dcmd.opcode) {
2770 	case MFI_DCMD_LD_DELETE:
2771 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2772 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2773 				break;
2774 		}
2775 		if (ld == NULL)
2776 			error = ENOENT;
2777 		else
2778 			error = mfi_disk_disable(ld);
2779 		break;
2780 	case MFI_DCMD_CFG_CLEAR:
2781 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2782 			error = mfi_disk_disable(ld);
2783 			if (error)
2784 				break;
2785 		}
2786 		if (error) {
2787 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2788 				if (ld2 == ld)
2789 					break;
2790 				mfi_disk_enable(ld2);
2791 			}
2792 		}
2793 		break;
2794 	case MFI_DCMD_PD_STATE_SET:
2795 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2796 		syspd_id = mbox[0];
2797 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2798 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2799 				if (syspd->pd_id == syspd_id)
2800 					break;
2801 			}
		} else
			break;
2805 		if (syspd)
2806 			error = mfi_syspd_disable(syspd);
2807 		break;
2808 	default:
2809 		break;
2810 	}
2811 	return (error);
2812 }
2813 
2814 /* Perform post-issue checks on commands from userland. */
2815 static void
2816 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2817 {
2818 	struct mfi_disk *ld, *ldn;
2819 	struct mfi_system_pd *syspd = NULL;
2820 	uint16_t syspd_id;
2821 	uint16_t *mbox;
2822 
2823 	switch (cm->cm_frame->dcmd.opcode) {
2824 	case MFI_DCMD_LD_DELETE:
2825 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2826 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2827 				break;
2828 		}
		KASSERT(ld != NULL, ("volume disappeared"));
2830 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2831 			mtx_unlock(&sc->mfi_io_lock);
2832 			mtx_lock(&Giant);
2833 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2834 			mtx_unlock(&Giant);
2835 			mtx_lock(&sc->mfi_io_lock);
2836 		} else
2837 			mfi_disk_enable(ld);
2838 		break;
2839 	case MFI_DCMD_CFG_CLEAR:
2840 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2841 			mtx_unlock(&sc->mfi_io_lock);
2842 			mtx_lock(&Giant);
2843 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2844 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2845 			}
2846 			mtx_unlock(&Giant);
2847 			mtx_lock(&sc->mfi_io_lock);
2848 		} else {
2849 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2850 				mfi_disk_enable(ld);
2851 		}
2852 		break;
2853 	case MFI_DCMD_CFG_ADD:
2854 		mfi_ldprobe(sc);
2855 		break;
2856 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2857 		mfi_ldprobe(sc);
2858 		break;
2859 	case MFI_DCMD_PD_STATE_SET:
2860 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2861 		syspd_id = mbox[0];
2862 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2864 				if (syspd->pd_id == syspd_id)
2865 					break;
2866 			}
		} else
			break;
2870 		/* If the transition fails then enable the syspd again */
2871 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2872 			mfi_syspd_enable(syspd);
2873 		break;
2874 	}
2875 }
2876 
2877 static int
2878 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2879 {
2880 	struct mfi_config_data *conf_data;
2881 	struct mfi_command *ld_cm = NULL;
2882 	struct mfi_ld_info *ld_info = NULL;
2883 	struct mfi_ld_config *ld;
2884 	char *p;
2885 	int error = 0;
2886 
2887 	conf_data = (struct mfi_config_data *)cm->cm_data;
2888 
2889 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2890 		p = (char *)conf_data->array;
2891 		p += conf_data->array_size * conf_data->array_count;
2892 		ld = (struct mfi_ld_config *)p;
2893 		if (ld->params.isSSCD == 1)
2894 			error = 1;
2895 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2897 		    (void **)&ld_info, sizeof(*ld_info));
2898 		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "MFI_DCMD_LD_GET_INFO %d\n", error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return (0);
2904 		}
2905 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2906 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2907 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2908 		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev,
			    "failed to get logical drive\n");
2910 			mfi_release_command(ld_cm);
2911 			free(ld_info, M_MFIBUF);
			return (0);
2913 		}
2914 
2915 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2916 			free(ld_info, M_MFIBUF);
2917 			mfi_release_command(ld_cm);
			return (0);
		} else
			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2922 
2923 		if (ld_info->ld_config.params.isSSCD == 1)
2924 			error = 1;
2925 
2926 		mfi_release_command(ld_cm);
		free(ld_info, M_MFIBUF);
	}
	return (error);
2931 }
2932 
2933 static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	struct megasas_sge *kern_sge;
	int sge_size, error;

	ioc = (struct mfi_ioc_packet *)arg;
	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2944 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2945 
2946 	if (sizeof(bus_addr_t) == 8) {
2947 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2948 		cm->cm_extra_frames = 2;
2949 		sge_size = sizeof(struct mfi_sg64);
2950 	} else {
		cm->cm_extra_frames =
		    (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2952 		sge_size = sizeof(struct mfi_sg32);
2953 	}
2954 
2955 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2958 			1, 0,			/* algnmnt, boundary */
2959 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2960 			BUS_SPACE_MAXADDR,	/* highaddr */
2961 			NULL, NULL,		/* filter, filterarg */
2962 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2963 			2,			/* nsegments */
2964 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2965 			BUS_DMA_ALLOCNOW,	/* flags */
2966 			NULL, NULL,		/* lockfunc, lockarg */
2967 			&sc->mfi_kbuff_arr_dmat[i])) {
2968 			device_printf(sc->mfi_dev,
2969 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2970 			return (ENOMEM);
2971 		}
2972 
2973 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2974 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2975 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2976 			device_printf(sc->mfi_dev,
2977 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2978 			return (ENOMEM);
2979 		}
2980 
2981 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2982 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2983 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2984 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2985 
2986 		if (!sc->kbuff_arr[i]) {
2987 			device_printf(sc->mfi_dev,
2988 			    "Could not allocate memory for kbuff_arr info\n");
			return (-1);
2990 		}
2991 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2992 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2993 
2994 		if (sizeof(bus_addr_t) == 8) {
2995 			cm->cm_frame->stp.sgl.sg64[i].addr =
2996 			    kern_sge[i].phys_addr;
2997 			cm->cm_frame->stp.sgl.sg64[i].len =
2998 			    ioc->mfi_sgl[i].iov_len;
2999 		} else {
3000 			cm->cm_frame->stp.sgl.sg32[i].addr =
3001 			    kern_sge[i].phys_addr;
3002 			cm->cm_frame->stp.sgl.sg32[i].len =
3003 			    ioc->mfi_sgl[i].iov_len;
3004 		}
3005 
3006 		error = copyin(ioc->mfi_sgl[i].iov_base,
3007 		    sc->kbuff_arr[i],
3008 		    ioc->mfi_sgl[i].iov_len);
3009 		if (error != 0) {
3010 			device_printf(sc->mfi_dev, "Copy in failed\n");
			return (error);
3012 		}
3013 	}
3014 
	cm->cm_flags |= MFI_CMD_MAPPED;
	return (0);
3017 }
3018 
3019 static int
3020 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3021 {
3022 	struct mfi_command *cm;
3023 	struct mfi_dcmd_frame *dcmd;
3024 	void *ioc_buf = NULL;
3025 	uint32_t context;
	int error = 0, locked;

3029 	if (ioc->buf_size > 0) {
3030 		if (ioc->buf_size > 1024 * 1024)
3031 			return (ENOMEM);
3032 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3033 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3034 		if (error) {
3035 			device_printf(sc->mfi_dev, "failed to copyin\n");
3036 			free(ioc_buf, M_MFIBUF);
3037 			return (error);
3038 		}
3039 	}
3040 
3041 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3042 
3043 	mtx_lock(&sc->mfi_io_lock);
3044 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3045 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3046 
3047 	/* Save context for later */
3048 	context = cm->cm_frame->header.context;
3049 
3050 	dcmd = &cm->cm_frame->dcmd;
3051 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3052 
3053 	cm->cm_sg = &dcmd->sgl;
3054 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3055 	cm->cm_data = ioc_buf;
3056 	cm->cm_len = ioc->buf_size;
3057 
3058 	/* restore context */
3059 	cm->cm_frame->header.context = context;
3060 
3061 	/* Cheat since we don't know if we're writing or reading */
3062 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3063 
3064 	error = mfi_check_command_pre(sc, cm);
3065 	if (error)
3066 		goto out;
3067 
3068 	error = mfi_wait_command(sc, cm);
3069 	if (error) {
3070 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3071 		goto out;
3072 	}
3073 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3074 	mfi_check_command_post(sc, cm);
3075 out:
3076 	mfi_release_command(cm);
3077 	mtx_unlock(&sc->mfi_io_lock);
3078 	mfi_config_unlock(sc, locked);
3079 	if (ioc->buf_size > 0)
3080 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3081 	if (ioc_buf)
3082 		free(ioc_buf, M_MFIBUF);
3083 	return (error);
3084 }
3085 
3086 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3087 
3088 static int
3089 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3090 {
3091 	struct mfi_softc *sc;
3092 	union mfi_statrequest *ms;
3093 	struct mfi_ioc_packet *ioc;
3094 #ifdef COMPAT_FREEBSD32
3095 	struct mfi_ioc_packet32 *ioc32;
3096 #endif
3097 	struct mfi_ioc_aen *aen;
3098 	struct mfi_command *cm = NULL;
3099 	uint32_t context = 0;
3100 	union mfi_sense_ptr sense_ptr;
3101 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3102 	size_t len;
3103 	int i, res;
3104 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3105 #ifdef COMPAT_FREEBSD32
3106 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3107 	struct mfi_ioc_passthru iop_swab;
3108 #endif
3109 	int error, locked;
3110 	union mfi_sgl *sgl;
3111 	sc = dev->si_drv1;
3112 	error = 0;
3113 
3114 	if (sc->adpreset)
3115 		return EBUSY;
3116 
3117 	if (sc->hw_crit_error)
3118 		return EBUSY;
3119 
3120 	if (sc->issuepend_done == 0)
3121 		return EBUSY;
3122 
3123 	switch (cmd) {
3124 	case MFIIO_STATS:
3125 		ms = (union mfi_statrequest *)arg;
3126 		switch (ms->ms_item) {
3127 		case MFIQ_FREE:
3128 		case MFIQ_BIO:
3129 		case MFIQ_READY:
3130 		case MFIQ_BUSY:
3131 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3132 			    sizeof(struct mfi_qstat));
3133 			break;
3134 		default:
3135 			error = ENOIOCTL;
3136 			break;
3137 		}
3138 		break;
3139 	case MFIIO_QUERY_DISK:
3140 	{
3141 		struct mfi_query_disk *qd;
3142 		struct mfi_disk *ld;
3143 
3144 		qd = (struct mfi_query_disk *)arg;
3145 		mtx_lock(&sc->mfi_io_lock);
3146 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3147 			if (ld->ld_id == qd->array_id)
3148 				break;
3149 		}
3150 		if (ld == NULL) {
3151 			qd->present = 0;
3152 			mtx_unlock(&sc->mfi_io_lock);
3153 			return (0);
3154 		}
3155 		qd->present = 1;
3156 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3157 			qd->open = 1;
3158 		bzero(qd->devname, SPECNAMELEN + 1);
3159 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3160 		mtx_unlock(&sc->mfi_io_lock);
3161 		break;
3162 	}
3163 	case MFI_CMD:
3164 #ifdef COMPAT_FREEBSD32
3165 	case MFI_CMD32:
3166 #endif
		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;
		adapter = ioc->mfi_adapter_no;
3173 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3174 			devclass = devclass_find("mfi");
3175 			sc = devclass_get_softc(devclass, adapter);
3176 		}
3177 		mtx_lock(&sc->mfi_io_lock);
3178 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3179 			mtx_unlock(&sc->mfi_io_lock);
3180 			return (EBUSY);
3181 		}
3182 		mtx_unlock(&sc->mfi_io_lock);
3183 		locked = 0;
3184 
3185 		/*
3186 		 * save off original context since copying from user
3187 		 * will clobber some data
3188 		 */
3189 		context = cm->cm_frame->header.context;
3190 		cm->cm_frame->header.context = cm->cm_index;
3191 
3192 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3193 		    2 * MEGAMFI_FRAME_SIZE);
3194 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3195 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3196 		cm->cm_frame->header.scsi_status = 0;
3197 		cm->cm_frame->header.pad0 = 0;
3198 		if (ioc->mfi_sge_count) {
3199 			cm->cm_sg =
3200 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3201 		}
3202 		sgl = cm->cm_sg;
3203 		cm->cm_flags = 0;
3204 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3205 			cm->cm_flags |= MFI_CMD_DATAIN;
3206 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3207 			cm->cm_flags |= MFI_CMD_DATAOUT;
3208 		/* Legacy app shim */
3209 		if (cm->cm_flags == 0)
3210 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3211 		cm->cm_len = cm->cm_frame->header.data_len;
3212 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3213 #ifdef COMPAT_FREEBSD32
3214 			if (cmd == MFI_CMD) {
3215 #endif
3216 				/* Native */
3217 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3218 #ifdef COMPAT_FREEBSD32
3219 			} else {
3220 				/* 32bit on 64bit */
3221 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3222 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3223 			}
3224 #endif
3225 			cm->cm_len += cm->cm_stp_len;
3226 		}
3227 		if (cm->cm_len &&
3228 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3229 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3230 			    M_WAITOK | M_ZERO);
3231 			if (cm->cm_data == NULL) {
3232 				device_printf(sc->mfi_dev, "Malloc failed\n");
3233 				goto out;
3234 			}
3235 		} else {
3236 			cm->cm_data = 0;
3237 		}
3238 
3239 		/* restore header context */
3240 		cm->cm_frame->header.context = context;
3241 
3242 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3243 			res = mfi_stp_cmd(sc, cm, arg);
3244 			if (res != 0)
3245 				goto out;
3246 		} else {
3247 			temp = data;
3248 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3249 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3250 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3251 #ifdef COMPAT_FREEBSD32
3252 					if (cmd == MFI_CMD) {
3253 #endif
3254 						/* Native */
3255 						addr = ioc->mfi_sgl[i].iov_base;
3256 						len = ioc->mfi_sgl[i].iov_len;
3257 #ifdef COMPAT_FREEBSD32
3258 					} else {
3259 						/* 32bit on 64bit */
3260 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3261 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3262 						len = ioc32->mfi_sgl[i].iov_len;
3263 					}
3264 #endif
3265 					error = copyin(addr, temp, len);
3266 					if (error != 0) {
3267 						device_printf(sc->mfi_dev,
3268 						    "Copy in failed\n");
3269 						goto out;
3270 					}
3271 					temp = &temp[len];
3272 				}
3273 			}
3274 		}
3275 
3276 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3277 			locked = mfi_config_lock(sc,
3278 			     cm->cm_frame->dcmd.opcode);
3279 
3280 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3281 			cm->cm_frame->pass.sense_addr_lo =
3282 			    (uint32_t)cm->cm_sense_busaddr;
3283 			cm->cm_frame->pass.sense_addr_hi =
3284 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3285 		}
3286 		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
3288 		if (!skip_pre_post) {
3289 			error = mfi_check_command_pre(sc, cm);
3290 			if (error) {
3291 				mtx_unlock(&sc->mfi_io_lock);
3292 				goto out;
3293 			}
3294 		}
3295 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3296 			device_printf(sc->mfi_dev,
			    "Controller poll failed\n");
3298 			mtx_unlock(&sc->mfi_io_lock);
3299 			goto out;
3300 		}
3301 		if (!skip_pre_post) {
3302 			mfi_check_command_post(sc, cm);
3303 		}
3304 		mtx_unlock(&sc->mfi_io_lock);
3305 
3306 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3307 			temp = data;
3308 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3309 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3310 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3311 #ifdef COMPAT_FREEBSD32
3312 					if (cmd == MFI_CMD) {
3313 #endif
3314 						/* Native */
3315 						addr = ioc->mfi_sgl[i].iov_base;
3316 						len = ioc->mfi_sgl[i].iov_len;
3317 #ifdef COMPAT_FREEBSD32
3318 					} else {
3319 						/* 32bit on 64bit */
3320 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3321 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3322 						len = ioc32->mfi_sgl[i].iov_len;
3323 					}
3324 #endif
3325 					error = copyout(temp, addr, len);
3326 					if (error != 0) {
3327 						device_printf(sc->mfi_dev,
3328 						    "Copy out failed\n");
3329 						goto out;
3330 					}
3331 					temp = &temp[len];
3332 				}
3333 			}
3334 		}
3335 
3336 		if (ioc->mfi_sense_len) {
3337 			/* get user-space sense ptr then copy out sense */
3338 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3339 			    &sense_ptr.sense_ptr_data[0],
3340 			    sizeof(sense_ptr.sense_ptr_data));
3341 #ifdef COMPAT_FREEBSD32
3342 			if (cmd != MFI_CMD) {
				/*
				 * Not 64-bit native, so zero out any
				 * address over 32 bits.
				 */
3346 				sense_ptr.addr.high = 0;
3347 			}
3348 #endif
3349 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3350 			    ioc->mfi_sense_len);
3351 			if (error != 0) {
3352 				device_printf(sc->mfi_dev,
3353 				    "Copy out failed\n");
3354 				goto out;
3355 			}
3356 		}
3357 
3358 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3359 out:
3360 		mfi_config_unlock(sc, locked);
3361 		if (data)
3362 			free(data, M_MFIBUF);
3363 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3364 			for (i = 0; i < 2; i++) {
3365 				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3367 						bus_dmamap_unload(
3368 						    sc->mfi_kbuff_arr_dmat[i],
3369 						    sc->mfi_kbuff_arr_dmamap[i]
3370 						    );
3371 					if (sc->kbuff_arr[i] != NULL)
3372 						bus_dmamem_free(
3373 						    sc->mfi_kbuff_arr_dmat[i],
3374 						    sc->kbuff_arr[i],
3375 						    sc->mfi_kbuff_arr_dmamap[i]
3376 						    );
3377 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3378 						bus_dma_tag_destroy(
3379 						    sc->mfi_kbuff_arr_dmat[i]);
3380 				}
3381 			}
3382 		}
3383 		if (cm) {
3384 			mtx_lock(&sc->mfi_io_lock);
3385 			mfi_release_command(cm);
3386 			mtx_unlock(&sc->mfi_io_lock);
3387 		}
3388 
3389 		break;
3390 		}
3391 	case MFI_SET_AEN:
3392 		aen = (struct mfi_ioc_aen *)arg;
3393 		mtx_lock(&sc->mfi_io_lock);
3394 		error = mfi_aen_register(sc, aen->aen_seq_num,
3395 		    aen->aen_class_locale);
3396 		mtx_unlock(&sc->mfi_io_lock);
3397 
3398 		break;
3399 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3400 		{
3401 			devclass_t devclass;
3402 			struct mfi_linux_ioc_packet l_ioc;
3403 			int adapter;
3404 
3405 			devclass = devclass_find("mfi");
3406 			if (devclass == NULL)
3407 				return (ENOENT);
3408 
3409 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3410 			if (error)
3411 				return (error);
3412 			adapter = l_ioc.lioc_adapter_no;
3413 			sc = devclass_get_softc(devclass, adapter);
3414 			if (sc == NULL)
3415 				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
3419 		}
3420 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3421 		{
3422 			devclass_t devclass;
3423 			struct mfi_linux_ioc_aen l_aen;
3424 			int adapter;
3425 
3426 			devclass = devclass_find("mfi");
3427 			if (devclass == NULL)
3428 				return (ENOENT);
3429 
3430 			error = copyin(arg, &l_aen, sizeof(l_aen));
3431 			if (error)
3432 				return (error);
3433 			adapter = l_aen.laen_adapter_no;
3434 			sc = devclass_get_softc(devclass, adapter);
3435 			if (sc == NULL)
3436 				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
3440 		}
3441 #ifdef COMPAT_FREEBSD32
3442 	case MFIIO_PASSTHRU32:
3443 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3444 			error = ENOTTY;
3445 			break;
3446 		}
3447 		iop_swab.ioc_frame	= iop32->ioc_frame;
3448 		iop_swab.buf_size	= iop32->buf_size;
3449 		iop_swab.buf		= PTRIN(iop32->buf);
3450 		iop			= &iop_swab;
3451 		/* FALLTHROUGH */
3452 #endif
3453 	case MFIIO_PASSTHRU:
3454 		error = mfi_user_command(sc, iop);
3455 #ifdef COMPAT_FREEBSD32
3456 		if (cmd == MFIIO_PASSTHRU32)
3457 			iop32->ioc_frame = iop_swab.ioc_frame;
3458 #endif
3459 		break;
3460 	default:
3461 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3462 		error = ENOTTY;
3463 		break;
3464 	}
3465 
3466 	return (error);
3467 }
3468 
3469 static int
3470 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3471 {
3472 	struct mfi_softc *sc;
3473 	struct mfi_linux_ioc_packet l_ioc;
3474 	struct mfi_linux_ioc_aen l_aen;
3475 	struct mfi_command *cm = NULL;
3476 	struct mfi_aen *mfi_aen_entry;
3477 	union mfi_sense_ptr sense_ptr;
3478 	uint32_t context = 0;
3479 	uint8_t *data = NULL, *temp;
3480 	int i;
3481 	int error, locked;
3482 
3483 	sc = dev->si_drv1;
3484 	error = 0;
3485 	switch (cmd) {
3486 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3487 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3488 		if (error != 0)
3489 			return (error);
3490 
3491 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3492 			return (EINVAL);
3493 		}
3494 
3495 		mtx_lock(&sc->mfi_io_lock);
3496 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3497 			mtx_unlock(&sc->mfi_io_lock);
3498 			return (EBUSY);
3499 		}
3500 		mtx_unlock(&sc->mfi_io_lock);
3501 		locked = 0;
3502 
3503 		/*
3504 		 * save off original context since copying from user
3505 		 * will clobber some data
3506 		 */
3507 		context = cm->cm_frame->header.context;
3508 
3509 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3510 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3511 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3512 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3513 		cm->cm_frame->header.scsi_status = 0;
3514 		cm->cm_frame->header.pad0 = 0;
3515 		if (l_ioc.lioc_sge_count)
3516 			cm->cm_sg =
3517 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3518 		cm->cm_flags = 0;
3519 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3520 			cm->cm_flags |= MFI_CMD_DATAIN;
3521 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3522 			cm->cm_flags |= MFI_CMD_DATAOUT;
3523 		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			/*
			 * M_WAITOK allocations sleep until they succeed,
			 * so no NULL check is needed here.
			 */
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = NULL;
		}
3535 
3536 		/* restore header context */
3537 		cm->cm_frame->header.context = context;
3538 
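		/*
		 * DATAOUT: gather the user's scatter/gather segments into
		 * the contiguous kernel buffer allocated above.
		 */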
3539 		temp = data;
3540 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3541 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3542 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3543 				       temp,
3544 				       l_ioc.lioc_sgl[i].iov_len);
3545 				if (error != 0) {
3546 					device_printf(sc->mfi_dev,
3547 					    "Copy in failed\n");
3548 					goto out;
3549 				}
3550 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3551 			}
3552 		}
3553 
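		/*
		 * Configuration-changing DCMDs must serialize against other
		 * config operations; mfi_config_lock() decides from the
		 * opcode whether a lock is needed.
		 */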
3554 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3555 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3556 
3557 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3558 			cm->cm_frame->pass.sense_addr_lo =
3559 			    (uint32_t)cm->cm_sense_busaddr;
3560 			cm->cm_frame->pass.sense_addr_hi =
3561 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3562 		}
3563 
3564 		mtx_lock(&sc->mfi_io_lock);
3565 		error = mfi_check_command_pre(sc, cm);
3566 		if (error) {
3567 			mtx_unlock(&sc->mfi_io_lock);
3568 			goto out;
3569 		}
3570 
3571 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3572 			device_printf(sc->mfi_dev,
			    "Controller poll failed\n");
3574 			mtx_unlock(&sc->mfi_io_lock);
3575 			goto out;
3576 		}
3577 
3578 		mfi_check_command_post(sc, cm);
3579 		mtx_unlock(&sc->mfi_io_lock);
3580 
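		/*
		 * DATAIN: scatter the kernel buffer back out to the user's
		 * scatter/gather segments.
		 */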
3581 		temp = data;
3582 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3583 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3584 				error = copyout(temp,
3585 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3586 					l_ioc.lioc_sgl[i].iov_len);
3587 				if (error != 0) {
3588 					device_printf(sc->mfi_dev,
3589 					    "Copy out failed\n");
3590 					goto out;
3591 				}
3592 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3593 			}
3594 		}
3595 
3596 		if (l_ioc.lioc_sense_len) {
3597 			/* get user-space sense ptr then copy out sense */
3598 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3599                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3600 			    &sense_ptr.sense_ptr_data[0],
3601 			    sizeof(sense_ptr.sense_ptr_data));
3602 #ifdef __amd64__
			/*
			 * Only 32-bit Linux binaries are supported, so
			 * zero out any address bits above 32.
			 */
3607 			sense_ptr.addr.high = 0;
3608 #endif
3609 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3610 			    l_ioc.lioc_sense_len);
3611 			if (error != 0) {
3612 				device_printf(sc->mfi_dev,
3613 				    "Copy out failed\n");
3614 				goto out;
3615 			}
3616 		}
3617 
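		/*
		 * Reflect the firmware completion status back into the
		 * user's frame.
		 */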
3618 		error = copyout(&cm->cm_frame->header.cmd_status,
3619 			&((struct mfi_linux_ioc_packet*)arg)
3620 			->lioc_frame.hdr.cmd_status,
3621 			1);
3622 		if (error != 0) {
3623 			device_printf(sc->mfi_dev,
3624 				      "Copy out failed\n");
3625 			goto out;
3626 		}
3627 
3628 out:
3629 		mfi_config_unlock(sc, locked);
3630 		if (data)
3631 			free(data, M_MFIBUF);
3632 		if (cm) {
3633 			mtx_lock(&sc->mfi_io_lock);
3634 			mfi_release_command(cm);
3635 			mtx_unlock(&sc->mfi_io_lock);
3636 		}
3637 
3638 		return (error);
3639 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3640 		error = copyin(arg, &l_aen, sizeof(l_aen));
3641 		if (error != 0)
3642 			return (error);
		printf("AEN registration requested by pid %d\n",
		    curproc->p_pid);
3644 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3645 		    M_WAITOK);
3646 		mtx_lock(&sc->mfi_io_lock);
		/* M_WAITOK guarantees the allocation succeeded. */
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
3652 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3653 		    l_aen.laen_class_locale);
3654 
3655 		if (error != 0) {
3656 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3657 			    aen_link);
3658 			free(mfi_aen_entry, M_MFIBUF);
3659 		}
3660 		mtx_unlock(&sc->mfi_io_lock);
3661 
3662 		return (error);
3663 	default:
3664 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
3666 		break;
3667 	}
3668 
3669 	return (error);
3670 }
3671 
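/*
 * poll(2) support: report the device readable once an AEN has fired.
 * If no AEN command is outstanding at all, flag POLLERR so pollers do
 * not block forever; otherwise record the thread for selwakeup().
 */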
3672 static int
3673 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3674 {
3675 	struct mfi_softc *sc;
3676 	int revents = 0;
3677 
3678 	sc = dev->si_drv1;
3679 
3680 	if (poll_events & (POLLIN | POLLRDNORM)) {
3681 		if (sc->mfi_aen_triggered != 0) {
3682 			revents |= poll_events & (POLLIN | POLLRDNORM);
3683 			sc->mfi_aen_triggered = 0;
3684 		}
3685 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3686 			revents |= POLLERR;
3687 		}
3688 	}
3689 
3690 	if (revents == 0) {
3691 		if (poll_events & (POLLIN | POLLRDNORM)) {
3692 			sc->mfi_poll_waiting = 1;
3693 			selrecord(td, &sc->mfi_select);
3694 		}
3695 	}
3696 
	return (revents);
3698 }
3699 
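/*
 * Debugging aid: walk every mfi(4) instance and report any busy command
 * that has been outstanding for longer than mfi_cmd_timeout seconds.
 */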
3700 static void
3701 mfi_dump_all(void)
3702 {
3703 	struct mfi_softc *sc;
3704 	struct mfi_command *cm;
3705 	devclass_t dc;
3706 	time_t deadline;
3707 	int timedout;
3708 	int i;
3709 
3710 	dc = devclass_find("mfi");
3711 	if (dc == NULL) {
3712 		printf("No mfi dev class\n");
3713 		return;
3714 	}
3715 
3716 	for (i = 0; ; i++) {
3717 		sc = devclass_get_softc(dc, i);
3718 		if (sc == NULL)
3719 			break;
		device_printf(sc->mfi_dev, "Dumping busy commands\n");
3721 		timedout = 0;
3722 		deadline = time_uptime - mfi_cmd_timeout;
3723 		mtx_lock(&sc->mfi_io_lock);
3724 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3725 			if (cm->cm_timestamp <= deadline) {
3726 				device_printf(sc->mfi_dev,
3727 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3728 				    cm, (int)(time_uptime - cm->cm_timestamp));
3729 				MFI_PRINT_CMD(cm);
3730 				timedout++;
3731 			}
3732 		}
3733 
3734 #if 0
3735 		if (timedout)
3736 			MFI_DUMP_CMDS(sc);
3737 #endif
3738 
3739 		mtx_unlock(&sc->mfi_io_lock);
3740 	}
3743 }
3744 
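/*
 * Watchdog callout, re-armed every mfi_cmd_timeout seconds.  The
 * Thunderbolt reset check runs first and may consume the pass entirely;
 * otherwise every busy command older than the timeout is reported.
 * Timed-out commands are deliberately not failed; see the comment in
 * the scan loop.
 */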
3745 static void
3746 mfi_timeout(void *data)
3747 {
3748 	struct mfi_softc *sc = (struct mfi_softc *)data;
3749 	struct mfi_command *cm, *tmp;
3750 	time_t deadline;
3751 	int timedout = 0;
3752 
3753 	deadline = time_uptime - mfi_cmd_timeout;
3754 	if (sc->adpreset == 0) {
3755 		if (!mfi_tbolt_reset(sc)) {
3756 			callout_reset(&sc->mfi_watchdog_callout,
3757 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3758 			return;
3759 		}
3760 	}
3761 	mtx_lock(&sc->mfi_io_lock);
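	/*
	 * The AEN and map-sync commands are expected to remain outstanding
	 * indefinitely, so they are exempt from timeout handling.
	 */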
3762 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3763 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3764 			continue;
3765 		if (cm->cm_timestamp <= deadline) {
3766 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3767 				cm->cm_timestamp = time_uptime;
3768 			} else {
3769 				device_printf(sc->mfi_dev,
3770 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
3773 				MFI_PRINT_CMD(cm);
3774 				MFI_VALIDATE_CMD(sc, cm);
3775 				/*
				 * While commands can get stuck forever, we
				 * do not fail them, as there is no way to
				 * tell whether the controller has actually
				 * processed them or not.
				 *
				 * In addition, it's very likely that
				 * force-failing a command here would cause
				 * a panic, e.g. in UFS.
3784 				 */
3785 				timedout++;
3786 			}
3787 		}
3788 	}
3789 
3790 #if 0
3791 	if (timedout)
3792 		MFI_DUMP_CMDS(sc);
3793 #endif
3794 
3795 	mtx_unlock(&sc->mfi_io_lock);
3796 
3797 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3798 	    mfi_timeout, sc);
3799 
	/*
	 * Debug hook: flip to 1 to dump every adapter's busy commands on
	 * each watchdog pass (this also keeps mfi_dump_all() referenced).
	 */
	if (0)
		mfi_dump_all();
3803 }
3804