/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	   0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	   &mfi_polled_cmd_timeout, 0,
	   "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

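/*
 * A note on the inbound queue encoding, offered for illustration: command
 * frames are allocated 64-byte aligned, so the low bits of a frame's bus
 * address are free to carry extra information.  On xscale the address is
 * shifted right by 3 and OR'd with the frame count, e.g. a two-frame
 * command whose frame lives at bus address 0x12340 would be posted as
 * (0x12340 >> 3) | 2 = 0x246a.  The ppc/gen2/skinny flavor below instead
 * shifts the frame count left by 1 into the low address bits.
 */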
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MFI_MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from the request and
		 * reply descriptors allocated above to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
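	/*
	 * For illustration, with the default hw.mfi.max_cmds of 128 this
	 * works out to a reply queue of 129 slots: 128 from the sizing
	 * above plus the single entry embedded in struct mfi_hwcomms.
	 */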
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
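	/*
	 * Worked example, assuming MFI_MAXPHYS is 128K, PAGE_SIZE is 4K and
	 * 12-byte struct mfi_sg64 entries: mfi_max_sge is at most 33, so
	 * frames = (12 * 33 - 1) / 64 + 2 = 8, making mfi_cmd_size 512
	 * bytes and the pool large enough for one such command per
	 * firmware slot.
	 */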
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues (e.g. the busy queue) depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure it's
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */
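	/* Words 2 and 3 (the preserved context field and pad) stay intact. */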

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing and
	 * return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);

	config_intrhook_disestablish(&sc->mfi_ich);
}

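/*
 * Reply queue consumer, sketched for illustration: the firmware advances
 * hw_pi as it posts completed command contexts into hw_reply_q[], and the
 * driver consumes entries at hw_ci until the two indexes meet.  The ring
 * has mfi_max_fw_cmds + 1 slots, so with 128 commands ci wraps from 128
 * back to 0.
 */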
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			device_printf(sc->mfi_dev, "syspd %d deleted\n",
			    syspd->pd_id);
			mtx_unlock(&sc->mfi_io_lock);
			bus_topo_lock();
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			bus_topo_unlock();
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
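/*
 * For example, a timestamp of 0x0000003c formats as "60s", while
 * 0xff00003c formats as "boot + 60s".
 */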
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one logged after the last shutdown.  Avoid
		 * acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				bus_topo_lock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				bus_topo_unlock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						bus_topo_lock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						bus_topo_unlock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}
1664 
1665 static int
1666 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1667 {
1668 	struct mfi_command *cm;
1669 	struct mfi_dcmd_frame *dcmd;
1670 	union mfi_evt current_aen, prior_aen;
1671 	struct mfi_evt_detail *ed = NULL;
1672 	int error = 0;
1673 
1674 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1675 
1676 	current_aen.word = locale;
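	/*
	 * If an AEN command is already outstanding and its filter already
	 * covers the requested class and locale set, leave it in place.
	 * Otherwise merge the locales, take the broader class, and abort
	 * the old command so it can be reissued with the new filter.
	 */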
1677 	if (sc->mfi_aen_cm != NULL) {
1678 		prior_aen.word =
1679 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1680 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1681 		    !((prior_aen.members.locale & current_aen.members.locale)
1682 		    ^current_aen.members.locale)) {
1683 			return (0);
1684 		} else {
1685 			prior_aen.members.locale |= current_aen.members.locale;
1686 			if (prior_aen.members.evt_class
1687 			    < current_aen.members.evt_class)
1688 				current_aen.members.evt_class =
1689 				    prior_aen.members.evt_class;
1690 			mfi_abort(sc, &sc->mfi_aen_cm);
1691 		}
1692 	}
1693 
1694 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1695 	    (void **)&ed, sizeof(*ed));
1696 	if (error)
1697 		goto out;
1698 
1699 	dcmd = &cm->cm_frame->dcmd;
1700 	((uint32_t *)&dcmd->mbox)[0] = seq;
1701 	((uint32_t *)&dcmd->mbox)[1] = locale;
1702 	cm->cm_flags = MFI_CMD_DATAIN;
1703 	cm->cm_complete = mfi_aen_complete;
1704 
1705 	sc->last_seq_num = seq;
1706 	sc->mfi_aen_cm = cm;
1707 
1708 	mfi_enqueue_ready(cm);
1709 	mfi_startio(sc);
1710 
1711 out:
1712 	return (error);
1713 }
1714 
1715 static void
1716 mfi_aen_complete(struct mfi_command *cm)
1717 {
1718 	struct mfi_frame_header *hdr;
1719 	struct mfi_softc *sc;
1720 	struct mfi_evt_detail *detail;
1721 	struct mfi_aen *mfi_aen_entry, *tmp;
1722 	int seq = 0, aborted = 0;
1723 
1724 	sc = cm->cm_sc;
1725 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1726 
1727 	if (sc->mfi_aen_cm == NULL)
1728 		return;
1729 
1730 	hdr = &cm->cm_frame->header;
1731 
1732 	if (sc->cm_aen_abort ||
1733 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1734 		sc->cm_aen_abort = 0;
1735 		aborted = 1;
1736 	} else {
1737 		sc->mfi_aen_triggered = 1;
1738 		if (sc->mfi_poll_waiting) {
1739 			sc->mfi_poll_waiting = 0;
1740 			selwakeup(&sc->mfi_select);
1741 		}
1742 		detail = cm->cm_data;
1743 		mfi_queue_evt(sc, detail);
1744 		seq = detail->seq + 1;
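		/*
		 * Deliver SIGIO to processes that registered for AEN
		 * notification.
		 */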
1745 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1746 		    tmp) {
1747 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1748 			    aen_link);
1749 			PROC_LOCK(mfi_aen_entry->p);
1750 			kern_psignal(mfi_aen_entry->p, SIGIO);
1751 			PROC_UNLOCK(mfi_aen_entry->p);
1752 			free(mfi_aen_entry, M_MFIBUF);
1753 		}
1754 	}
1755 
1756 	free(cm->cm_data, M_MFIBUF);
1757 	wakeup(&sc->mfi_aen_cm);
1758 	sc->mfi_aen_cm = NULL;
1759 	mfi_release_command(cm);
1760 
1761 	/* set it up again so the driver can catch more events */
1762 	if (!aborted)
1763 		mfi_aen_setup(sc, seq);
1764 }
1765 
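/* Maximum number of event entries fetched per MFI_DCMD_CTRL_EVENT_GET. */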
1766 #define MAX_EVENTS 15
1767 
1768 static int
1769 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1770 {
1771 	struct mfi_command *cm;
1772 	struct mfi_dcmd_frame *dcmd;
1773 	struct mfi_evt_list *el;
1774 	union mfi_evt class_locale;
1775 	int error, i, seq, size;
1776 
1777 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1778 
1779 	class_locale.members.reserved = 0;
1780 	class_locale.members.locale = mfi_event_locale;
1781 	class_locale.members.evt_class  = mfi_event_class;
1782 
1783 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1784 		* (MAX_EVENTS - 1);
1785 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1786 	if (el == NULL)
1787 		return (ENOMEM);
1788 
1789 	for (seq = start_seq;;) {
1790 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1791 			free(el, M_MFIBUF);
1792 			return (EBUSY);
1793 		}
1794 
1795 		dcmd = &cm->cm_frame->dcmd;
1796 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1797 		dcmd->header.cmd = MFI_CMD_DCMD;
1798 		dcmd->header.timeout = 0;
1799 		dcmd->header.data_len = size;
1800 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1801 		((uint32_t *)&dcmd->mbox)[0] = seq;
1802 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1803 		cm->cm_sg = &dcmd->sgl;
1804 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1805 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1806 		cm->cm_data = el;
1807 		cm->cm_len = size;
1808 
1809 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1810 			device_printf(sc->mfi_dev,
1811 			    "Failed to get controller entries\n");
1812 			mfi_release_command(cm);
1813 			break;
1814 		}
1815 
1816 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1817 		    BUS_DMASYNC_POSTREAD);
1818 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1819 
1820 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1821 			mfi_release_command(cm);
1822 			break;
1823 		}
1824 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1825 			device_printf(sc->mfi_dev,
1826 			    "Error %d fetching controller entries\n",
1827 			    dcmd->header.cmd_status);
1828 			mfi_release_command(cm);
1829 			error = EIO;
1830 			break;
1831 		}
1832 		mfi_release_command(cm);
1833 
1834 		for (i = 0; i < el->count; i++) {
1835 			/*
1836 			 * If this event is newer than 'stop_seq' then
1837 			 * break out of the loop.  Note that the log
1838 			 * is a circular buffer so we have to handle
1839 			 * the case that our stop point is earlier in
1840 			 * the buffer than our start point.
1841 			 */
1842 			if (el->event[i].seq >= stop_seq) {
1843 				if (start_seq <= stop_seq)
1844 					break;
1845 				else if (el->event[i].seq < start_seq)
1846 					break;
1847 			}
1848 			mfi_queue_evt(sc, &el->event[i]);
1849 		}
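		/* Resume the next fetch just past the last returned event. */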
1850 		seq = el->event[el->count - 1].seq + 1;
1851 	}
1852 
1853 	free(el, M_MFIBUF);
1854 	return (error);
1855 }
1856 
1857 static int
1858 mfi_add_ld(struct mfi_softc *sc, int id)
1859 {
1860 	struct mfi_command *cm;
1861 	struct mfi_dcmd_frame *dcmd = NULL;
1862 	struct mfi_ld_info *ld_info = NULL;
1863 	struct mfi_disk_pending *ld_pend;
1864 	int error;
1865 
1866 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1867 
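	/* Record the target on the pending-attach list. */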
1868 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1869 	if (ld_pend != NULL) {
1870 		ld_pend->ld_id = id;
1871 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1872 	}
1873 
1874 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1875 	    (void **)&ld_info, sizeof(*ld_info));
1876 	if (error) {
1877 		device_printf(sc->mfi_dev,
1878 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1879 		if (ld_info)
1880 			free(ld_info, M_MFIBUF);
1881 		return (error);
1882 	}
1883 	cm->cm_flags = MFI_CMD_DATAIN;
1884 	dcmd = &cm->cm_frame->dcmd;
1885 	dcmd->mbox[0] = id;
1886 	if (mfi_wait_command(sc, cm) != 0) {
1887 		device_printf(sc->mfi_dev,
1888 		    "Failed to get logical drive: %d\n", id);
1889 		free(ld_info, M_MFIBUF);
1890 		return (0);
1891 	}
1892 	if (ld_info->ld_config.params.isSSCD != 1)
1893 		mfi_add_ld_complete(cm);
1894 	else {
1895 		mfi_release_command(cm);
		if (ld_info)	/* For SSCD volumes, ld_info is freed here. */
1897 			free(ld_info, M_MFIBUF);
1898 	}
1899 	return (0);
1900 }
1901 
1902 static void
1903 mfi_add_ld_complete(struct mfi_command *cm)
1904 {
1905 	struct mfi_frame_header *hdr;
1906 	struct mfi_ld_info *ld_info;
1907 	struct mfi_softc *sc;
1908 	device_t child;
1909 
1910 	sc = cm->cm_sc;
1911 	hdr = &cm->cm_frame->header;
1912 	ld_info = cm->cm_private;
1913 
1914 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1915 		free(ld_info, M_MFIBUF);
1916 		wakeup(&sc->mfi_map_sync_cm);
1917 		mfi_release_command(cm);
1918 		return;
1919 	}
1920 	wakeup(&sc->mfi_map_sync_cm);
1921 	mfi_release_command(cm);
1922 
1923 	mtx_unlock(&sc->mfi_io_lock);
1924 	bus_topo_lock();
1925 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1926 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1927 		free(ld_info, M_MFIBUF);
1928 		bus_topo_unlock();
1929 		mtx_lock(&sc->mfi_io_lock);
1930 		return;
1931 	}
1932 
1933 	device_set_ivars(child, ld_info);
1934 	device_set_desc(child, "MFI Logical Disk");
1935 	bus_generic_attach(sc->mfi_dev);
1936 	bus_topo_unlock();
1937 	mtx_lock(&sc->mfi_io_lock);
1938 }
1939 
static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1941 {
1942 	struct mfi_command *cm;
1943 	struct mfi_dcmd_frame *dcmd = NULL;
1944 	struct mfi_pd_info *pd_info = NULL;
1945 	struct mfi_system_pending *syspd_pend;
1946 	int error;
1947 
1948 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1949 
1950 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1951 	if (syspd_pend != NULL) {
1952 		syspd_pend->pd_id = id;
1953 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1954 	}
1955 
1956 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1957 		(void **)&pd_info, sizeof(*pd_info));
1958 	if (error) {
1959 		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1961 		    error);
1962 		if (pd_info)
1963 			free(pd_info, M_MFIBUF);
1964 		return (error);
1965 	}
1966 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1967 	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
1969 	dcmd->header.scsi_status = 0;
1970 	dcmd->header.pad0 = 0;
1971 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1972 		device_printf(sc->mfi_dev,
1973 		    "Failed to get physical drive info %d\n", id);
1974 		free(pd_info, M_MFIBUF);
1975 		mfi_release_command(cm);
1976 		return (error);
1977 	}
1978 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1979 	    BUS_DMASYNC_POSTREAD);
1980 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1981 	mfi_add_sys_pd_complete(cm);
1982 	return (0);
1983 }
1984 
1985 static void
1986 mfi_add_sys_pd_complete(struct mfi_command *cm)
1987 {
1988 	struct mfi_frame_header *hdr;
1989 	struct mfi_pd_info *pd_info;
1990 	struct mfi_softc *sc;
1991 	device_t child;
1992 
1993 	sc = cm->cm_sc;
1994 	hdr = &cm->cm_frame->header;
1995 	pd_info = cm->cm_private;
1996 
1997 	if (hdr->cmd_status != MFI_STAT_OK) {
1998 		free(pd_info, M_MFIBUF);
1999 		mfi_release_command(cm);
2000 		return;
2001 	}
2002 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2003 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2004 		    pd_info->ref.v.device_id);
2005 		free(pd_info, M_MFIBUF);
2006 		mfi_release_command(cm);
2007 		return;
2008 	}
2009 	mfi_release_command(cm);
2010 
2011 	mtx_unlock(&sc->mfi_io_lock);
2012 	bus_topo_lock();
2013 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2014 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2015 		free(pd_info, M_MFIBUF);
2016 		bus_topo_unlock();
2017 		mtx_lock(&sc->mfi_io_lock);
2018 		return;
2019 	}
2020 
2021 	device_set_ivars(child, pd_info);
2022 	device_set_desc(child, "MFI System PD");
2023 	bus_generic_attach(sc->mfi_dev);
2024 	bus_topo_unlock();
2025 	mtx_lock(&sc->mfi_io_lock);
2026 }
2027 
2028 static struct mfi_command *
2029 mfi_bio_command(struct mfi_softc *sc)
2030 {
2031 	struct bio *bio;
2032 	struct mfi_command *cm = NULL;
2033 
	/* Reserve two commands to avoid ioctl starvation. */
2035 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2036 		return (NULL);
2037 	}
2038 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2039 		return (NULL);
2040 	}
2041 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2042 		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2044 		cm = mfi_build_syspdio(sc, bio);
2045 	}
	if (cm == NULL)
		mfi_enqueue_bio(sc, bio);
	return (cm);
2049 }
2050 
2051 /*
2052  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2053  */
2054 
2055 int
mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count,
    uint8_t *cdb)
2057 {
2058 	int cdb_len;
2059 
	if (((lba & 0x1fffff) == lba) &&
	    ((block_count & 0xff) == block_count) && (byte2 == 0)) {
2063 		/* We can fit in a 6 byte cdb */
2064 		struct scsi_rw_6 *scsi_cmd;
2065 
2066 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2067 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2068 		scsi_ulto3b(lba, scsi_cmd->addr);
2069 		scsi_cmd->length = block_count & 0xff;
2070 		scsi_cmd->control = 0;
2071 		cdb_len = sizeof(*scsi_cmd);
	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
2073 		/* Need a 10 byte CDB */
2074 		struct scsi_rw_10 *scsi_cmd;
2075 
2076 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2077 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2078 		scsi_cmd->byte2 = byte2;
2079 		scsi_ulto4b(lba, scsi_cmd->addr);
2080 		scsi_cmd->reserved = 0;
2081 		scsi_ulto2b(block_count, scsi_cmd->length);
2082 		scsi_cmd->control = 0;
2083 		cdb_len = sizeof(*scsi_cmd);
2084 	} else if (((block_count & 0xffffffff) == block_count) &&
2085 	    ((lba & 0xffffffff) == lba)) {
2086 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2087 		struct scsi_rw_12 *scsi_cmd;
2088 
2089 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2090 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2091 		scsi_cmd->byte2 = byte2;
2092 		scsi_ulto4b(lba, scsi_cmd->addr);
2093 		scsi_cmd->reserved = 0;
2094 		scsi_ulto4b(block_count, scsi_cmd->length);
2095 		scsi_cmd->control = 0;
2096 		cdb_len = sizeof(*scsi_cmd);
2097 	} else {
		/*
		 * 16 byte CDB.  We only get here if the LBA or the block
		 * count does not fit in 32 bits.
		 */
2102 		struct scsi_rw_16 *scsi_cmd;
2103 
2104 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2105 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2106 		scsi_cmd->byte2 = byte2;
2107 		scsi_u64to8b(lba, scsi_cmd->addr);
2108 		scsi_cmd->reserved = 0;
2109 		scsi_ulto4b(block_count, scsi_cmd->length);
2110 		scsi_cmd->control = 0;
2111 		cdb_len = sizeof(*scsi_cmd);
2112 	}
2113 
	return (cdb_len);
2115 }
2116 
2117 extern char *unmapped_buf;
2118 
2119 static struct mfi_command *
2120 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2121 {
2122 	struct mfi_command *cm;
2123 	struct mfi_pass_frame *pass;
2124 	uint32_t context = 0;
2125 	int flags = 0, blkcount = 0, readop;
2126 	uint8_t cdb_len;
2127 
2128 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2129 
2130 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2131 	    return (NULL);
2132 
2133 	/* Zero out the MFI frame */
2134 	context = cm->cm_frame->header.context;
2135 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2136 	cm->cm_frame->header.context = context;
2137 	pass = &cm->cm_frame->pass;
2138 	bzero(pass->cdb, 16);
2139 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2140 	switch (bio->bio_cmd) {
2141 	case BIO_READ:
2142 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2143 		readop = 1;
2144 		break;
2145 	case BIO_WRITE:
2146 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2147 		readop = 0;
2148 		break;
2149 	default:
2150 		/* TODO: what about BIO_DELETE??? */
2151 		biofinish(bio, NULL, EOPNOTSUPP);
2152 		mfi_enqueue_free(cm);
2153 		return (NULL);
2154 	}
2155 
2156 	/* Cheat with the sector length to avoid a non-constant division */
2157 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2158 	/* Fill the LBA and Transfer length in CDB */
2159 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2160 	    pass->cdb);
2161 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2162 	pass->header.lun_id = 0;
2163 	pass->header.timeout = 0;
2164 	pass->header.flags = 0;
2165 	pass->header.scsi_status = 0;
2166 	pass->header.sense_len = MFI_SENSE_LEN;
2167 	pass->header.data_len = bio->bio_bcount;
2168 	pass->header.cdb_len = cdb_len;
2169 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2170 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2171 	cm->cm_complete = mfi_bio_complete;
2172 	cm->cm_private = bio;
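	/*
	 * The data pages come from the bio itself; unmapped_buf is only a
	 * non-NULL placeholder, and MFI_CMD_BIO makes mfi_mapcmd() use
	 * bus_dmamap_load_bio() on cm_private.
	 */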
2173 	cm->cm_data = unmapped_buf;
2174 	cm->cm_len = bio->bio_bcount;
2175 	cm->cm_sg = &pass->sgl;
2176 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2177 	cm->cm_flags = flags;
2178 
2179 	return (cm);
2180 }
2181 
2182 static struct mfi_command *
2183 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2184 {
2185 	struct mfi_io_frame *io;
2186 	struct mfi_command *cm;
2187 	int flags;
2188 	uint32_t blkcount;
2189 	uint32_t context = 0;
2190 
2191 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2192 
2193 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2194 	    return (NULL);
2195 
2196 	/* Zero out the MFI frame */
2197 	context = cm->cm_frame->header.context;
2198 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2199 	cm->cm_frame->header.context = context;
2200 	io = &cm->cm_frame->io;
2201 	switch (bio->bio_cmd) {
2202 	case BIO_READ:
2203 		io->header.cmd = MFI_CMD_LD_READ;
2204 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2205 		break;
2206 	case BIO_WRITE:
2207 		io->header.cmd = MFI_CMD_LD_WRITE;
2208 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2209 		break;
2210 	default:
2211 		/* TODO: what about BIO_DELETE??? */
2212 		biofinish(bio, NULL, EOPNOTSUPP);
2213 		mfi_enqueue_free(cm);
2214 		return (NULL);
2215 	}
2216 
2217 	/* Cheat with the sector length to avoid a non-constant division */
2218 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2219 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2220 	io->header.timeout = 0;
2221 	io->header.flags = 0;
2222 	io->header.scsi_status = 0;
2223 	io->header.sense_len = MFI_SENSE_LEN;
2224 	io->header.data_len = blkcount;
2225 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2226 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2227 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2228 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2229 	cm->cm_complete = mfi_bio_complete;
2230 	cm->cm_private = bio;
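	/* As in mfi_build_syspdio(), the bio supplies the data pages. */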
2231 	cm->cm_data = unmapped_buf;
2232 	cm->cm_len = bio->bio_bcount;
2233 	cm->cm_sg = &io->sgl;
2234 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2235 	cm->cm_flags = flags;
2236 
2237 	return (cm);
2238 }
2239 
2240 static void
2241 mfi_bio_complete(struct mfi_command *cm)
2242 {
2243 	struct bio *bio;
2244 	struct mfi_frame_header *hdr;
2245 	struct mfi_softc *sc;
2246 
2247 	bio = cm->cm_private;
2248 	hdr = &cm->cm_frame->header;
2249 	sc = cm->cm_sc;
2250 
2251 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2252 		bio->bio_flags |= BIO_ERROR;
2253 		bio->bio_error = EIO;
2254 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2255 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2256 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2257 	} else if (cm->cm_error != 0) {
2258 		bio->bio_flags |= BIO_ERROR;
2259 		bio->bio_error = cm->cm_error;
2260 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2261 		    cm, cm->cm_error);
2262 	}
2263 
2264 	mfi_release_command(cm);
2265 	mfi_disk_complete(bio);
2266 }
2267 
2268 void
2269 mfi_startio(struct mfi_softc *sc)
2270 {
2271 	struct mfi_command *cm;
2272 	struct ccb_hdr *ccbh;
2273 
2274 	for (;;) {
2275 		/* Don't bother if we're short on resources */
2276 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2277 			break;
2278 
2279 		/* Try a command that has already been prepared */
2280 		cm = mfi_dequeue_ready(sc);
2281 
2282 		if (cm == NULL) {
2283 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2284 				cm = sc->mfi_cam_start(ccbh);
2285 		}
2286 
2287 		/* Nope, so look for work on the bioq */
2288 		if (cm == NULL)
2289 			cm = mfi_bio_command(sc);
2290 
2291 		/* No work available, so exit */
2292 		if (cm == NULL)
2293 			break;
2294 
2295 		/* Send the command to the controller */
2296 		if (mfi_mapcmd(sc, cm) != 0) {
2297 			device_printf(sc->mfi_dev, "Failed to startio\n");
2298 			mfi_requeue_ready(cm);
2299 			break;
2300 		}
2301 	}
2302 }
2303 
2304 int
2305 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2306 {
2307 	int error, polled;
2308 
2309 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2310 
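	/*
	 * STP commands have their data mapped in mfi_stp_cmd(), so they
	 * are sent directly; everything else with a data buffer is
	 * DMA-loaded here.
	 */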
	if ((cm->cm_data != NULL) &&
	    (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2312 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2313 		if (cm->cm_flags & MFI_CMD_CCB)
2314 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2315 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2316 			    polled);
2317 		else if (cm->cm_flags & MFI_CMD_BIO)
2318 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2319 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2320 			    polled);
2321 		else
2322 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2323 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2324 			    mfi_data_cb, cm, polled);
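		/*
		 * A deferred mapping returns EINPROGRESS; freeze the queue
		 * until the mfi_data_cb() callback runs and issues the frame.
		 */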
2325 		if (error == EINPROGRESS) {
2326 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2327 			return (0);
2328 		}
2329 	} else {
2330 		error = mfi_send_frame(sc, cm);
2331 	}
2332 
2333 	return (error);
2334 }
2335 
2336 static void
2337 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2338 {
2339 	struct mfi_frame_header *hdr;
2340 	struct mfi_command *cm;
2341 	union mfi_sgl *sgl;
2342 	struct mfi_softc *sc;
2343 	int i, j, first, dir;
2344 	int sge_size, locked;
2345 
2346 	cm = (struct mfi_command *)arg;
2347 	sc = cm->cm_sc;
2348 	hdr = &cm->cm_frame->header;
2349 	sgl = cm->cm_sg;
2350 
	/*
	 * This is an asynchronous callback, so check whether we already
	 * hold the lock: although our caller mfi_mapcmd() asserts that it
	 * holds the lock, it may have been dropped if bus_dmamap_load()
	 * returned before this callback ran.
	 */
2358 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2359 		mtx_lock(&sc->mfi_io_lock);
2360 
2361 	if (error) {
2362 		printf("error %d in callback\n", error);
2363 		cm->cm_error = error;
2364 		mfi_complete(sc, cm);
2365 		goto out;
2366 	}
	/*
	 * Use the IEEE SGL only for I/Os on a SKINNY controller.  For
	 * other commands on a SKINNY controller use either sg32 or sg64,
	 * based on sizeof(bus_addr_t).  Also calculate the total frame
	 * size based on the type of SGL used.
	 */
2373 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2374 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2375 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2376 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2377 		for (i = 0; i < nsegs; i++) {
2378 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2379 			sgl->sg_skinny[i].len = segs[i].ds_len;
2380 			sgl->sg_skinny[i].flag = 0;
2381 		}
2382 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2383 		sge_size = sizeof(struct mfi_sg_skinny);
2384 		hdr->sg_count = nsegs;
2385 	} else {
2386 		j = 0;
2387 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2388 			first = cm->cm_stp_len;
2389 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2390 				sgl->sg32[j].addr = segs[0].ds_addr;
2391 				sgl->sg32[j++].len = first;
2392 			} else {
2393 				sgl->sg64[j].addr = segs[0].ds_addr;
2394 				sgl->sg64[j++].len = first;
2395 			}
2396 		} else
2397 			first = 0;
2398 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2399 			for (i = 0; i < nsegs; i++) {
2400 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2401 				sgl->sg32[j++].len = segs[i].ds_len - first;
2402 				first = 0;
2403 			}
2404 		} else {
2405 			for (i = 0; i < nsegs; i++) {
2406 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2407 				sgl->sg64[j++].len = segs[i].ds_len - first;
2408 				first = 0;
2409 			}
2410 			hdr->flags |= MFI_FRAME_SGL64;
2411 		}
2412 		hdr->sg_count = j;
2413 		sge_size = sc->mfi_sge_size;
2414 	}
2415 
2416 	dir = 0;
2417 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2418 		dir |= BUS_DMASYNC_PREREAD;
2419 		hdr->flags |= MFI_FRAME_DIR_READ;
2420 	}
2421 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2422 		dir |= BUS_DMASYNC_PREWRITE;
2423 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2424 	}
2425 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2426 	cm->cm_flags |= MFI_CMD_MAPPED;
2427 
	/*
	 * A compound frame always contains at least one frame, so the
	 * division below intentionally counts only the extra frames and
	 * does not compensate for the remainder.
	 */
2434 	cm->cm_total_frame_size += (sge_size * nsegs);
2435 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2436 
2437 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2438 		printf("error %d in callback from mfi_send_frame\n", error);
2439 		cm->cm_error = error;
2440 		mfi_complete(sc, cm);
2441 		goto out;
2442 	}
2443 
2444 out:
2445 	/* leave the lock in the state we found it */
2446 	if (locked == 0)
2447 		mtx_unlock(&sc->mfi_io_lock);
2448 
2449 	return;
2450 }
2451 
2452 static int
2453 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2454 {
2455 	int error;
2456 
2457 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2458 
2459 	if (sc->MFA_enabled)
2460 		error = mfi_tbolt_send_frame(sc, cm);
2461 	else
2462 		error = mfi_std_send_frame(sc, cm);
2463 
2464 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2465 		mfi_remove_busy(cm);
2466 
2467 	return (error);
2468 }
2469 
2470 static int
2471 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2472 {
2473 	struct mfi_frame_header *hdr;
2474 	int tm = mfi_polled_cmd_timeout * 1000;
2475 
2476 	hdr = &cm->cm_frame->header;
2477 
2478 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2479 		cm->cm_timestamp = time_uptime;
2480 		mfi_enqueue_busy(cm);
2481 	} else {
2482 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2483 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2484 	}
2485 
2486 	/*
2487 	 * The bus address of the command is aligned on a 64 byte boundary,
2488 	 * leaving the least 6 bits as zero.  For whatever reason, the
2489 	 * hardware wants the address shifted right by three, leaving just
2490 	 * 3 zero bits.  These three bits are then used as a prefetching
2491 	 * hint for the hardware to predict how many frames need to be
2492 	 * fetched across the bus.  If a command has more than 8 frames
2493 	 * then the 3 bits are set to 0x7 and the firmware uses other
2494 	 * information in the command to determine the total amount to fetch.
2495 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2496 	 * is enough for both 32bit and 64bit systems.
2497 	 */
2498 	if (cm->cm_extra_frames > 7)
2499 		cm->cm_extra_frames = 7;
2500 
2501 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2502 
2503 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2504 		return (0);
2505 
2506 	/* This is a polled command, so busy-wait for it to complete. */
2507 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2508 		DELAY(1000);
2509 		tm -= 1;
2510 		if (tm <= 0)
2511 			break;
2512 	}
2513 
2514 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2515 		device_printf(sc->mfi_dev, "Frame %p timed out "
2516 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2517 		return (ETIMEDOUT);
2518 	}
2519 
2520 	return (0);
2521 }
2522 
2523 void
2524 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2525 {
2526 	int dir;
2527 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2528 
2529 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2530 		dir = 0;
2531 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2532 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2533 			dir |= BUS_DMASYNC_POSTREAD;
2534 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2535 			dir |= BUS_DMASYNC_POSTWRITE;
2536 
2537 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2538 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2539 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2540 	}
2541 
2542 	cm->cm_flags |= MFI_CMD_COMPLETED;
2543 
2544 	if (cm->cm_complete != NULL)
2545 		cm->cm_complete(cm);
2546 	else
2547 		wakeup(cm);
2548 }
2549 
2550 static int
2551 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2552 {
2553 	struct mfi_command *cm;
2554 	struct mfi_abort_frame *abort;
2555 	int i = 0, error;
2556 	uint32_t context = 0;
2557 
2558 	mtx_lock(&sc->mfi_io_lock);
2559 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2560 		mtx_unlock(&sc->mfi_io_lock);
2561 		return (EBUSY);
2562 	}
2563 
2564 	/* Zero out the MFI frame */
2565 	context = cm->cm_frame->header.context;
2566 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2567 	cm->cm_frame->header.context = context;
2568 
2569 	abort = &cm->cm_frame->abort;
2570 	abort->header.cmd = MFI_CMD_ABORT;
2571 	abort->header.flags = 0;
2572 	abort->header.scsi_status = 0;
2573 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2574 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2575 	abort->abort_mfi_addr_hi =
2576 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2577 	cm->cm_data = NULL;
2578 	cm->cm_flags = MFI_CMD_POLLED;
2579 
2580 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2581 		device_printf(sc->mfi_dev, "failed to abort command\n");
2582 	mfi_release_command(cm);
2583 
2584 	mtx_unlock(&sc->mfi_io_lock);
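	/*
	 * Wait up to 25 seconds (five 5-second sleeps) for the aborted
	 * command's completion handler to clear *cm_abort.
	 */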
2585 	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2588 		i++;
2589 	}
2590 	if (*cm_abort != NULL) {
2591 		/* Force a complete if command didn't abort */
2592 		mtx_lock(&sc->mfi_io_lock);
2593 		(*cm_abort)->cm_complete(*cm_abort);
2594 		mtx_unlock(&sc->mfi_io_lock);
2595 	}
2596 
2597 	return (error);
2598 }
2599 
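/*
 * Write a buffer to a logical disk with a polled command; intended for
 * the crash dump path, which cannot sleep.
 */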
2600 int
2601 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2602      int len)
2603 {
2604 	struct mfi_command *cm;
2605 	struct mfi_io_frame *io;
2606 	int error;
2607 	uint32_t context = 0;
2608 
2609 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2610 		return (EBUSY);
2611 
2612 	/* Zero out the MFI frame */
2613 	context = cm->cm_frame->header.context;
2614 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2615 	cm->cm_frame->header.context = context;
2616 
2617 	io = &cm->cm_frame->io;
2618 	io->header.cmd = MFI_CMD_LD_WRITE;
2619 	io->header.target_id = id;
2620 	io->header.timeout = 0;
2621 	io->header.flags = 0;
2622 	io->header.scsi_status = 0;
2623 	io->header.sense_len = MFI_SENSE_LEN;
2624 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2625 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2626 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2627 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2628 	io->lba_lo = lba & 0xffffffff;
2629 	cm->cm_data = virt;
2630 	cm->cm_len = len;
2631 	cm->cm_sg = &io->sgl;
2632 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2633 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2634 
2635 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2636 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2637 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2638 	    BUS_DMASYNC_POSTWRITE);
2639 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2640 	mfi_release_command(cm);
2641 
2642 	return (error);
2643 }
2644 
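/*
 * As above, but the dump target is a system PD, so the write is issued
 * as a SCSI pass-through command.
 */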
2645 int
2646 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2647     int len)
2648 {
2649 	struct mfi_command *cm;
2650 	struct mfi_pass_frame *pass;
2651 	int error, readop, cdb_len;
2652 	uint32_t blkcount;
2653 
2654 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2655 		return (EBUSY);
2656 
2657 	pass = &cm->cm_frame->pass;
2658 	bzero(pass->cdb, 16);
2659 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2660 
2661 	readop = 0;
2662 	blkcount = howmany(len, MFI_SECTOR_LEN);
2663 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2664 	pass->header.target_id = id;
2665 	pass->header.timeout = 0;
2666 	pass->header.flags = 0;
2667 	pass->header.scsi_status = 0;
2668 	pass->header.sense_len = MFI_SENSE_LEN;
2669 	pass->header.data_len = len;
2670 	pass->header.cdb_len = cdb_len;
2671 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2672 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2673 	cm->cm_data = virt;
2674 	cm->cm_len = len;
2675 	cm->cm_sg = &pass->sgl;
2676 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2677 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2678 
2679 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2680 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2681 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2682 	    BUS_DMASYNC_POSTWRITE);
2683 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2684 	mfi_release_command(cm);
2685 
2686 	return (error);
2687 }
2688 
2689 static int
2690 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2691 {
2692 	struct mfi_softc *sc;
2693 	int error;
2694 
2695 	sc = dev->si_drv1;
2696 
2697 	mtx_lock(&sc->mfi_io_lock);
2698 	if (sc->mfi_detaching)
2699 		error = ENXIO;
2700 	else {
2701 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2702 		error = 0;
2703 	}
2704 	mtx_unlock(&sc->mfi_io_lock);
2705 
2706 	return (error);
2707 }
2708 
2709 static int
2710 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2711 {
2712 	struct mfi_softc *sc;
2713 	struct mfi_aen *mfi_aen_entry, *tmp;
2714 
2715 	sc = dev->si_drv1;
2716 
2717 	mtx_lock(&sc->mfi_io_lock);
2718 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2719 
2720 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2721 		if (mfi_aen_entry->p == curproc) {
2722 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2723 			    aen_link);
2724 			free(mfi_aen_entry, M_MFIBUF);
2725 		}
2726 	}
2727 	mtx_unlock(&sc->mfi_io_lock);
2728 	return (0);
2729 }
2730 
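/*
 * DCMDs that alter the volume configuration must serialize against the
 * probe routines; take the config sx lock for those opcodes and report
 * to the caller whether it was taken.
 */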
2731 static int
2732 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2733 {
2734 
2735 	switch (opcode) {
2736 	case MFI_DCMD_LD_DELETE:
2737 	case MFI_DCMD_CFG_ADD:
2738 	case MFI_DCMD_CFG_CLEAR:
2739 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2740 		sx_xlock(&sc->mfi_config_lock);
2741 		return (1);
2742 	default:
2743 		return (0);
2744 	}
2745 }
2746 
2747 static void
2748 mfi_config_unlock(struct mfi_softc *sc, int locked)
2749 {
2750 
2751 	if (locked)
2752 		sx_xunlock(&sc->mfi_config_lock);
2753 }
2754 
2755 /*
2756  * Perform pre-issue checks on commands from userland and possibly veto
2757  * them.
2758  */
2759 static int
2760 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2761 {
2762 	struct mfi_disk *ld, *ld2;
2763 	int error;
2764 	struct mfi_system_pd *syspd = NULL;
2765 	uint16_t syspd_id;
2766 	uint16_t *mbox;
2767 
2768 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2769 	error = 0;
2770 	switch (cm->cm_frame->dcmd.opcode) {
2771 	case MFI_DCMD_LD_DELETE:
2772 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2773 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2774 				break;
2775 		}
2776 		if (ld == NULL)
2777 			error = ENOENT;
2778 		else
2779 			error = mfi_disk_disable(ld);
2780 		break;
2781 	case MFI_DCMD_CFG_CLEAR:
2782 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2783 			error = mfi_disk_disable(ld);
2784 			if (error)
2785 				break;
2786 		}
2787 		if (error) {
2788 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2789 				if (ld2 == ld)
2790 					break;
2791 				mfi_disk_enable(ld2);
2792 			}
2793 		}
2794 		break;
2795 	case MFI_DCMD_PD_STATE_SET:
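		/* mbox[0] carries the PD id, mbox[2] the requested state. */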
2796 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2797 		syspd_id = mbox[0];
2798 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2799 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2800 				if (syspd->pd_id == syspd_id)
2801 					break;
2802 			}
		} else
			break;
2806 		if (syspd)
2807 			error = mfi_syspd_disable(syspd);
2808 		break;
2809 	default:
2810 		break;
2811 	}
2812 	return (error);
2813 }
2814 
2815 /* Perform post-issue checks on commands from userland. */
2816 static void
2817 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2818 {
2819 	struct mfi_disk *ld, *ldn;
2820 	struct mfi_system_pd *syspd = NULL;
2821 	uint16_t syspd_id;
2822 	uint16_t *mbox;
2823 
2824 	switch (cm->cm_frame->dcmd.opcode) {
2825 	case MFI_DCMD_LD_DELETE:
2826 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2827 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2828 				break;
2829 		}
		KASSERT(ld != NULL, ("volume disappeared"));
2831 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2832 			mtx_unlock(&sc->mfi_io_lock);
2833 			bus_topo_lock();
2834 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2835 			bus_topo_unlock();
2836 			mtx_lock(&sc->mfi_io_lock);
2837 		} else
2838 			mfi_disk_enable(ld);
2839 		break;
2840 	case MFI_DCMD_CFG_CLEAR:
2841 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2842 			mtx_unlock(&sc->mfi_io_lock);
2843 			bus_topo_lock();
2844 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2845 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2846 			}
2847 			bus_topo_unlock();
2848 			mtx_lock(&sc->mfi_io_lock);
2849 		} else {
2850 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2851 				mfi_disk_enable(ld);
2852 		}
2853 		break;
2854 	case MFI_DCMD_CFG_ADD:
2855 		mfi_ldprobe(sc);
2856 		break;
2857 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2858 		mfi_ldprobe(sc);
2859 		break;
2860 	case MFI_DCMD_PD_STATE_SET:
2861 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2862 		syspd_id = mbox[0];
2863 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2865 				if (syspd->pd_id == syspd_id)
2866 					break;
2867 			}
		} else
			break;
2871 		/* If the transition fails then enable the syspd again */
2872 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2873 			mfi_syspd_enable(syspd);
2874 		break;
2875 	}
2876 }
2877 
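/*
 * Determine whether a CFG_ADD or LD_DELETE command targets an SSCD
 * volume.  SSCD volumes have no mfid child device, so the usual
 * pre/post checks must be skipped for them.
 */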
2878 static int
2879 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2880 {
2881 	struct mfi_config_data *conf_data;
2882 	struct mfi_command *ld_cm = NULL;
2883 	struct mfi_ld_info *ld_info = NULL;
2884 	struct mfi_ld_config *ld;
2885 	char *p;
2886 	int error = 0;
2887 
2888 	conf_data = (struct mfi_config_data *)cm->cm_data;
2889 
2890 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2891 		p = (char *)conf_data->array;
2892 		p += conf_data->array_size * conf_data->array_count;
2893 		ld = (struct mfi_ld_config *)p;
2894 		if (ld->params.isSSCD == 1)
2895 			error = 1;
2896 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2898 		    (void **)&ld_info, sizeof(*ld_info));
2899 		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "MFI_DCMD_LD_GET_INFO %d\n", error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return (0);
2905 		}
2906 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2907 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2908 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2909 		if (mfi_wait_command(sc, ld_cm) != 0) {
2910 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2911 			mfi_release_command(ld_cm);
2912 			free(ld_info, M_MFIBUF);
			return (0);
2914 		}
2915 
		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return (0);
		}
		ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2923 
2924 		if (ld_info->ld_config.params.isSSCD == 1)
2925 			error = 1;
2926 
2927 		mfi_release_command(ld_cm);
2928 		free(ld_info, M_MFIBUF);
2929 	}
	return (error);
2931 }
2932 
2933 static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	struct megasas_sge *kern_sge;
	int sge_size, error;

	ioc = (struct mfi_ioc_packet *)arg;
	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2945 
2946 	if (sizeof(bus_addr_t) == 8) {
2947 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2948 		cm->cm_extra_frames = 2;
2949 		sge_size = sizeof(struct mfi_sg64);
2950 	} else {
		cm->cm_extra_frames =
		    (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2952 		sge_size = sizeof(struct mfi_sg32);
2953 	}
2954 
2955 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
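	/*
	 * Allocate a DMA-safe kernel bounce buffer for each user iovec
	 * and point both the kernel SGE array and the frame's SGL at it.
	 */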
	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2958 			1, 0,			/* algnmnt, boundary */
2959 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2960 			BUS_SPACE_MAXADDR,	/* highaddr */
2961 			NULL, NULL,		/* filter, filterarg */
2962 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2963 			2,			/* nsegments */
2964 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2965 			BUS_DMA_ALLOCNOW,	/* flags */
2966 			NULL, NULL,		/* lockfunc, lockarg */
2967 			&sc->mfi_kbuff_arr_dmat[i])) {
2968 			device_printf(sc->mfi_dev,
2969 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2970 			return (ENOMEM);
2971 		}
2972 
2973 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2974 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2975 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2976 			device_printf(sc->mfi_dev,
2977 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2978 			return (ENOMEM);
2979 		}
2980 
2981 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2982 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2983 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2984 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2985 
2986 		if (!sc->kbuff_arr[i]) {
2987 			device_printf(sc->mfi_dev,
2988 			    "Could not allocate memory for kbuff_arr info\n");
			return (ENOMEM);
2990 		}
2991 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2992 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2993 
2994 		if (sizeof(bus_addr_t) == 8) {
2995 			cm->cm_frame->stp.sgl.sg64[i].addr =
2996 			    kern_sge[i].phys_addr;
2997 			cm->cm_frame->stp.sgl.sg64[i].len =
2998 			    ioc->mfi_sgl[i].iov_len;
2999 		} else {
3000 			cm->cm_frame->stp.sgl.sg32[i].addr =
3001 			    kern_sge[i].phys_addr;
3002 			cm->cm_frame->stp.sgl.sg32[i].len =
3003 			    ioc->mfi_sgl[i].iov_len;
3004 		}
3005 
3006 		error = copyin(ioc->mfi_sgl[i].iov_base,
3007 		    sc->kbuff_arr[i],
3008 		    ioc->mfi_sgl[i].iov_len);
3009 		if (error != 0) {
3010 			device_printf(sc->mfi_dev, "Copy in failed\n");
			return (error);
3012 		}
3013 	}
3014 
	cm->cm_flags |= MFI_CMD_MAPPED;
	return (0);
3017 }
3018 
3019 static int
3020 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3021 {
3022 	struct mfi_command *cm;
3023 	struct mfi_dcmd_frame *dcmd;
3024 	void *ioc_buf = NULL;
3025 	uint32_t context;
3026 	int error = 0, locked;
3027 
3028 	if (ioc->buf_size > 0) {
3029 		if (ioc->buf_size > 1024 * 1024)
3030 			return (ENOMEM);
3031 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3032 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3033 		if (error) {
3034 			device_printf(sc->mfi_dev, "failed to copyin\n");
3035 			free(ioc_buf, M_MFIBUF);
3036 			return (error);
3037 		}
3038 	}
3039 
3040 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3041 
3042 	mtx_lock(&sc->mfi_io_lock);
3043 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3044 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3045 
3046 	/* Save context for later */
3047 	context = cm->cm_frame->header.context;
3048 
3049 	dcmd = &cm->cm_frame->dcmd;
3050 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3051 
3052 	cm->cm_sg = &dcmd->sgl;
3053 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3054 	cm->cm_data = ioc_buf;
3055 	cm->cm_len = ioc->buf_size;
3056 
3057 	/* restore context */
3058 	cm->cm_frame->header.context = context;
3059 
3060 	/* Cheat since we don't know if we're writing or reading */
3061 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3062 
3063 	error = mfi_check_command_pre(sc, cm);
3064 	if (error)
3065 		goto out;
3066 
3067 	error = mfi_wait_command(sc, cm);
3068 	if (error) {
3069 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3070 		goto out;
3071 	}
3072 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3073 	mfi_check_command_post(sc, cm);
3074 out:
3075 	mfi_release_command(cm);
3076 	mtx_unlock(&sc->mfi_io_lock);
3077 	mfi_config_unlock(sc, locked);
3078 	if (ioc->buf_size > 0)
3079 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3080 	if (ioc_buf)
3081 		free(ioc_buf, M_MFIBUF);
3082 	return (error);
3083 }
3084 
3085 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3086 
3087 static int
3088 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3089 {
3090 	struct mfi_softc *sc;
3091 	union mfi_statrequest *ms;
3092 	struct mfi_ioc_packet *ioc;
3093 #ifdef COMPAT_FREEBSD32
3094 	struct mfi_ioc_packet32 *ioc32;
3095 #endif
3096 	struct mfi_ioc_aen *aen;
3097 	struct mfi_command *cm = NULL;
3098 	uint32_t context = 0;
3099 	union mfi_sense_ptr sense_ptr;
3100 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3101 	size_t len;
	int i;
3103 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3104 #ifdef COMPAT_FREEBSD32
3105 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3106 	struct mfi_ioc_passthru iop_swab;
3107 #endif
3108 	int error, locked;
3109 	sc = dev->si_drv1;
3110 	error = 0;
3111 
	if (sc->adpreset)
		return (EBUSY);

	if (sc->hw_crit_error)
		return (EBUSY);

	if (sc->issuepend_done == 0)
		return (EBUSY);
3120 
3121 	switch (cmd) {
3122 	case MFIIO_STATS:
3123 		ms = (union mfi_statrequest *)arg;
3124 		switch (ms->ms_item) {
3125 		case MFIQ_FREE:
3126 		case MFIQ_BIO:
3127 		case MFIQ_READY:
3128 		case MFIQ_BUSY:
3129 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3130 			    sizeof(struct mfi_qstat));
3131 			break;
3132 		default:
3133 			error = ENOIOCTL;
3134 			break;
3135 		}
3136 		break;
3137 	case MFIIO_QUERY_DISK:
3138 	{
3139 		struct mfi_query_disk *qd;
3140 		struct mfi_disk *ld;
3141 
3142 		qd = (struct mfi_query_disk *)arg;
3143 		mtx_lock(&sc->mfi_io_lock);
3144 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3145 			if (ld->ld_id == qd->array_id)
3146 				break;
3147 		}
3148 		if (ld == NULL) {
3149 			qd->present = 0;
3150 			mtx_unlock(&sc->mfi_io_lock);
3151 			return (0);
3152 		}
3153 		qd->present = 1;
3154 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3155 			qd->open = 1;
3156 		bzero(qd->devname, SPECNAMELEN + 1);
3157 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3158 		mtx_unlock(&sc->mfi_io_lock);
3159 		break;
3160 	}
3161 	case MFI_CMD:
3162 #ifdef COMPAT_FREEBSD32
3163 	case MFI_CMD32:
3164 #endif
		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;
		adapter = ioc->mfi_adapter_no;
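		/*
		 * Requests sent to mfi0 may address any adapter; redirect
		 * to the target controller's softc.
		 */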
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
		}
3175 		mtx_lock(&sc->mfi_io_lock);
3176 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3177 			mtx_unlock(&sc->mfi_io_lock);
3178 			return (EBUSY);
3179 		}
3180 		mtx_unlock(&sc->mfi_io_lock);
3181 		locked = 0;
3182 
3183 		/*
3184 		 * save off original context since copying from user
3185 		 * will clobber some data
3186 		 */
3187 		context = cm->cm_frame->header.context;
3188 		cm->cm_frame->header.context = cm->cm_index;
3189 
3190 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3191 		    2 * MEGAMFI_FRAME_SIZE);
3192 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3193 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3194 		cm->cm_frame->header.scsi_status = 0;
3195 		cm->cm_frame->header.pad0 = 0;
3196 		if (ioc->mfi_sge_count) {
3197 			cm->cm_sg =
3198 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3199 		}
3200 		cm->cm_flags = 0;
3201 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3202 			cm->cm_flags |= MFI_CMD_DATAIN;
3203 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3204 			cm->cm_flags |= MFI_CMD_DATAOUT;
3205 		/* Legacy app shim */
3206 		if (cm->cm_flags == 0)
3207 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3208 		cm->cm_len = cm->cm_frame->header.data_len;
3209 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3210 #ifdef COMPAT_FREEBSD32
3211 			if (cmd == MFI_CMD) {
3212 #endif
3213 				/* Native */
3214 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3215 #ifdef COMPAT_FREEBSD32
3216 			} else {
3217 				/* 32bit on 64bit */
3218 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3219 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3220 			}
3221 #endif
3222 			cm->cm_len += cm->cm_stp_len;
3223 		}
3224 		if (cm->cm_len &&
3225 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3226 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3227 			    M_WAITOK | M_ZERO);
3228 		} else {
			cm->cm_data = NULL;
3230 		}
3231 
3232 		/* restore header context */
3233 		cm->cm_frame->header.context = context;
3234 
3235 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			error = mfi_stp_cmd(sc, cm, arg);
			if (error != 0)
				goto out;
3239 		} else {
3240 			temp = data;
3241 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3242 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3243 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3244 #ifdef COMPAT_FREEBSD32
3245 					if (cmd == MFI_CMD) {
3246 #endif
3247 						/* Native */
3248 						addr = ioc->mfi_sgl[i].iov_base;
3249 						len = ioc->mfi_sgl[i].iov_len;
3250 #ifdef COMPAT_FREEBSD32
3251 					} else {
3252 						/* 32bit on 64bit */
3253 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3254 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3255 						len = ioc32->mfi_sgl[i].iov_len;
3256 					}
3257 #endif
3258 					error = copyin(addr, temp, len);
3259 					if (error != 0) {
3260 						device_printf(sc->mfi_dev,
3261 						    "Copy in failed\n");
3262 						goto out;
3263 					}
3264 					temp = &temp[len];
3265 				}
3266 			}
3267 		}
3268 
3269 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3270 			locked = mfi_config_lock(sc,
3271 			     cm->cm_frame->dcmd.opcode);
3272 
3273 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3274 			cm->cm_frame->pass.sense_addr_lo =
3275 			    (uint32_t)cm->cm_sense_busaddr;
3276 			cm->cm_frame->pass.sense_addr_hi =
3277 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3278 		}
3279 		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
3281 		if (!skip_pre_post) {
3282 			error = mfi_check_command_pre(sc, cm);
3283 			if (error) {
3284 				mtx_unlock(&sc->mfi_io_lock);
3285 				goto out;
3286 			}
3287 		}
3288 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3289 			device_printf(sc->mfi_dev,
			    "Controller command failed\n");
3291 			mtx_unlock(&sc->mfi_io_lock);
3292 			goto out;
3293 		}
3294 		if (!skip_pre_post) {
3295 			mfi_check_command_post(sc, cm);
3296 		}
3297 		mtx_unlock(&sc->mfi_io_lock);
3298 
3299 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3300 			temp = data;
3301 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3302 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3303 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3304 #ifdef COMPAT_FREEBSD32
3305 					if (cmd == MFI_CMD) {
3306 #endif
3307 						/* Native */
3308 						addr = ioc->mfi_sgl[i].iov_base;
3309 						len = ioc->mfi_sgl[i].iov_len;
3310 #ifdef COMPAT_FREEBSD32
3311 					} else {
3312 						/* 32bit on 64bit */
3313 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3314 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3315 						len = ioc32->mfi_sgl[i].iov_len;
3316 					}
3317 #endif
3318 					error = copyout(temp, addr, len);
3319 					if (error != 0) {
3320 						device_printf(sc->mfi_dev,
3321 						    "Copy out failed\n");
3322 						goto out;
3323 					}
3324 					temp = &temp[len];
3325 				}
3326 			}
3327 		}
3328 
3329 		if (ioc->mfi_sense_len) {
3330 			/* get user-space sense ptr then copy out sense */
3331 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3332 			    &sense_ptr.sense_ptr_data[0],
3333 			    sizeof(sense_ptr.sense_ptr_data));
3334 #ifdef COMPAT_FREEBSD32
3335 			if (cmd != MFI_CMD) {
				/*
				 * Not 64-bit native, so zero out any
				 * address over 32 bits.
				 */
3339 				sense_ptr.addr.high = 0;
3340 			}
3341 #endif
3342 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3343 			    ioc->mfi_sense_len);
3344 			if (error != 0) {
3345 				device_printf(sc->mfi_dev,
3346 				    "Copy out failed\n");
3347 				goto out;
3348 			}
3349 		}
3350 
3351 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3352 out:
3353 		mfi_config_unlock(sc, locked);
3354 		if (data)
3355 			free(data, M_MFIBUF);
3356 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3357 			for (i = 0; i < 2; i++) {
3358 				if (sc->kbuff_arr[i]) {
3359 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3360 						bus_dmamap_unload(
3361 						    sc->mfi_kbuff_arr_dmat[i],
3362 						    sc->mfi_kbuff_arr_dmamap[i]
3363 						    );
3364 					if (sc->kbuff_arr[i] != NULL)
3365 						bus_dmamem_free(
3366 						    sc->mfi_kbuff_arr_dmat[i],
3367 						    sc->kbuff_arr[i],
3368 						    sc->mfi_kbuff_arr_dmamap[i]
3369 						    );
3370 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3371 						bus_dma_tag_destroy(
3372 						    sc->mfi_kbuff_arr_dmat[i]);
3373 				}
3374 			}
3375 		}
3376 		if (cm) {
3377 			mtx_lock(&sc->mfi_io_lock);
3378 			mfi_release_command(cm);
3379 			mtx_unlock(&sc->mfi_io_lock);
3380 		}
3381 
3382 		break;
3383 		}
3384 	case MFI_SET_AEN:
3385 		aen = (struct mfi_ioc_aen *)arg;
3386 		mtx_lock(&sc->mfi_io_lock);
3387 		error = mfi_aen_register(sc, aen->aen_seq_num,
3388 		    aen->aen_class_locale);
3389 		mtx_unlock(&sc->mfi_io_lock);
3390 
3391 		break;
3392 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3393 		{
3394 			devclass_t devclass;
3395 			struct mfi_linux_ioc_packet l_ioc;
3396 			int adapter;
3397 
3398 			devclass = devclass_find("mfi");
3399 			if (devclass == NULL)
3400 				return (ENOENT);
3401 
3402 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3403 			if (error)
3404 				return (error);
3405 			adapter = l_ioc.lioc_adapter_no;
3406 			sc = devclass_get_softc(devclass, adapter);
3407 			if (sc == NULL)
3408 				return (ENOENT);
3409 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3410 			    cmd, arg, flag, td));
3411 			break;
3412 		}
3413 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3414 		{
3415 			devclass_t devclass;
3416 			struct mfi_linux_ioc_aen l_aen;
3417 			int adapter;
3418 
3419 			devclass = devclass_find("mfi");
3420 			if (devclass == NULL)
3421 				return (ENOENT);
3422 
3423 			error = copyin(arg, &l_aen, sizeof(l_aen));
3424 			if (error)
3425 				return (error);
3426 			adapter = l_aen.laen_adapter_no;
3427 			sc = devclass_get_softc(devclass, adapter);
3428 			if (sc == NULL)
3429 				return (ENOENT);
3430 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3431 			    cmd, arg, flag, td));
3432 			break;
3433 		}
3434 #ifdef COMPAT_FREEBSD32
3435 	case MFIIO_PASSTHRU32:
3436 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3437 			error = ENOTTY;
3438 			break;
3439 		}
3440 		iop_swab.ioc_frame	= iop32->ioc_frame;
3441 		iop_swab.buf_size	= iop32->buf_size;
3442 		iop_swab.buf		= PTRIN(iop32->buf);
3443 		iop			= &iop_swab;
3444 		/* FALLTHROUGH */
3445 #endif
3446 	case MFIIO_PASSTHRU:
3447 		error = mfi_user_command(sc, iop);
3448 #ifdef COMPAT_FREEBSD32
3449 		if (cmd == MFIIO_PASSTHRU32)
3450 			iop32->ioc_frame = iop_swab.ioc_frame;
3451 #endif
3452 		break;
3453 	default:
3454 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3455 		error = ENOTTY;
3456 		break;
3457 	}
3458 
3459 	return (error);
3460 }
3461 
3462 static int
3463 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3464 {
3465 	struct mfi_softc *sc;
3466 	struct mfi_linux_ioc_packet l_ioc;
3467 	struct mfi_linux_ioc_aen l_aen;
3468 	struct mfi_command *cm = NULL;
3469 	struct mfi_aen *mfi_aen_entry;
3470 	union mfi_sense_ptr sense_ptr;
3471 	uint32_t context = 0;
3472 	uint8_t *data = NULL, *temp;
3473 	int i;
3474 	int error, locked;
3475 
3476 	sc = dev->si_drv1;
3477 	error = 0;
3478 	switch (cmd) {
3479 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3480 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3481 		if (error != 0)
3482 			return (error);
3483 
3484 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3485 			return (EINVAL);
3486 		}
3487 
3488 		mtx_lock(&sc->mfi_io_lock);
3489 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3490 			mtx_unlock(&sc->mfi_io_lock);
3491 			return (EBUSY);
3492 		}
3493 		mtx_unlock(&sc->mfi_io_lock);
3494 		locked = 0;
3495 
3496 		/*
3497 		 * save off original context since copying from user
3498 		 * will clobber some data
3499 		 */
3500 		context = cm->cm_frame->header.context;
3501 
3502 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3503 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3504 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3505 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3506 		cm->cm_frame->header.scsi_status = 0;
3507 		cm->cm_frame->header.pad0 = 0;
3508 		if (l_ioc.lioc_sge_count)
3509 			cm->cm_sg =
3510 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3511 		cm->cm_flags = 0;
3512 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3513 			cm->cm_flags |= MFI_CMD_DATAIN;
3514 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3515 			cm->cm_flags |= MFI_CMD_DATAOUT;
3516 		cm->cm_len = cm->cm_frame->header.data_len;
3517 		if (cm->cm_len &&
3518 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3519 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3520 			    M_WAITOK | M_ZERO);
3521 		} else {
3522 			cm->cm_data = NULL;
3523 		}
3524 
3525 		/* restore header context */
3526 		cm->cm_frame->header.context = context;
3527 
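		/*
		 * For DATAOUT commands, gather the user's SG segments into
		 * the single contiguous kernel bounce buffer.
		 */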
3528 		temp = data;
3529 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3530 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3531 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3532 				       temp,
3533 				       l_ioc.lioc_sgl[i].iov_len);
3534 				if (error != 0) {
3535 					device_printf(sc->mfi_dev,
3536 					    "Copy in failed\n");
3537 					goto out;
3538 				}
3539 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3540 			}
3541 		}
3542 
3543 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3544 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3545 
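		/*
		 * For physical-drive SCSI passthru, point the firmware at
		 * the command's preallocated DMA-able sense buffer.
		 */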
3546 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3547 			cm->cm_frame->pass.sense_addr_lo =
3548 			    (uint32_t)cm->cm_sense_busaddr;
3549 			cm->cm_frame->pass.sense_addr_hi =
3550 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3551 		}
3552 
3553 		mtx_lock(&sc->mfi_io_lock);
3554 		error = mfi_check_command_pre(sc, cm);
3555 		if (error) {
3556 			mtx_unlock(&sc->mfi_io_lock);
3557 			goto out;
3558 		}
3559 
3560 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3561 			device_printf(sc->mfi_dev,
3562 			    "Controller polled failed\n");
3563 			mtx_unlock(&sc->mfi_io_lock);
3564 			goto out;
3565 		}
3566 
3567 		mfi_check_command_post(sc, cm);
3568 		mtx_unlock(&sc->mfi_io_lock);
3569 
3570 		temp = data;
3571 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3572 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3573 				error = copyout(temp,
3574 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3575 					l_ioc.lioc_sgl[i].iov_len);
3576 				if (error != 0) {
3577 					device_printf(sc->mfi_dev,
3578 					    "Copy out failed\n");
3579 					goto out;
3580 				}
3581 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3582 			}
3583 		}
3584 
3585 		if (l_ioc.lioc_sense_len) {
3586 			/* get user-space sense ptr then copy out sense */
3587 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3588                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3589 			    &sense_ptr.sense_ptr_data[0],
3590 			    sizeof(sense_ptr.sense_ptr_data));
3591 #ifdef __amd64__
3592 			/*
3593 			 * only 32-bit Linux is supported, so zero out
3594 			 * the upper 32 address bits
3595 			 */
3596 			sense_ptr.addr.high = 0;
3597 #endif
3598 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3599 			    l_ioc.lioc_sense_len);
3600 			if (error != 0) {
3601 				device_printf(sc->mfi_dev,
3602 				    "Copy out failed\n");
3603 				goto out;
3604 			}
3605 		}
3606 
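		/* Hand the firmware's completion status back to the caller. */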
3607 		error = copyout(&cm->cm_frame->header.cmd_status,
3608 			&((struct mfi_linux_ioc_packet*)arg)
3609 			->lioc_frame.hdr.cmd_status,
3610 			1);
3611 		if (error != 0) {
3612 			device_printf(sc->mfi_dev,
3613 				      "Copy out failed\n");
3614 			goto out;
3615 		}
3616 
3617 out:
3618 		mfi_config_unlock(sc, locked);
3619 		if (data)
3620 			free(data, M_MFIBUF);
3621 		if (cm) {
3622 			mtx_lock(&sc->mfi_io_lock);
3623 			mfi_release_command(cm);
3624 			mtx_unlock(&sc->mfi_io_lock);
3625 		}
3626 
3627 		return (error);
3628 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3629 		error = copyin(arg, &l_aen, sizeof(l_aen));
3630 		if (error != 0)
3631 			return (error);
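		/*
		 * Remember the calling process so it can be notified when
		 * the registered AEN fires.
		 */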
3632 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3633 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3634 		    M_WAITOK);
3635 		mtx_lock(&sc->mfi_io_lock);
3636 		/* M_WAITOK allocations cannot fail, so no NULL check is needed. */
3637 		mfi_aen_entry->p = curproc;
3638 		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3639 		    aen_link);
3641 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3642 		    l_aen.laen_class_locale);
3643 
3644 		if (error != 0) {
3645 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3646 			    aen_link);
3647 			free(mfi_aen_entry, M_MFIBUF);
3648 		}
3649 		mtx_unlock(&sc->mfi_io_lock);
3650 
3651 		return (error);
3652 	default:
3653 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3654 		error = ENOTTY;	/* match the native ioctl handler */
3655 		break;
3656 	}
3657 
3658 	return (error);
3659 }
3660 
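/*
 * poll(2) handler for the control device: readable once an AEN has
 * triggered; POLLERR when no AEN command is outstanding.
 */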
3661 static int
3662 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3663 {
3664 	struct mfi_softc *sc;
3665 	int revents = 0;
3666 
3667 	sc = dev->si_drv1;
3668 
3669 	if (poll_events & (POLLIN | POLLRDNORM)) {
3670 		if (sc->mfi_aen_triggered != 0) {
3671 			revents |= poll_events & (POLLIN | POLLRDNORM);
3672 			sc->mfi_aen_triggered = 0;
3673 		}
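		/* No AEN command outstanding: no event can ever arrive. */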
3674 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3675 			revents |= POLLERR;
3676 		}
3677 	}
3678 
3679 	if (revents == 0) {
3680 		if (poll_events & (POLLIN | POLLRDNORM)) {
3681 			sc->mfi_poll_waiting = 1;
3682 			selrecord(td, &sc->mfi_select);
3683 		}
3684 	}
3685 
3686 	return (revents);
3687 }
3688 
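/*
 * Debugging aid: walk every mfi adapter in the system and report any busy
 * command that is older than the configured command timeout.
 */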
3689 static void
3690 mfi_dump_all(void)
3691 {
3692 	struct mfi_softc *sc;
3693 	struct mfi_command *cm;
3694 	devclass_t dc;
3695 	time_t deadline;
3696 	int timedout __unused;
3697 	int i;
3698 
3699 	dc = devclass_find("mfi");
3700 	if (dc == NULL) {
3701 		printf("No mfi dev class\n");
3702 		return;
3703 	}
3704 
3705 	for (i = 0; ; i++) {
3706 		sc = devclass_get_softc(dc, i);
3707 		if (sc == NULL)
3708 			break;
3709 		device_printf(sc->mfi_dev, "Dumping\n\n");
3710 		timedout = 0;
3711 		deadline = time_uptime - mfi_cmd_timeout;
3712 		mtx_lock(&sc->mfi_io_lock);
3713 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3714 			if (cm->cm_timestamp <= deadline) {
3715 				device_printf(sc->mfi_dev,
3716 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3717 				    cm, (int)(time_uptime - cm->cm_timestamp));
3718 				MFI_PRINT_CMD(cm);
3719 				timedout++;
3720 			}
3721 		}
3722 
3723 #if 0
3724 		if (timedout)
3725 			MFI_DUMP_CMDS(sc);
3726 #endif
3727 
3728 		mtx_unlock(&sc->mfi_io_lock);
3729 	}
3732 }
3733 
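/*
 * Watchdog callout: runs every mfi_cmd_timeout seconds, reports commands
 * that have been busy longer than the timeout, and rearms itself.
 */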
3734 static void
3735 mfi_timeout(void *data)
3736 {
3737 	struct mfi_softc *sc = (struct mfi_softc *)data;
3738 	struct mfi_command *cm, *tmp;
3739 	time_t deadline;
3740 	int timedout __unused = 0;
3741 
3742 	deadline = time_uptime - mfi_cmd_timeout;
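	/*
	 * If no adapter reset is pending, let mfi_tbolt_reset() look at
	 * the controller first; when it returns zero, just rearm the
	 * watchdog and skip the timeout scan below.
	 */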
3743 	if (sc->adpreset == 0) {
3744 		if (!mfi_tbolt_reset(sc)) {
3745 			callout_reset(&sc->mfi_watchdog_callout,
3746 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3747 			return;
3748 		}
3749 	}
3750 	mtx_lock(&sc->mfi_io_lock);
3751 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
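		/*
		 * The AEN and map-sync commands are expected to remain
		 * outstanding indefinitely; don't treat them as stuck.
		 */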
3752 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3753 			continue;
3754 		if (cm->cm_timestamp <= deadline) {
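			/*
			 * While an adapter reset is pending, the busy
			 * commands will be reissued; refresh their
			 * timestamps instead of reporting them.
			 */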
3755 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3756 				cm->cm_timestamp = time_uptime;
3757 			} else {
3758 				device_printf(sc->mfi_dev,
3759 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3760 				     cm, (int)(time_uptime - cm->cm_timestamp)
3761 				     );
3762 				MFI_PRINT_CMD(cm);
3763 				MFI_VALIDATE_CMD(sc, cm);
3764 				/*
3765 				 * While commands can get stuck forever, we
3766 				 * do not fail them, as there is no way to
3767 				 * tell whether the controller has actually
3768 				 * processed them.
3769 				 *
3770 				 * In addition, it's very likely that
3771 				 * force-failing a command here would cause
3772 				 * a panic, e.g. in UFS.
3773 				 */
3774 				timedout++;
3775 			}
3776 		}
3777 	}
3778 
3779 #if 0
3780 	if (timedout)
3781 		MFI_DUMP_CMDS(sc);
3782 #endif
3783 
3784 	mtx_unlock(&sc->mfi_io_lock);
3785 
3786 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3787 	    mfi_timeout, sc);
3788 
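	/* Debugging hook, normally disabled: dump state for every adapter. */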
3789 	if (0)
3790 		mfi_dump_all();
3792 }
3793