xref: /freebsd/sys/dev/mfi/mfi.c (revision bf603dbbb2c9964bba3d5bf2ba2371a75a0cc521)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006 IronPort Systems
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2007 LSI Corp.
30  * Copyright (c) 2007 Rajesh Prabhakaran.
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 
55 #include "opt_mfi.h"
56 
57 #include <sys/systm.h>
58 #include <sys/abi_compat.h>
59 #include <sys/sysctl.h>
60 #include <sys/malloc.h>
61 #include <sys/kernel.h>
62 #include <sys/poll.h>
63 #include <sys/selinfo.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/rman.h>
68 #include <sys/bio.h>
69 #include <sys/ioccom.h>
70 #include <sys/uio.h>
71 #include <sys/proc.h>
72 #include <sys/signalvar.h>
73 #include <sys/sysent.h>
74 #include <sys/taskqueue.h>
75 
76 #include <machine/bus.h>
77 #include <machine/resource.h>
78 
79 #include <dev/mfi/mfireg.h>
80 #include <dev/mfi/mfi_ioctl.h>
81 #include <dev/mfi/mfivar.h>
82 #include <sys/interrupt.h>
83 #include <sys/priority.h>
84 
85 static int	mfi_alloc_commands(struct mfi_softc *);
86 static int	mfi_comms_init(struct mfi_softc *);
87 static int	mfi_get_controller_info(struct mfi_softc *);
88 static int	mfi_get_log_state(struct mfi_softc *,
89 		    struct mfi_evt_log_state **);
90 static int	mfi_parse_entries(struct mfi_softc *, int, int);
91 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void	mfi_startup(void *arg);
93 static void	mfi_intr(void *arg);
94 static void	mfi_ldprobe(struct mfi_softc *sc);
95 static void	mfi_syspdprobe(struct mfi_softc *sc);
96 static void	mfi_handle_evt(void *context, int pending);
97 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void	mfi_aen_complete(struct mfi_command *);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void	mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
104 static void	mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
107 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
109 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
110 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
111 static void	mfi_timeout(void *);
112 static int	mfi_user_command(struct mfi_softc *,
113 		    struct mfi_ioc_passthru *);
114 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
115 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
116 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
117 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
118 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
119 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
120 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
121 		    uint32_t frame_cnt);
122 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
123 		    uint32_t frame_cnt);
124 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
125 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
126 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
127 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
128 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
129 
130 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
131     "MFI driver parameters");
132 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
133 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
134            0, "event message locale");
135 
136 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
137 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
138            0, "event message class");
139 
140 static int	mfi_max_cmds = 128;
141 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
142 	   0, "Max commands limit (-1 = controller limit)");
143 
144 static int	mfi_detect_jbod_change = 1;
145 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
146 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
147 
148 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
149 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
150 	   &mfi_polled_cmd_timeout, 0,
151 	   "Polled command timeout - used for firmware flash etc (in seconds)");
152 
153 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
154 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
155 	   0, "Command timeout (in seconds)");
156 
157 /* Management interface */
158 static d_open_t		mfi_open;
159 static d_close_t	mfi_close;
160 static d_ioctl_t	mfi_ioctl;
161 static d_poll_t		mfi_poll;
162 
163 static struct cdevsw mfi_cdevsw = {
164 	.d_version = 	D_VERSION,
165 	.d_flags =	0,
166 	.d_open = 	mfi_open,
167 	.d_close =	mfi_close,
168 	.d_ioctl =	mfi_ioctl,
169 	.d_poll =	mfi_poll,
170 	.d_name =	"mfi",
171 };
172 
173 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
174 
175 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
176 struct mfi_skinny_dma_info mfi_skinny;
177 
178 static void
179 mfi_enable_intr_xscale(struct mfi_softc *sc)
180 {
181 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
182 }
183 
184 static void
185 mfi_enable_intr_ppc(struct mfi_softc *sc)
186 {
187 	if (sc->mfi_flags & MFI_FLAGS_1078) {
188 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
189 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
190 	}
191 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
192 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
193 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
194 	}
195 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
196 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
197 	}
198 }
199 
200 static int32_t
201 mfi_read_fw_status_xscale(struct mfi_softc *sc)
202 {
203 	return MFI_READ4(sc, MFI_OMSG0);
204 }
205 
206 static int32_t
207 mfi_read_fw_status_ppc(struct mfi_softc *sc)
208 {
209 	return MFI_READ4(sc, MFI_OSP0);
210 }
211 
212 static int
213 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
214 {
215 	int32_t status;
216 
217 	status = MFI_READ4(sc, MFI_OSTS);
218 	if ((status & MFI_OSTS_INTR_VALID) == 0)
219 		return 1;
220 
221 	MFI_WRITE4(sc, MFI_OSTS, status);
222 	return 0;
223 }
224 
225 static int
226 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
227 {
228 	int32_t status;
229 
230 	status = MFI_READ4(sc, MFI_OSTS);
231 	if (sc->mfi_flags & MFI_FLAGS_1078) {
232 		if (!(status & MFI_1078_RM)) {
233 			return 1;
234 		}
235 	}
236 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
237 		if (!(status & MFI_GEN2_RM)) {
238 			return 1;
239 		}
240 	}
241 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
242 		if (!(status & MFI_SKINNY_RM)) {
243 			return 1;
244 		}
245 	}
246 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
247 		MFI_WRITE4(sc, MFI_OSTS, status);
248 	else
249 		MFI_WRITE4(sc, MFI_ODCR0, status);
250 	return 0;
251 }
252 
253 static void
254 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
255 {
256 	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
257 }
258 
259 static void
260 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
261 {
262 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
263 	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
264 	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
265 	} else {
266 	    MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
267 	}
268 }
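
/*
 * Editorial note (not in the original source): both issue paths pack the
 * extra-frame count into the low bits of the command's frame bus address,
 * which works because frames are 64-byte aligned.  Illustrative arithmetic:
 * a command at bus address 0x1000 with 2 extra frames is posted on xscale
 * as (0x1000 >> 3) | 2 = 0x202, and on ppc/skinny as
 * 0x1000 | (2 << 1) | 1 = 0x1005.
 */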
269 
270 int
271 mfi_transition_firmware(struct mfi_softc *sc)
272 {
273 	uint32_t fw_state, cur_state;
274 	int max_wait, i;
275 	uint32_t cur_abs_reg_val = 0;
276 	uint32_t prev_abs_reg_val = 0;
277 
278 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
279 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
280 	while (fw_state != MFI_FWSTATE_READY) {
281 		if (bootverbose)
282 			device_printf(sc->mfi_dev, "Waiting for firmware to "
283 			    "become ready\n");
284 		cur_state = fw_state;
285 		switch (fw_state) {
286 		case MFI_FWSTATE_FAULT:
287 			device_printf(sc->mfi_dev, "Firmware fault\n");
288 			return (ENXIO);
289 		case MFI_FWSTATE_WAIT_HANDSHAKE:
290 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
291 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
292 			else
293 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
294 			max_wait = MFI_RESET_WAIT_TIME;
295 			break;
296 		case MFI_FWSTATE_OPERATIONAL:
297 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
298 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
299 			else
300 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
301 			max_wait = MFI_RESET_WAIT_TIME;
302 			break;
303 		case MFI_FWSTATE_UNDEFINED:
304 		case MFI_FWSTATE_BB_INIT:
305 			max_wait = MFI_RESET_WAIT_TIME;
306 			break;
307 		case MFI_FWSTATE_FW_INIT_2:
308 			max_wait = MFI_RESET_WAIT_TIME;
309 			break;
310 		case MFI_FWSTATE_FW_INIT:
311 		case MFI_FWSTATE_FLUSH_CACHE:
312 			max_wait = MFI_RESET_WAIT_TIME;
313 			break;
314 		case MFI_FWSTATE_DEVICE_SCAN:
315 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
316 			prev_abs_reg_val = cur_abs_reg_val;
317 			break;
318 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
319 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
320 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
321 			else
322 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
323 			max_wait = MFI_RESET_WAIT_TIME;
324 			break;
325 		default:
326 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
327 			    fw_state);
328 			return (ENXIO);
329 		}
330 		for (i = 0; i < (max_wait * 10); i++) {
331 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
332 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
333 			if (fw_state == cur_state)
334 				DELAY(100000);
335 			else
336 				break;
337 		}
338 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
339 			/* Check the device scanning progress */
340 			if (prev_abs_reg_val != cur_abs_reg_val) {
341 				continue;
342 			}
343 		}
344 		if (fw_state == cur_state) {
345 			device_printf(sc->mfi_dev, "Firmware stuck in state "
346 			    "%#x\n", fw_state);
347 			return (ENXIO);
348 		}
349 	}
350 	return (0);
351 }
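
/*
 * Timing note (editorial): for each state, the loop above polls the status
 * register up to max_wait * 10 times with DELAY(100000), i.e. for at most
 * max_wait seconds (180 seconds per state with MFI_RESET_WAIT_TIME).  A
 * state that does not change within that window is reported as stuck.
 */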
352 
353 static void
354 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
355 {
356 	bus_addr_t *addr;
357 
358 	addr = arg;
359 	*addr = segs[0].ds_addr;
360 }
361 
362 int
363 mfi_attach(struct mfi_softc *sc)
364 {
365 	uint32_t status;
366 	int error, commsz, framessz, sensesz;
367 	int frames, unit, max_fw_sge, max_fw_cmds;
368 	uint32_t tb_mem_size = 0;
369 	struct cdev *dev_t;
370 
371 	if (sc == NULL)
372 		return EINVAL;
373 
374 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
375 	    MEGASAS_VERSION);
376 
377 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
378 	sx_init(&sc->mfi_config_lock, "MFI config");
379 	TAILQ_INIT(&sc->mfi_ld_tqh);
380 	TAILQ_INIT(&sc->mfi_syspd_tqh);
381 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
382 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
383 	TAILQ_INIT(&sc->mfi_evt_queue);
384 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
385 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
386 	TAILQ_INIT(&sc->mfi_aen_pids);
387 	TAILQ_INIT(&sc->mfi_cam_ccbq);
388 
389 	mfi_initq_free(sc);
390 	mfi_initq_ready(sc);
391 	mfi_initq_busy(sc);
392 	mfi_initq_bio(sc);
393 
394 	sc->adpreset = 0;
395 	sc->last_seq_num = 0;
396 	sc->disableOnlineCtrlReset = 1;
397 	sc->issuepend_done = 1;
398 	sc->hw_crit_error = 0;
399 
400 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
401 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
402 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
403 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
404 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
405 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
406 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
407 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
408 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
409 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
410 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
411 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
412 		sc->mfi_tbolt = 1;
413 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
414 	} else {
415 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
416 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
417 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
418 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
419 	}
420 
421 	/* Before we get too far, see if the firmware is working */
422 	if ((error = mfi_transition_firmware(sc)) != 0) {
423 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
424 		    "error %d\n", error);
425 		return (ENXIO);
426 	}
427 
428 	/* Start: LSIP200113393 */
429 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
430 				1, 0,			/* algnmnt, boundary */
431 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
432 				BUS_SPACE_MAXADDR,	/* highaddr */
433 				NULL, NULL,		/* filter, filterarg */
434 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
435 				1,			/* nsegments */
436 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
437 				0,			/* flags */
438 				NULL, NULL,		/* lockfunc, lockarg */
439 				&sc->verbuf_h_dmat)) {
440 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
441 		return (ENOMEM);
442 	}
443 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
444 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
445 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
446 		return (ENOMEM);
447 	}
448 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
449 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
450 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
451 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
452 	/* End: LSIP200113393 */
453 
454 	/*
455 	 * Get information needed for sizing the contiguous memory for the
456 	 * frame pool.  Size down the sgl parameter since we know that
457 	 * we will never need more than what's required for MFI_MAXPHYS.
458 	 * It would be nice if these constants were available at runtime
459 	 * instead of compile time.
460 	 */
461 	status = sc->mfi_read_fw_status(sc);
462 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
463 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
464 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
465 		    max_fw_cmds, mfi_max_cmds);
466 		sc->mfi_max_fw_cmds = mfi_max_cmds;
467 	} else {
468 		sc->mfi_max_fw_cmds = max_fw_cmds;
469 	}
470 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
471 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
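
	/*
	 * Worked example (illustrative, assuming MFI_MAXPHYS is 128KB and
	 * PAGE_SIZE is 4KB): a maximally misaligned 128KB transfer can touch
	 * 33 pages, so mfi_max_sge is capped at (128K / 4K) + 1 = 33 even if
	 * the firmware advertises a larger max_fw_sge.
	 */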
472 
473 	/* ThunderBolt support: get the contiguous memory */
474 
475 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
476 		mfi_tbolt_init_globals(sc);
477 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
478 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
479 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
480 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
481 
482 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
483 				1, 0,			/* algnmnt, boundary */
484 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
485 				BUS_SPACE_MAXADDR,	/* highaddr */
486 				NULL, NULL,		/* filter, filterarg */
487 				tb_mem_size,		/* maxsize */
488 				1,			/* nsegments */
489 				tb_mem_size,		/* maxsegsize */
490 				0,			/* flags */
491 				NULL, NULL,		/* lockfunc, lockarg */
492 				&sc->mfi_tb_dmat)) {
493 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
494 			return (ENOMEM);
495 		}
496 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
497 		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
498 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
499 			return (ENOMEM);
500 		}
501 		bzero(sc->request_message_pool, tb_mem_size);
502 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
503 		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
504 
505 		/* For ThunderBolt memory init */
506 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
507 				0x100, 0,		/* algnmnt, boundary */
508 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
509 				BUS_SPACE_MAXADDR,	/* highaddr */
510 				NULL, NULL,		/* filter, filterarg */
511 				MFI_FRAME_SIZE,		/* maxsize */
512 				1,			/* nsegments */
513 				MFI_FRAME_SIZE,		/* maxsegsize */
514 				0,			/* flags */
515 				NULL, NULL,		/* lockfunc, lockarg */
516 				&sc->mfi_tb_init_dmat)) {
517 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
518 			return (ENOMEM);
519 		}
520 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
521 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
522 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
523 			return (ENOMEM);
524 		}
525 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
526 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
527 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
528 		    &sc->mfi_tb_init_busaddr, 0);
529 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
530 		    tb_mem_size)) {
531 			device_printf(sc->mfi_dev,
532 			    "Thunderbolt pool preparation error\n");
533 			return (ENOMEM);
534 		}
535 
536 		/*
537 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
538 		  we are taking it different from what we have allocated for Request
539 		  and reply descriptors to avoid confusion later
540 		*/
541 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
542 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
543 				1, 0,			/* algnmnt, boundary */
544 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
545 				BUS_SPACE_MAXADDR,	/* highaddr */
546 				NULL, NULL,		/* filter, filterarg */
547 				tb_mem_size,		/* maxsize */
548 				1,			/* nsegments */
549 				tb_mem_size,		/* maxsegsize */
550 				0,			/* flags */
551 				NULL, NULL,		/* lockfunc, lockarg */
552 				&sc->mfi_tb_ioc_init_dmat)) {
553 			device_printf(sc->mfi_dev,
554 			    "Cannot allocate comms DMA tag\n");
555 			return (ENOMEM);
556 		}
557 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
558 		    (void **)&sc->mfi_tb_ioc_init_desc,
559 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
560 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
561 			return (ENOMEM);
562 		}
563 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
564 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
565 		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
566 		    &sc->mfi_tb_ioc_init_busaddr, 0);
567 	}
568 	/*
569 	 * Create the dma tag for data buffers.  Used both for block I/O
570 	 * and for various internal data queries.
571 	 */
572 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
573 				1, 0,			/* algnmnt, boundary */
574 				BUS_SPACE_MAXADDR,	/* lowaddr */
575 				BUS_SPACE_MAXADDR,	/* highaddr */
576 				NULL, NULL,		/* filter, filterarg */
577 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
578 				sc->mfi_max_sge,	/* nsegments */
579 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
580 				BUS_DMA_ALLOCNOW,	/* flags */
581 				busdma_lock_mutex,	/* lockfunc */
582 				&sc->mfi_io_lock,	/* lockfuncarg */
583 				&sc->mfi_buffer_dmat)) {
584 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
585 		return (ENOMEM);
586 	}
587 
588 	/*
589 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
590 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
591 	 * entry, so the calculated size here will be 1 more than
592 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
593 	 */
594 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
595 	    sizeof(struct mfi_hwcomms);
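
	/*
	 * Example (illustrative): with mfi_max_fw_cmds == 128 the reply
	 * queue needs 129 slots; struct mfi_hwcomms already provides one
	 * entry plus the producer/consumer indexes, so 128 additional
	 * uint32_t slots are added here.
	 */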
596 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
597 				1, 0,			/* algnmnt, boundary */
598 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
599 				BUS_SPACE_MAXADDR,	/* highaddr */
600 				NULL, NULL,		/* filter, filterarg */
601 				commsz,			/* maxsize */
602 				1,			/* nsegments */
603 				commsz,			/* maxsegsize */
604 				0,			/* flags */
605 				NULL, NULL,		/* lockfunc, lockarg */
606 				&sc->mfi_comms_dmat)) {
607 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
608 		return (ENOMEM);
609 	}
610 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
611 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
612 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
613 		return (ENOMEM);
614 	}
615 	bzero(sc->mfi_comms, commsz);
616 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
617 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
618 	/*
619 	 * Allocate DMA memory for the command frames.  Keep them in the
620 	 * lower 4GB for efficiency.  Calculate the size of the commands at
621 	 * the same time; each command is one 64 byte frame plus a set of
622 	 * additional frames for holding sg lists or other data.
623 	 * The assumption here is that the SG list will start at the second
624 	 * frame and not use the unused bytes in the first frame.  While this
625 	 * isn't technically correct, it simplifies the calculation and allows
626 	 * for command frames that might be larger than an mfi_io_frame.
627 	 */
628 	if (sizeof(bus_addr_t) == 8) {
629 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
630 		sc->mfi_flags |= MFI_FLAGS_SG64;
631 	} else {
632 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
633 	}
634 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
635 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
636 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
637 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
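	/*
	 * Worked example (illustrative, assuming a 12-byte packed mfi_sg64
	 * and mfi_max_sge == 33): (12 * 33 - 1) / 64 + 2 = 8 frames, so each
	 * command would occupy 8 * 64 = 512 bytes of the frame pool.
	 */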
638 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
639 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
640 				64, 0,			/* algnmnt, boundary */
641 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
642 				BUS_SPACE_MAXADDR,	/* highaddr */
643 				NULL, NULL,		/* filter, filterarg */
644 				framessz,		/* maxsize */
645 				1,			/* nsegments */
646 				framessz,		/* maxsegsize */
647 				0,			/* flags */
648 				NULL, NULL,		/* lockfunc, lockarg */
649 				&sc->mfi_frames_dmat)) {
650 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
651 		return (ENOMEM);
652 	}
653 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
654 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
655 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
656 		return (ENOMEM);
657 	}
658 	bzero(sc->mfi_frames, framessz);
659 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
660 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
661 	/*
662 	 * Allocate DMA memory for the frame sense data.  Keep them in the
663 	 * lower 4GB for efficiency
664 	 */
665 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
666 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
667 				4, 0,			/* algnmnt, boundary */
668 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
669 				BUS_SPACE_MAXADDR,	/* highaddr */
670 				NULL, NULL,		/* filter, filterarg */
671 				sensesz,		/* maxsize */
672 				1,			/* nsegments */
673 				sensesz,		/* maxsegsize */
674 				0,			/* flags */
675 				NULL, NULL,		/* lockfunc, lockarg */
676 				&sc->mfi_sense_dmat)) {
677 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
678 		return (ENOMEM);
679 	}
680 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
681 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
682 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
683 		return (ENOMEM);
684 	}
685 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
686 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
687 	if ((error = mfi_alloc_commands(sc)) != 0)
688 		return (error);
689 
690 	/*
691 	 * Before moving the FW to the operational state, check whether
692 	 * the FW requires host memory.
693 	 */
693 
694 	/* ThunderBolt MFI_IOC2 INIT */
695 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
696 		sc->mfi_disable_intr(sc);
697 		mtx_lock(&sc->mfi_io_lock);
698 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
699 			device_printf(sc->mfi_dev,
700 			    "TB Init has failed with error %d\n",error);
701 			mtx_unlock(&sc->mfi_io_lock);
702 			return error;
703 		}
704 		mtx_unlock(&sc->mfi_io_lock);
705 
706 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
707 			return error;
708 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
709 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
710 		    &sc->mfi_intr)) {
711 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
712 			return (EINVAL);
713 		}
714 		sc->mfi_intr_ptr = mfi_intr_tbolt;
715 		sc->mfi_enable_intr(sc);
716 	} else {
717 		if ((error = mfi_comms_init(sc)) != 0)
718 			return (error);
719 
720 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
721 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
722 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
723 			return (EINVAL);
724 		}
725 		sc->mfi_intr_ptr = mfi_intr;
726 		sc->mfi_enable_intr(sc);
727 	}
728 	if ((error = mfi_get_controller_info(sc)) != 0)
729 		return (error);
730 	sc->disableOnlineCtrlReset = 0;
731 
732 	/* Register a config hook to probe the bus for arrays */
733 	sc->mfi_ich.ich_func = mfi_startup;
734 	sc->mfi_ich.ich_arg = sc;
735 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
736 		device_printf(sc->mfi_dev, "Cannot establish configuration "
737 		    "hook\n");
738 		return (EINVAL);
739 	}
740 	mtx_lock(&sc->mfi_io_lock);
741 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
742 		mtx_unlock(&sc->mfi_io_lock);
743 		return (error);
744 	}
745 	mtx_unlock(&sc->mfi_io_lock);
746 
747 	/*
748 	 * Register a shutdown handler.
749 	 */
750 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
751 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
752 		device_printf(sc->mfi_dev, "Warning: shutdown event "
753 		    "registration failed\n");
754 	}
755 
756 	/*
757 	 * Create the control device for doing management
758 	 */
759 	unit = device_get_unit(sc->mfi_dev);
760 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
761 	    0640, "mfi%d", unit);
762 	if (unit == 0)
763 		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
764 		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
765 	if (sc->mfi_cdev != NULL)
766 		sc->mfi_cdev->si_drv1 = sc;
767 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
768 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
769 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
770 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
771 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
772 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
773 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
774 	    &sc->mfi_keep_deleted_volumes, 0,
775 	    "Don't detach the mfid device for a busy volume that is deleted");
776 
777 	device_add_child(sc->mfi_dev, "mfip", DEVICE_UNIT_ANY);
778 	bus_attach_children(sc->mfi_dev);
779 
780 	/* Start the timeout watchdog */
781 	callout_init(&sc->mfi_watchdog_callout, 1);
782 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
783 	    mfi_timeout, sc);
784 
785 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
786 		mtx_lock(&sc->mfi_io_lock);
787 		mfi_tbolt_sync_map_info(sc);
788 		mtx_unlock(&sc->mfi_io_lock);
789 	}
790 
791 	return (0);
792 }
793 
794 static int
795 mfi_alloc_commands(struct mfi_softc *sc)
796 {
797 	struct mfi_command *cm;
798 	int i, j;
799 
800 	/*
801 	 * XXX Should we allocate all the commands up front, or allocate on
802 	 * demand later like 'aac' does?
803 	 */
804 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
805 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
806 
807 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
808 		cm = &sc->mfi_commands[i];
809 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
810 		    sc->mfi_cmd_size * i);
811 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
812 		    sc->mfi_cmd_size * i;
813 		cm->cm_frame->header.context = i;
814 		cm->cm_sense = &sc->mfi_sense[i];
815 		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
816 		cm->cm_sc = sc;
817 		cm->cm_index = i;
818 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
819 		    &cm->cm_dmamap) == 0) {
820 			mtx_lock(&sc->mfi_io_lock);
821 			mfi_release_command(cm);
822 			mtx_unlock(&sc->mfi_io_lock);
823 		} else {
824 			device_printf(sc->mfi_dev, "Failed to allocate %d "
825 			   "command blocks, only allocated %d\n",
826 			    sc->mfi_max_fw_cmds, i);
827 			for (j = 0; j < i; j++) {
828 				cm = &sc->mfi_commands[j];
829 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
830 				    cm->cm_dmamap);
831 			}
832 			free(sc->mfi_commands, M_MFIBUF);
833 			sc->mfi_commands = NULL;
834 
835 			return (ENOMEM);
836 		}
837 	}
838 
839 	return (0);
840 }
841 
842 void
843 mfi_release_command(struct mfi_command *cm)
844 {
845 	struct mfi_frame_header *hdr;
846 	uint32_t *hdr_data;
847 
848 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
849 
850 	/*
851 	 * Zero out the important fields of the frame, but make sure the
852 	 * context field is preserved.  For efficiency, handle the fields
853 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
854 	 */
855 	hdr = &cm->cm_frame->header;
856 	if (cm->cm_data != NULL && hdr->sg_count) {
857 		cm->cm_sg->sg32[0].len = 0;
858 		cm->cm_sg->sg32[0].addr = 0;
859 	}
860 
861 	/*
862 	 * Command may be on other queues e.g. busy queue depending on the
863 	 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
864 	 * properly
865 	 */
866 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
867 		mfi_remove_busy(cm);
868 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
869 		mfi_remove_ready(cm);
870 
871 	/* We're not expecting it to be on any other queue but check */
872 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
873 		panic("Command %p is still on another queue, flags = %#x",
874 		    cm, cm->cm_flags);
875 	}
876 
877 	/* tbolt cleanup */
878 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
879 		mfi_tbolt_return_cmd(cm->cm_sc,
880 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
881 		    cm);
882 	}
883 
884 	hdr_data = (uint32_t *)cm->cm_frame;
885 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
886 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
887 	hdr_data[4] = 0;	/* flags, timeout */
888 	hdr_data[5] = 0;	/* data_len */
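
	/*
	 * Editorial note: the 32-bit context word is deliberately not
	 * cleared here; mfi_intr() relies on the context echoed through the
	 * reply queue to map a completion back to its command.
	 */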
889 
890 	cm->cm_extra_frames = 0;
891 	cm->cm_flags = 0;
892 	cm->cm_complete = NULL;
893 	cm->cm_private = NULL;
894 	cm->cm_data = NULL;
895 	cm->cm_sg = NULL;
896 	cm->cm_total_frame_size = 0;
897 	cm->retry_for_fw_reset = 0;
898 
899 	mfi_enqueue_free(cm);
900 }
901 
902 int
903 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
904     uint32_t opcode, void **bufp, size_t bufsize)
905 {
906 	struct mfi_command *cm;
907 	struct mfi_dcmd_frame *dcmd;
908 	void *buf = NULL;
909 	uint32_t context = 0;
910 
911 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
912 
913 	cm = mfi_dequeue_free(sc);
914 	if (cm == NULL)
915 		return (EBUSY);
916 
917 	/* Zero out the MFI frame */
918 	context = cm->cm_frame->header.context;
919 	bzero(cm->cm_frame, sizeof(union mfi_frame));
920 	cm->cm_frame->header.context = context;
921 
922 	if ((bufsize > 0) && (bufp != NULL)) {
923 		if (*bufp == NULL) {
924 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
925 			if (buf == NULL) {
926 				mfi_release_command(cm);
927 				return (ENOMEM);
928 			}
929 			*bufp = buf;
930 		} else {
931 			buf = *bufp;
932 		}
933 	}
934 
935 	dcmd = &cm->cm_frame->dcmd;
936 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
937 	dcmd->header.cmd = MFI_CMD_DCMD;
938 	dcmd->header.timeout = 0;
939 	dcmd->header.flags = 0;
940 	dcmd->header.data_len = bufsize;
941 	dcmd->header.scsi_status = 0;
942 	dcmd->opcode = opcode;
943 	cm->cm_sg = &dcmd->sgl;
944 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
945 	cm->cm_flags = 0;
946 	cm->cm_data = buf;
947 	cm->cm_private = buf;
948 	cm->cm_len = bufsize;
949 
950 	*cmp = cm;
951 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
952 		*bufp = buf;
953 	return (0);
954 }
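
/*
 * Usage sketch (editorial; mirrors mfi_get_controller_info() below):
 *
 *	mtx_lock(&sc->mfi_io_lock);
 *	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
 *	    (void **)&ci, sizeof(*ci));
 *	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
 *	error = mfi_mapcmd(sc, cm);
 *	...
 *	mfi_release_command(cm);
 *	mtx_unlock(&sc->mfi_io_lock);
 */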
955 
956 static int
957 mfi_comms_init(struct mfi_softc *sc)
958 {
959 	struct mfi_command *cm;
960 	struct mfi_init_frame *init;
961 	struct mfi_init_qinfo *qinfo;
962 	int error;
963 	uint32_t context = 0;
964 
965 	mtx_lock(&sc->mfi_io_lock);
966 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
967 		mtx_unlock(&sc->mfi_io_lock);
968 		return (EBUSY);
969 	}
970 
971 	/* Zero out the MFI frame */
972 	context = cm->cm_frame->header.context;
973 	bzero(cm->cm_frame, sizeof(union mfi_frame));
974 	cm->cm_frame->header.context = context;
975 
976 	/*
977 	 * Abuse the SG list area of the frame to hold the init_qinfo
978 	 * object.
979 	 */
980 	init = &cm->cm_frame->init;
981 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
982 
983 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
984 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
985 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
986 	    offsetof(struct mfi_hwcomms, hw_reply_q);
987 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
988 	    offsetof(struct mfi_hwcomms, hw_pi);
989 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
990 	    offsetof(struct mfi_hwcomms, hw_ci);
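
	/*
	 * Layout note (editorial): the single mfi_hwcomms allocation holds
	 * the producer index (hw_pi), the consumer index (hw_ci), and the
	 * reply ring (hw_reply_q); the firmware is handed the bus address
	 * of each piece so that mfi_intr() can later walk the ring.
	 */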
991 
992 	init->header.cmd = MFI_CMD_INIT;
993 	init->header.data_len = sizeof(struct mfi_init_qinfo);
994 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
995 	cm->cm_data = NULL;
996 	cm->cm_flags = MFI_CMD_POLLED;
997 
998 	if ((error = mfi_mapcmd(sc, cm)) != 0)
999 		device_printf(sc->mfi_dev, "failed to send init command\n");
1000 	mfi_release_command(cm);
1001 	mtx_unlock(&sc->mfi_io_lock);
1002 
1003 	return (error);
1004 }
1005 
1006 static int
1007 mfi_get_controller_info(struct mfi_softc *sc)
1008 {
1009 	struct mfi_command *cm = NULL;
1010 	struct mfi_ctrl_info *ci = NULL;
1011 	uint32_t max_sectors_1, max_sectors_2;
1012 	int error;
1013 
1014 	mtx_lock(&sc->mfi_io_lock);
1015 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1016 	    (void **)&ci, sizeof(*ci));
1017 	if (error)
1018 		goto out;
1019 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1020 
1021 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1022 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1023 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1024 		    MFI_SECTOR_LEN;
1025 		error = 0;
1026 		goto out;
1027 	}
1028 
1029 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1030 	    BUS_DMASYNC_POSTREAD);
1031 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1032 
1033 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1034 	max_sectors_2 = ci->max_request_size;
1035 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1036 	sc->disableOnlineCtrlReset =
1037 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1038 
1039 out:
1040 	if (ci)
1041 		free(ci, M_MFIBUF);
1042 	if (cm)
1043 		mfi_release_command(cm);
1044 	mtx_unlock(&sc->mfi_io_lock);
1045 	return (error);
1046 }
1047 
1048 static int
1049 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1050 {
1051 	struct mfi_command *cm = NULL;
1052 	int error;
1053 
1054 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1055 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1056 	    (void **)log_state, sizeof(**log_state));
1057 	if (error)
1058 		goto out;
1059 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1060 
1061 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1062 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1063 		goto out;
1064 	}
1065 
1066 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1067 	    BUS_DMASYNC_POSTREAD);
1068 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1069 
1070 out:
1071 	if (cm)
1072 		mfi_release_command(cm);
1073 
1074 	return (error);
1075 }
1076 
1077 int
1078 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1079 {
1080 	struct mfi_evt_log_state *log_state = NULL;
1081 	union mfi_evt class_locale;
1082 	int error = 0;
1083 	uint32_t seq;
1084 
1085 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1086 
1087 	class_locale.members.reserved = 0;
1088 	class_locale.members.locale = mfi_event_locale;
1089 	class_locale.members.evt_class  = mfi_event_class;
1090 
1091 	if (seq_start == 0) {
1092 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1093 			goto out;
1094 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1095 
1096 		/*
1097 		 * Walk through any events that fired since the last
1098 		 * shutdown.
1099 		 */
1100 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1101 		    log_state->newest_seq_num)) != 0)
1102 			goto out;
1103 		seq = log_state->newest_seq_num;
1104 	} else
1105 		seq = seq_start;
1106 	error = mfi_aen_register(sc, seq, class_locale.word);
1107 out:
1108 	free(log_state, M_MFIBUF);
1109 
1110 	return (error);
1111 }
1112 
1113 int
1114 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1115 {
1116 
1117 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1118 	cm->cm_complete = NULL;
1119 
1120 	/*
1121 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1122 	 * and return a success status.
1123 	 */
1124 	if (cm->cm_frame->dcmd.opcode == 0) {
1125 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1126 		cm->cm_error = 0;
1127 		return (cm->cm_error);
1128 	}
1129 	mfi_enqueue_ready(cm);
1130 	mfi_startio(sc);
1131 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1132 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1133 	return (cm->cm_error);
1134 }
1135 
1136 void
1137 mfi_free(struct mfi_softc *sc)
1138 {
1139 	struct mfi_command *cm;
1140 	int i;
1141 
1142 	callout_drain(&sc->mfi_watchdog_callout);
1143 
1144 	if (sc->mfi_cdev != NULL)
1145 		destroy_dev(sc->mfi_cdev);
1146 
1147 	if (sc->mfi_commands != NULL) {
1148 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1149 			cm = &sc->mfi_commands[i];
1150 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1151 		}
1152 		free(sc->mfi_commands, M_MFIBUF);
1153 		sc->mfi_commands = NULL;
1154 	}
1155 
1156 	if (sc->mfi_intr)
1157 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1158 	if (sc->mfi_irq != NULL)
1159 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1160 		    sc->mfi_irq);
1161 
1162 	if (sc->mfi_sense_busaddr != 0)
1163 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1164 	if (sc->mfi_sense != NULL)
1165 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1166 		    sc->mfi_sense_dmamap);
1167 	if (sc->mfi_sense_dmat != NULL)
1168 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1169 
1170 	if (sc->mfi_frames_busaddr != 0)
1171 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1172 	if (sc->mfi_frames != NULL)
1173 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1174 		    sc->mfi_frames_dmamap);
1175 	if (sc->mfi_frames_dmat != NULL)
1176 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1177 
1178 	if (sc->mfi_comms_busaddr != 0)
1179 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1180 	if (sc->mfi_comms != NULL)
1181 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1182 		    sc->mfi_comms_dmamap);
1183 	if (sc->mfi_comms_dmat != NULL)
1184 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1185 
1186 	/* ThunderBolt contiguous memory free here */
1187 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1188 		if (sc->mfi_tb_busaddr != 0)
1189 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1190 		if (sc->request_message_pool != NULL)
1191 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1192 			    sc->mfi_tb_dmamap);
1193 		if (sc->mfi_tb_dmat != NULL)
1194 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1195 
1196 		/* Version buffer memory free */
1197 		/* Start LSIP200113393 */
1198 		if (sc->verbuf_h_busaddr != 0)
1199 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1200 		if (sc->verbuf != NULL)
1201 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1202 			    sc->verbuf_h_dmamap);
1203 		if (sc->verbuf_h_dmat != NULL)
1204 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1205 
1206 		/* End LSIP200113393 */
1207 		/* ThunderBolt INIT packet memory Free */
1208 		if (sc->mfi_tb_init_busaddr != 0)
1209 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1210 			    sc->mfi_tb_init_dmamap);
1211 		if (sc->mfi_tb_init != NULL)
1212 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1213 			    sc->mfi_tb_init_dmamap);
1214 		if (sc->mfi_tb_init_dmat != NULL)
1215 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1216 
1217 		/* ThunderBolt IOC Init Desc memory free here */
1218 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1219 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1220 			    sc->mfi_tb_ioc_init_dmamap);
1221 		if (sc->mfi_tb_ioc_init_desc != NULL)
1222 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1223 			    sc->mfi_tb_ioc_init_desc,
1224 			    sc->mfi_tb_ioc_init_dmamap);
1225 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1226 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1227 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1228 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1229 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1230 					free(sc->mfi_cmd_pool_tbolt[i],
1231 					    M_MFIBUF);
1232 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1233 				}
1234 			}
1235 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1236 			sc->mfi_cmd_pool_tbolt = NULL;
1237 		}
1238 		if (sc->request_desc_pool != NULL) {
1239 			free(sc->request_desc_pool, M_MFIBUF);
1240 			sc->request_desc_pool = NULL;
1241 		}
1242 	}
1243 	if (sc->mfi_buffer_dmat != NULL)
1244 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1245 	if (sc->mfi_parent_dmat != NULL)
1246 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1247 
1248 	if (mtx_initialized(&sc->mfi_io_lock)) {
1249 		mtx_destroy(&sc->mfi_io_lock);
1250 		sx_destroy(&sc->mfi_config_lock);
1251 	}
1252 
1253 	return;
1254 }
1255 
1256 static void
1257 mfi_startup(void *arg)
1258 {
1259 	struct mfi_softc *sc;
1260 
1261 	sc = (struct mfi_softc *)arg;
1262 
1263 	sc->mfi_enable_intr(sc);
1264 	sx_xlock(&sc->mfi_config_lock);
1265 	mtx_lock(&sc->mfi_io_lock);
1266 	mfi_ldprobe(sc);
1267 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1268 	    mfi_syspdprobe(sc);
1269 	mtx_unlock(&sc->mfi_io_lock);
1270 	sx_xunlock(&sc->mfi_config_lock);
1271 
1272 	config_intrhook_disestablish(&sc->mfi_ich);
1273 }
1274 
1275 static void
1276 mfi_intr(void *arg)
1277 {
1278 	struct mfi_softc *sc;
1279 	struct mfi_command *cm;
1280 	uint32_t pi, ci, context;
1281 
1282 	sc = (struct mfi_softc *)arg;
1283 
1284 	if (sc->mfi_check_clear_intr(sc))
1285 		return;
1286 
1287 restart:
1288 	pi = sc->mfi_comms->hw_pi;
1289 	ci = sc->mfi_comms->hw_ci;
1290 	mtx_lock(&sc->mfi_io_lock);
1291 	while (ci != pi) {
1292 		context = sc->mfi_comms->hw_reply_q[ci];
1293 		if (context < sc->mfi_max_fw_cmds) {
1294 			cm = &sc->mfi_commands[context];
1295 			mfi_remove_busy(cm);
1296 			cm->cm_error = 0;
1297 			mfi_complete(sc, cm);
1298 		}
1299 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1300 			ci = 0;
1301 	}
1302 
1303 	sc->mfi_comms->hw_ci = ci;
1304 
1305 	/* Give deferred I/O a chance to run */
1306 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1307 	mfi_startio(sc);
1308 	mtx_unlock(&sc->mfi_io_lock);
1309 
1310 	/*
1311 	 * Dummy read to flush the bus; this ensures that the indexes are up
1312 	 * to date.  Restart processing if more commands have come in.
1313 	 */
1314 	(void)sc->mfi_read_fw_status(sc);
1315 	if (pi != sc->mfi_comms->hw_pi)
1316 		goto restart;
1317 
1318 	return;
1319 }
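
/*
 * Illustrative walk of the reply ring above: with a ring of
 * mfi_max_fw_cmds + 1 slots, hw_ci == 5 and hw_pi == 7 means slots 5 and 6
 * hold the contexts (command indexes) of two completed commands.  The loop
 * completes both, wrapping ci to 0 past the last slot, then re-checks
 * hw_pi after the flushing register read in case more completions arrived.
 */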
1320 
1321 int
1322 mfi_shutdown(struct mfi_softc *sc)
1323 {
1324 	struct mfi_dcmd_frame *dcmd;
1325 	struct mfi_command *cm;
1326 	int error;
1327 
1328 	if (sc->mfi_aen_cm != NULL) {
1329 		sc->cm_aen_abort = 1;
1330 		mfi_abort(sc, &sc->mfi_aen_cm);
1331 	}
1332 
1333 	if (sc->mfi_map_sync_cm != NULL) {
1334 		sc->cm_map_abort = 1;
1335 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1336 	}
1337 
1338 	mtx_lock(&sc->mfi_io_lock);
1339 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1340 	if (error) {
1341 		mtx_unlock(&sc->mfi_io_lock);
1342 		return (error);
1343 	}
1344 
1345 	dcmd = &cm->cm_frame->dcmd;
1346 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1347 	cm->cm_flags = MFI_CMD_POLLED;
1348 	cm->cm_data = NULL;
1349 
1350 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1351 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1352 
1353 	mfi_release_command(cm);
1354 	mtx_unlock(&sc->mfi_io_lock);
1355 	return (error);
1356 }
1357 
1358 static void
1359 mfi_syspdprobe(struct mfi_softc *sc)
1360 {
1361 	struct mfi_frame_header *hdr;
1362 	struct mfi_command *cm = NULL;
1363 	struct mfi_pd_list *pdlist = NULL;
1364 	struct mfi_system_pd *syspd, *tmp;
1365 	struct mfi_system_pending *syspd_pend;
1366 	int error, i, found;
1367 
1368 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1369 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1370 	/* Add SYSTEM PD's */
1371 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1372 	    (void **)&pdlist, sizeof(*pdlist));
1373 	if (error) {
1374 		device_printf(sc->mfi_dev,
1375 		    "Error while forming SYSTEM PD list\n");
1376 		goto out;
1377 	}
1378 
1379 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1380 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1381 	cm->cm_frame->dcmd.mbox[1] = 0;
1382 	if (mfi_mapcmd(sc, cm) != 0) {
1383 		device_printf(sc->mfi_dev,
1384 		    "Failed to get syspd device listing\n");
1385 		goto out;
1386 	}
1387 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1388 	    BUS_DMASYNC_POSTREAD);
1389 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1390 	hdr = &cm->cm_frame->header;
1391 	if (hdr->cmd_status != MFI_STAT_OK) {
1392 		device_printf(sc->mfi_dev,
1393 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1394 		goto out;
1395 	}
1396 	/* Get each PD and add it to the system */
1397 	for (i = 0; i < pdlist->count; i++) {
1398 		if (pdlist->addr[i].device_id ==
1399 		    pdlist->addr[i].encl_device_id)
1400 			continue;
1401 		found = 0;
1402 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1403 			if (syspd->pd_id == pdlist->addr[i].device_id)
1404 				found = 1;
1405 		}
1406 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1407 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1408 				found = 1;
1409 		}
1410 		if (found == 0)
1411 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1412 	}
1413 	/* Delete SYSPD's whose state has been changed */
1414 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1415 		found = 0;
1416 		for (i = 0; i < pdlist->count; i++) {
1417 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1418 				found = 1;
1419 				break;
1420 			}
1421 		}
1422 		if (found == 0) {
1423 			device_printf(sc->mfi_dev, "deleting syspd %d\n", syspd->pd_id);
1424 			mtx_unlock(&sc->mfi_io_lock);
1425 			bus_topo_lock();
1426 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1427 			bus_topo_unlock();
1428 			mtx_lock(&sc->mfi_io_lock);
1429 		}
1430 	}
1431 out:
1432 	if (pdlist)
1433 	    free(pdlist, M_MFIBUF);
1434 	if (cm)
1435 	    mfi_release_command(cm);
1436 
1437 	return;
1438 }
1439 
1440 static void
1441 mfi_ldprobe(struct mfi_softc *sc)
1442 {
1443 	struct mfi_frame_header *hdr;
1444 	struct mfi_command *cm = NULL;
1445 	struct mfi_ld_list *list = NULL;
1446 	struct mfi_disk *ld;
1447 	struct mfi_disk_pending *ld_pend;
1448 	int error, i;
1449 
1450 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1451 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1452 
1453 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1454 	    (void **)&list, sizeof(*list));
1455 	if (error)
1456 		goto out;
1457 
1458 	cm->cm_flags = MFI_CMD_DATAIN;
1459 	if (mfi_wait_command(sc, cm) != 0) {
1460 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1461 		goto out;
1462 	}
1463 
1464 	hdr = &cm->cm_frame->header;
1465 	if (hdr->cmd_status != MFI_STAT_OK) {
1466 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1467 		    hdr->cmd_status);
1468 		goto out;
1469 	}
1470 
1471 	for (i = 0; i < list->ld_count; i++) {
1472 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1473 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1474 				goto skip_add;
1475 		}
1476 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1477 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1478 				goto skip_add;
1479 		}
1480 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1481 	skip_add:;
1482 	}
1483 out:
1484 	if (list)
1485 		free(list, M_MFIBUF);
1486 	if (cm)
1487 		mfi_release_command(cm);
1488 
1489 	return;
1490 }
1491 
1492 /*
1493  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1494  * bits 24-31 are all set, then it is the number of seconds since
1495  * boot.
1496  */
1497 static const char *
1498 format_timestamp(uint32_t timestamp)
1499 {
1500 	static char buffer[32];
1501 
1502 	if ((timestamp & 0xff000000) == 0xff000000)
1503 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1504 		    0x00ffffff);
1505 	else
1506 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1507 	return (buffer);
1508 }
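
/*
 * Example (illustrative): 0xff00003c formats as "boot + 60s", while a
 * timestamp of 1000 (high byte clear) formats as "1000s".
 */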
1509 
1510 static const char *
1511 format_class(int8_t class)
1512 {
1513 	static char buffer[6];
1514 
1515 	switch (class) {
1516 	case MFI_EVT_CLASS_DEBUG:
1517 		return ("debug");
1518 	case MFI_EVT_CLASS_PROGRESS:
1519 		return ("progress");
1520 	case MFI_EVT_CLASS_INFO:
1521 		return ("info");
1522 	case MFI_EVT_CLASS_WARNING:
1523 		return ("WARN");
1524 	case MFI_EVT_CLASS_CRITICAL:
1525 		return ("CRIT");
1526 	case MFI_EVT_CLASS_FATAL:
1527 		return ("FATAL");
1528 	case MFI_EVT_CLASS_DEAD:
1529 		return ("DEAD");
1530 	default:
1531 		snprintf(buffer, sizeof(buffer), "%d", class);
1532 		return (buffer);
1533 	}
1534 }
1535 
1536 static void
1537 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1538 {
1539 	struct mfi_system_pd *syspd = NULL;
1540 
1541 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1542 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1543 	    format_class(detail->evt_class.members.evt_class),
1544 	    detail->description);
1545 
1546 	/* Don't act on old AEN's or while shutting down */
1547 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1548 		return;
1549 
1550 	switch (detail->arg_type) {
1551 	case MR_EVT_ARGS_NONE:
1552 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1553 		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
1554 			if (mfi_detect_jbod_change) {
1555 				/*
1556 				 * Probe for new SYSPD's and delete
1557 				 * invalid SYSPD's.
1558 				 */
1559 				sx_xlock(&sc->mfi_config_lock);
1560 				mtx_lock(&sc->mfi_io_lock);
1561 				mfi_syspdprobe(sc);
1562 				mtx_unlock(&sc->mfi_io_lock);
1563 				sx_xunlock(&sc->mfi_config_lock);
1564 			}
1565 		}
1566 		break;
1567 	case MR_EVT_ARGS_LD_STATE:
1568 		/* At load time the driver reads all events starting from
1569 		 * the one logged after the previous shutdown.  Skip these
1570 		 * old events.
1571 		 */
1572 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1573 			/* Remove the LD */
1574 			struct mfi_disk *ld;
1575 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1576 				if (ld->ld_id ==
1577 				    detail->args.ld_state.ld.target_id)
1578 					break;
1579 			}
1580 			/*
1581 			 * Fix for kernel panics when an SSCD is removed:
1582 			 * KASSERT(ld != NULL, ("volume disappeared"));
1583 			 */
1584 			if (ld != NULL) {
1585 				bus_topo_lock();
1586 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1587 				bus_topo_unlock();
1588 			}
1589 		}
1590 		break;
1591 	case MR_EVT_ARGS_PD:
1592 		if (detail->code == MR_EVT_PD_REMOVED) {
1593 			if (mfi_detect_jbod_change) {
1594 				/*
1595 				 * If the removed device is a SYSPD then
1596 				 * delete it
1597 				 */
1598 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1599 				    pd_link) {
1600 					if (syspd->pd_id ==
1601 					    detail->args.pd.device_id) {
1602 						bus_topo_lock();
1603 						device_delete_child(
1604 						    sc->mfi_dev,
1605 						    syspd->pd_dev);
1606 						bus_topo_unlock();
1607 						break;
1608 					}
1609 				}
1610 			}
1611 		}
1612 		if (detail->code == MR_EVT_PD_INSERTED) {
1613 			if (mfi_detect_jbod_change) {
1614 				/* Probe for new SYSPD's */
1615 				sx_xlock(&sc->mfi_config_lock);
1616 				mtx_lock(&sc->mfi_io_lock);
1617 				mfi_syspdprobe(sc);
1618 				mtx_unlock(&sc->mfi_io_lock);
1619 				sx_xunlock(&sc->mfi_config_lock);
1620 			}
1621 		}
1622 		if (sc->mfi_cam_rescan_cb != NULL &&
1623 		    (detail->code == MR_EVT_PD_INSERTED ||
1624 		    detail->code == MR_EVT_PD_REMOVED)) {
1625 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1626 		}
1627 		break;
1628 	}
1629 }
1630 
1631 static void
1632 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1633 {
1634 	struct mfi_evt_queue_elm *elm;
1635 
1636 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1637 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1638 	if (elm == NULL)
1639 		return;
1640 	memcpy(&elm->detail, detail, sizeof(*detail));
1641 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1642 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1643 }
1644 
1645 static void
1646 mfi_handle_evt(void *context, int pending)
1647 {
1648 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1649 	struct mfi_softc *sc;
1650 	struct mfi_evt_queue_elm *elm;
1651 
1652 	sc = context;
1653 	TAILQ_INIT(&queue);
1654 	mtx_lock(&sc->mfi_io_lock);
1655 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1656 	mtx_unlock(&sc->mfi_io_lock);
1657 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1658 		TAILQ_REMOVE(&queue, elm, link);
1659 		mfi_decode_evt(sc, &elm->detail);
1660 		free(elm, M_MFIBUF);
1661 	}
1662 }
1663 
1664 static int
1665 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1666 {
1667 	struct mfi_command *cm;
1668 	struct mfi_dcmd_frame *dcmd;
1669 	union mfi_evt current_aen, prior_aen;
1670 	struct mfi_evt_detail *ed = NULL;
1671 	int error = 0;
1672 
1673 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1674 
1675 	current_aen.word = locale;
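	/*
	 * If an AEN wait is already outstanding, re-register only when
	 * the requested class/locale isn't already covered.  Otherwise
	 * merge the two subscriptions (union of locales, lower and thus
	 * more inclusive event class) and abort the old command so it
	 * can be reissued below.
	 */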
1676 	if (sc->mfi_aen_cm != NULL) {
1677 		prior_aen.word =
1678 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1679 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1680 		    !((prior_aen.members.locale & current_aen.members.locale)
1681 		    ^current_aen.members.locale)) {
1682 			return (0);
1683 		} else {
1684 			prior_aen.members.locale |= current_aen.members.locale;
1685 			if (prior_aen.members.evt_class
1686 			    < current_aen.members.evt_class)
1687 				current_aen.members.evt_class =
1688 				    prior_aen.members.evt_class;
1689 			mfi_abort(sc, &sc->mfi_aen_cm);
1690 		}
1691 	}
1692 
1693 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1694 	    (void **)&ed, sizeof(*ed));
1695 	if (error)
1696 		goto out;
1697 
1698 	dcmd = &cm->cm_frame->dcmd;
1699 	((uint32_t *)&dcmd->mbox)[0] = seq;
1700 	((uint32_t *)&dcmd->mbox)[1] = locale;
1701 	cm->cm_flags = MFI_CMD_DATAIN;
1702 	cm->cm_complete = mfi_aen_complete;
1703 
1704 	sc->last_seq_num = seq;
1705 	sc->mfi_aen_cm = cm;
1706 
1707 	mfi_enqueue_ready(cm);
1708 	mfi_startio(sc);
1709 
1710 out:
1711 	return (error);
1712 }
1713 
1714 static void
1715 mfi_aen_complete(struct mfi_command *cm)
1716 {
1717 	struct mfi_frame_header *hdr;
1718 	struct mfi_softc *sc;
1719 	struct mfi_evt_detail *detail;
1720 	struct mfi_aen *mfi_aen_entry, *tmp;
1721 	int seq = 0, aborted = 0;
1722 
1723 	sc = cm->cm_sc;
1724 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1725 
1726 	if (sc->mfi_aen_cm == NULL)
1727 		return;
1728 
1729 	hdr = &cm->cm_frame->header;
1730 
1731 	if (sc->cm_aen_abort ||
1732 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1733 		sc->cm_aen_abort = 0;
1734 		aborted = 1;
1735 	} else {
1736 		sc->mfi_aen_triggered = 1;
1737 		if (sc->mfi_poll_waiting) {
1738 			sc->mfi_poll_waiting = 0;
1739 			selwakeup(&sc->mfi_select);
1740 		}
1741 		detail = cm->cm_data;
1742 		mfi_queue_evt(sc, detail);
1743 		seq = detail->seq + 1;
1744 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1745 		    tmp) {
1746 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1747 			    aen_link);
1748 			PROC_LOCK(mfi_aen_entry->p);
1749 			kern_psignal(mfi_aen_entry->p, SIGIO);
1750 			PROC_UNLOCK(mfi_aen_entry->p);
1751 			free(mfi_aen_entry, M_MFIBUF);
1752 		}
1753 	}
1754 
1755 	free(cm->cm_data, M_MFIBUF);
1756 	wakeup(&sc->mfi_aen_cm);
1757 	sc->mfi_aen_cm = NULL;
1758 	mfi_release_command(cm);
1759 
1760 	/* set it up again so the driver can catch more events */
1761 	if (!aborted)
1762 		mfi_aen_setup(sc, seq);
1763 }
1764 
1765 #define MAX_EVENTS 15
1766 
1767 static int
1768 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1769 {
1770 	struct mfi_command *cm;
1771 	struct mfi_dcmd_frame *dcmd;
1772 	struct mfi_evt_list *el;
1773 	union mfi_evt class_locale;
1774 	int error, i, seq, size;
1775 
1776 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1777 
1778 	class_locale.members.reserved = 0;
1779 	class_locale.members.locale = mfi_event_locale;
1780 	class_locale.members.evt_class  = mfi_event_class;
1781 
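	/*
	 * struct mfi_evt_list already contains one struct mfi_evt_detail,
	 * hence the MAX_EVENTS - 1 below.
	 */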
1782 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1783 		* (MAX_EVENTS - 1);
1784 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1785 	if (el == NULL)
1786 		return (ENOMEM);
1787 
1788 	for (seq = start_seq;;) {
1789 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1790 			free(el, M_MFIBUF);
1791 			return (EBUSY);
1792 		}
1793 
1794 		dcmd = &cm->cm_frame->dcmd;
1795 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1796 		dcmd->header.cmd = MFI_CMD_DCMD;
1797 		dcmd->header.timeout = 0;
1798 		dcmd->header.data_len = size;
1799 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1800 		((uint32_t *)&dcmd->mbox)[0] = seq;
1801 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1802 		cm->cm_sg = &dcmd->sgl;
1803 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1804 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1805 		cm->cm_data = el;
1806 		cm->cm_len = size;
1807 
1808 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1809 			device_printf(sc->mfi_dev,
1810 			    "Failed to get controller entries\n");
1811 			mfi_release_command(cm);
1812 			break;
1813 		}
1814 
1815 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1816 		    BUS_DMASYNC_POSTREAD);
1817 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1818 
1819 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1820 			mfi_release_command(cm);
1821 			break;
1822 		}
1823 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1824 			device_printf(sc->mfi_dev,
1825 			    "Error %d fetching controller entries\n",
1826 			    dcmd->header.cmd_status);
1827 			mfi_release_command(cm);
1828 			error = EIO;
1829 			break;
1830 		}
1831 		mfi_release_command(cm);
1832 
1833 		for (i = 0; i < el->count; i++) {
1834 			/*
1835 			 * If this event is newer than 'stop_seq' then
1836 			 * break out of the loop.  Note that the log
1837 			 * is a circular buffer so we have to handle
1838 			 * the case that our stop point is earlier in
1839 			 * the buffer than our start point.
1840 			 */
1841 			if (el->event[i].seq >= stop_seq) {
1842 				if (start_seq <= stop_seq)
1843 					break;
1844 				else if (el->event[i].seq < start_seq)
1845 					break;
1846 			}
1847 			mfi_queue_evt(sc, &el->event[i]);
1848 		}
1849 		seq = el->event[el->count - 1].seq + 1;
1850 	}
1851 
1852 	free(el, M_MFIBUF);
1853 	return (error);
1854 }
1855 
1856 static int
1857 mfi_add_ld(struct mfi_softc *sc, int id)
1858 {
1859 	struct mfi_command *cm;
1860 	struct mfi_dcmd_frame *dcmd = NULL;
1861 	struct mfi_ld_info *ld_info = NULL;
1862 	struct mfi_disk_pending *ld_pend;
1863 	int error;
1864 
1865 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1866 
1867 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1868 	if (ld_pend != NULL) {
1869 		ld_pend->ld_id = id;
1870 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1871 	}
1872 
1873 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1874 	    (void **)&ld_info, sizeof(*ld_info));
1875 	if (error) {
1876 		device_printf(sc->mfi_dev,
1877 		    "Failed to allocate MFI_DCMD_LD_GET_INFO: %d\n", error);
1878 		if (ld_info)
1879 			free(ld_info, M_MFIBUF);
1880 		return (error);
1881 	}
1882 	cm->cm_flags = MFI_CMD_DATAIN;
1883 	dcmd = &cm->cm_frame->dcmd;
1884 	dcmd->mbox[0] = id;
1885 	if (mfi_wait_command(sc, cm) != 0) {
1886 		device_printf(sc->mfi_dev,
1887 		    "Failed to get logical drive: %d\n", id);
1888 		free(ld_info, M_MFIBUF);
1889 		return (0);
1890 	}
1891 	if (ld_info->ld_config.params.isSSCD != 1)
1892 		mfi_add_ld_complete(cm);
1893 	else {
1894 		mfi_release_command(cm);
1895 		if (ld_info)		/* for SSCD drives, free ld_info here */
1896 			free(ld_info, M_MFIBUF);
1897 	}
1898 	return (0);
1899 }
1900 
1901 static void
1902 mfi_add_ld_complete(struct mfi_command *cm)
1903 {
1904 	struct mfi_frame_header *hdr;
1905 	struct mfi_ld_info *ld_info;
1906 	struct mfi_softc *sc;
1907 	device_t child;
1908 
1909 	sc = cm->cm_sc;
1910 	hdr = &cm->cm_frame->header;
1911 	ld_info = cm->cm_private;
1912 
1913 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1914 		free(ld_info, M_MFIBUF);
1915 		wakeup(&sc->mfi_map_sync_cm);
1916 		mfi_release_command(cm);
1917 		return;
1918 	}
1919 	wakeup(&sc->mfi_map_sync_cm);
1920 	mfi_release_command(cm);
1921 
1922 	mtx_unlock(&sc->mfi_io_lock);
1923 	bus_topo_lock();
1924 	if ((child = device_add_child(sc->mfi_dev, "mfid",
1925 	    DEVICE_UNIT_ANY)) == NULL) {
1926 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1927 		free(ld_info, M_MFIBUF);
1928 		bus_topo_unlock();
1929 		mtx_lock(&sc->mfi_io_lock);
1930 		return;
1931 	}
1932 
1933 	device_set_ivars(child, ld_info);
1934 	device_set_desc(child, "MFI Logical Disk");
1935 	bus_attach_children(sc->mfi_dev);
1936 	bus_topo_unlock();
1937 	mtx_lock(&sc->mfi_io_lock);
1938 }
1939 
1940 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1941 {
1942 	struct mfi_command *cm;
1943 	struct mfi_dcmd_frame *dcmd = NULL;
1944 	struct mfi_pd_info *pd_info = NULL;
1945 	struct mfi_system_pending *syspd_pend;
1946 	int error;
1947 
1948 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1949 
1950 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1951 	if (syspd_pend != NULL) {
1952 		syspd_pend->pd_id = id;
1953 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1954 	}
1955 
1956 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1957 		(void **)&pd_info, sizeof(*pd_info));
1958 	if (error) {
1959 		device_printf(sc->mfi_dev,
1960 		    "Failed to allocate MFI_DCMD_PD_GET_INFO: %d\n",
1961 		    error);
1962 		if (pd_info)
1963 			free(pd_info, M_MFIBUF);
1964 		return (error);
1965 	}
1966 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1967 	dcmd = &cm->cm_frame->dcmd;
1968 	dcmd->mbox[0] = id;
1969 	dcmd->header.scsi_status = 0;
1970 	dcmd->header.pad0 = 0;
1971 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1972 		device_printf(sc->mfi_dev,
1973 		    "Failed to get physical drive info %d\n", id);
1974 		free(pd_info, M_MFIBUF);
1975 		mfi_release_command(cm);
1976 		return (error);
1977 	}
1978 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1979 	    BUS_DMASYNC_POSTREAD);
1980 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1981 	mfi_add_sys_pd_complete(cm);
1982 	return (0);
1983 }
1984 
1985 static void
1986 mfi_add_sys_pd_complete(struct mfi_command *cm)
1987 {
1988 	struct mfi_frame_header *hdr;
1989 	struct mfi_pd_info *pd_info;
1990 	struct mfi_softc *sc;
1991 	device_t child;
1992 
1993 	sc = cm->cm_sc;
1994 	hdr = &cm->cm_frame->header;
1995 	pd_info = cm->cm_private;
1996 
1997 	if (hdr->cmd_status != MFI_STAT_OK) {
1998 		free(pd_info, M_MFIBUF);
1999 		mfi_release_command(cm);
2000 		return;
2001 	}
2002 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2003 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2004 		    pd_info->ref.v.device_id);
2005 		free(pd_info, M_MFIBUF);
2006 		mfi_release_command(cm);
2007 		return;
2008 	}
2009 	mfi_release_command(cm);
2010 
2011 	mtx_unlock(&sc->mfi_io_lock);
2012 	bus_topo_lock();
2013 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd",
2014 	    DEVICE_UNIT_ANY)) == NULL) {
2015 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2016 		free(pd_info, M_MFIBUF);
2017 		bus_topo_unlock();
2018 		mtx_lock(&sc->mfi_io_lock);
2019 		return;
2020 	}
2021 
2022 	device_set_ivars(child, pd_info);
2023 	device_set_desc(child, "MFI System PD");
2024 	bus_attach_children(sc->mfi_dev);
2025 	bus_topo_unlock();
2026 	mtx_lock(&sc->mfi_io_lock);
2027 }
2028 
2029 static struct mfi_command *
2030 mfi_bio_command(struct mfi_softc *sc)
2031 {
2032 	struct bio *bio;
2033 	struct mfi_command *cm = NULL;
2034 
2035 	/* Reserve two commands to avoid starving ioctls. */
2036 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2037 		return (NULL);
2038 	}
2039 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2040 		return (NULL);
2041 	}
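	/*
	 * bio_driver2 carries the I/O type stashed by the disk child,
	 * distinguishing logical-volume from system PD requests.
	 */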
2042 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2043 		cm = mfi_build_ldio(sc, bio);
2044 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2045 		cm = mfi_build_syspdio(sc, bio);
2046 	}
2047 	if (cm == NULL)
2048 		mfi_enqueue_bio(sc, bio);
2049 	return (cm);
2050 }
2051 
2052 /*
2053  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2054  */
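/*
 * CDB selection below: a 6 byte CDB covers LBA < 2^21 with up to 255
 * blocks, a 10 byte CDB covers 32-bit LBAs with up to 65535 blocks, a
 * 12 byte CDB covers 32-bit LBAs with a 32-bit block count, and any
 * larger LBA falls through to a 16 byte CDB.
 */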
2055 
2056 int
2057 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2058 {
2059 	int cdb_len;
2060 
2061 	if (((lba & 0x1fffff) == lba)
2062 	    && ((block_count & 0xff) == block_count)
2063 	    && (byte2 == 0)) {
2064 		/* We can fit in a 6 byte cdb */
2065 		struct scsi_rw_6 *scsi_cmd;
2066 
2067 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2068 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2069 		scsi_ulto3b(lba, scsi_cmd->addr);
2070 		scsi_cmd->length = block_count & 0xff;
2071 		scsi_cmd->control = 0;
2072 		cdb_len = sizeof(*scsi_cmd);
2073 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2074 		/* Need a 10 byte CDB */
2075 		struct scsi_rw_10 *scsi_cmd;
2076 
2077 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2078 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2079 		scsi_cmd->byte2 = byte2;
2080 		scsi_ulto4b(lba, scsi_cmd->addr);
2081 		scsi_cmd->reserved = 0;
2082 		scsi_ulto2b(block_count, scsi_cmd->length);
2083 		scsi_cmd->control = 0;
2084 		cdb_len = sizeof(*scsi_cmd);
2085 	} else if (((block_count & 0xffffffff) == block_count) &&
2086 	    ((lba & 0xffffffff) == lba)) {
2087 		/* Block count too big for a 10 byte CDB; use a 12 byte CDB */
2088 		struct scsi_rw_12 *scsi_cmd;
2089 
2090 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2091 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2092 		scsi_cmd->byte2 = byte2;
2093 		scsi_ulto4b(lba, scsi_cmd->addr);
2094 		scsi_cmd->reserved = 0;
2095 		scsi_ulto4b(block_count, scsi_cmd->length);
2096 		scsi_cmd->control = 0;
2097 		cdb_len = sizeof(*scsi_cmd);
2098 	} else {
2099 		/*
2100 		 * 16 byte CDB.  We'll only get here if the LBA doesn't
2101 		 * fit in 32 bits.
2102 		 */
2103 		struct scsi_rw_16 *scsi_cmd;
2104 
2105 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2106 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2107 		scsi_cmd->byte2 = byte2;
2108 		scsi_u64to8b(lba, scsi_cmd->addr);
2109 		scsi_cmd->reserved = 0;
2110 		scsi_ulto4b(block_count, scsi_cmd->length);
2111 		scsi_cmd->control = 0;
2112 		cdb_len = sizeof(*scsi_cmd);
2113 	}
2114 
2115 	return (cdb_len);
2116 }
2117 
2118 extern char *unmapped_buf;
2119 
2120 static struct mfi_command *
2121 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2122 {
2123 	struct mfi_command *cm;
2124 	struct mfi_pass_frame *pass;
2125 	uint32_t context = 0;
2126 	int flags = 0, blkcount = 0, readop;
2127 	uint8_t cdb_len;
2128 
2129 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2130 
2131 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2132 		return (NULL);
2133 
2134 	/* Zero out the MFI frame */
2135 	context = cm->cm_frame->header.context;
2136 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2137 	cm->cm_frame->header.context = context;
2138 	pass = &cm->cm_frame->pass;
2139 	bzero(pass->cdb, 16);
2140 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2141 	switch (bio->bio_cmd) {
2142 	case BIO_READ:
2143 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2144 		readop = 1;
2145 		break;
2146 	case BIO_WRITE:
2147 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2148 		readop = 0;
2149 		break;
2150 	default:
2151 		/* TODO: what about BIO_DELETE? */
2152 		biofinish(bio, NULL, EOPNOTSUPP);
2153 		mfi_enqueue_free(cm);
2154 		return (NULL);
2155 	}
2156 
2157 	/* Cheat with the sector length to avoid a non-constant division */
2158 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2159 	/* Fill the LBA and Transfer length in CDB */
2160 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2161 	    pass->cdb);
2162 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2163 	pass->header.lun_id = 0;
2164 	pass->header.timeout = 0;
2165 	pass->header.flags = 0;
2166 	pass->header.scsi_status = 0;
2167 	pass->header.sense_len = MFI_SENSE_LEN;
2168 	pass->header.data_len = bio->bio_bcount;
2169 	pass->header.cdb_len = cdb_len;
2170 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2171 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2172 	cm->cm_complete = mfi_bio_complete;
2173 	cm->cm_private = bio;
2174 	cm->cm_data = unmapped_buf;
2175 	cm->cm_len = bio->bio_bcount;
2176 	cm->cm_sg = &pass->sgl;
2177 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2178 	cm->cm_flags = flags;
2179 
2180 	return (cm);
2181 }
2182 
2183 static struct mfi_command *
2184 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2185 {
2186 	struct mfi_io_frame *io;
2187 	struct mfi_command *cm;
2188 	int flags;
2189 	uint32_t blkcount;
2190 	uint32_t context = 0;
2191 
2192 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2193 
2194 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2195 		return (NULL);
2196 
2197 	/* Zero out the MFI frame */
2198 	context = cm->cm_frame->header.context;
2199 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2200 	cm->cm_frame->header.context = context;
2201 	io = &cm->cm_frame->io;
2202 	switch (bio->bio_cmd) {
2203 	case BIO_READ:
2204 		io->header.cmd = MFI_CMD_LD_READ;
2205 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2206 		break;
2207 	case BIO_WRITE:
2208 		io->header.cmd = MFI_CMD_LD_WRITE;
2209 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2210 		break;
2211 	default:
2212 		/* TODO: what about BIO_DELETE? */
2213 		biofinish(bio, NULL, EOPNOTSUPP);
2214 		mfi_enqueue_free(cm);
2215 		return (NULL);
2216 	}
2217 
2218 	/* Cheat with the sector length to avoid a non-constant division */
2219 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2220 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2221 	io->header.timeout = 0;
2222 	io->header.flags = 0;
2223 	io->header.scsi_status = 0;
2224 	io->header.sense_len = MFI_SENSE_LEN;
2225 	io->header.data_len = blkcount;
2226 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2227 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2228 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2229 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2230 	cm->cm_complete = mfi_bio_complete;
2231 	cm->cm_private = bio;
2232 	cm->cm_data = unmapped_buf;
2233 	cm->cm_len = bio->bio_bcount;
2234 	cm->cm_sg = &io->sgl;
2235 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2236 	cm->cm_flags = flags;
2237 
2238 	return (cm);
2239 }
2240 
2241 static void
2242 mfi_bio_complete(struct mfi_command *cm)
2243 {
2244 	struct bio *bio;
2245 	struct mfi_frame_header *hdr;
2246 	struct mfi_softc *sc;
2247 
2248 	bio = cm->cm_private;
2249 	hdr = &cm->cm_frame->header;
2250 	sc = cm->cm_sc;
2251 
2252 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2253 		bio->bio_flags |= BIO_ERROR;
2254 		bio->bio_error = EIO;
2255 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2256 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2257 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2258 	} else if (cm->cm_error != 0) {
2259 		bio->bio_flags |= BIO_ERROR;
2260 		bio->bio_error = cm->cm_error;
2261 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2262 		    cm, cm->cm_error);
2263 	}
2264 
2265 	mfi_release_command(cm);
2266 	mfi_disk_complete(bio);
2267 }
2268 
2269 void
2270 mfi_startio(struct mfi_softc *sc)
2271 {
2272 	struct mfi_command *cm;
2273 	struct ccb_hdr *ccbh;
2274 
2275 	for (;;) {
2276 		/* Don't bother if we're short on resources */
2277 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2278 			break;
2279 
2280 		/* Try a command that has already been prepared */
2281 		cm = mfi_dequeue_ready(sc);
2282 
2283 		if (cm == NULL) {
2284 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2285 				cm = sc->mfi_cam_start(ccbh);
2286 		}
2287 
2288 		/* Nope, so look for work on the bioq */
2289 		if (cm == NULL)
2290 			cm = mfi_bio_command(sc);
2291 
2292 		/* No work available, so exit */
2293 		if (cm == NULL)
2294 			break;
2295 
2296 		/* Send the command to the controller */
2297 		if (mfi_mapcmd(sc, cm) != 0) {
2298 			device_printf(sc->mfi_dev, "Failed to start I/O\n");
2299 			mfi_requeue_ready(cm);
2300 			break;
2301 		}
2302 	}
2303 }
2304 
2305 int
2306 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2307 {
2308 	int error, polled;
2309 
2310 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2311 
2312 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2313 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2314 		if (cm->cm_flags & MFI_CMD_CCB)
2315 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2316 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2317 			    polled);
2318 		else if (cm->cm_flags & MFI_CMD_BIO)
2319 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2320 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2321 			    polled);
2322 		else
2323 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2324 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2325 			    mfi_data_cb, cm, polled);
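		/*
		 * EINPROGRESS means the load was deferred; mfi_data_cb()
		 * will send the frame once the mapping completes, so
		 * freeze the queue until then.
		 */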
2326 		if (error == EINPROGRESS) {
2327 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2328 			return (0);
2329 		}
2330 	} else {
2331 		error = mfi_send_frame(sc, cm);
2332 	}
2333 
2334 	return (error);
2335 }
2336 
2337 static void
2338 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2339 {
2340 	struct mfi_frame_header *hdr;
2341 	struct mfi_command *cm;
2342 	union mfi_sgl *sgl;
2343 	struct mfi_softc *sc;
2344 	int i, j, first, dir;
2345 	int sge_size, locked;
2346 
2347 	cm = (struct mfi_command *)arg;
2348 	sc = cm->cm_sc;
2349 	hdr = &cm->cm_frame->header;
2350 	sgl = cm->cm_sg;
2351 
2352 	/*
2353 	 * We need to check whether we hold the lock: this is an async
2354 	 * callback, so even though our caller mfi_mapcmd() asserts
2355 	 * that it holds the lock, there is no guarantee it wasn't
2356 	 * dropped if bus_dmamap_load() returned before this callback
2357 	 * ran.
2358 	 */
2359 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2360 		mtx_lock(&sc->mfi_io_lock);
2361 
2362 	if (error) {
2363 		printf("error %d in callback\n", error);
2364 		cm->cm_error = error;
2365 		mfi_complete(sc, cm);
2366 		goto out;
2367 	}
2368 	/* Use the IEEE SGL only for I/O on a SKINNY controller.
2369 	 * For other commands on a SKINNY controller use either
2370 	 * sg32 or sg64, based on sizeof(bus_addr_t).
2371 	 * Also calculate the total frame size based on the type
2372 	 * of SGL used.
2373 	 */
2374 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2375 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2376 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2377 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2378 		for (i = 0; i < nsegs; i++) {
2379 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2380 			sgl->sg_skinny[i].len = segs[i].ds_len;
2381 			sgl->sg_skinny[i].flag = 0;
2382 		}
2383 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2384 		sge_size = sizeof(struct mfi_sg_skinny);
2385 		hdr->sg_count = nsegs;
2386 	} else {
2387 		j = 0;
2388 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2389 			first = cm->cm_stp_len;
2390 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2391 				sgl->sg32[j].addr = segs[0].ds_addr;
2392 				sgl->sg32[j++].len = first;
2393 			} else {
2394 				sgl->sg64[j].addr = segs[0].ds_addr;
2395 				sgl->sg64[j++].len = first;
2396 			}
2397 		} else
2398 			first = 0;
2399 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2400 			for (i = 0; i < nsegs; i++) {
2401 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2402 				sgl->sg32[j++].len = segs[i].ds_len - first;
2403 				first = 0;
2404 			}
2405 		} else {
2406 			for (i = 0; i < nsegs; i++) {
2407 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2408 				sgl->sg64[j++].len = segs[i].ds_len - first;
2409 				first = 0;
2410 			}
2411 			hdr->flags |= MFI_FRAME_SGL64;
2412 		}
2413 		hdr->sg_count = j;
2414 		sge_size = sc->mfi_sge_size;
2415 	}
2416 
2417 	dir = 0;
2418 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2419 		dir |= BUS_DMASYNC_PREREAD;
2420 		hdr->flags |= MFI_FRAME_DIR_READ;
2421 	}
2422 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2423 		dir |= BUS_DMASYNC_PREWRITE;
2424 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2425 	}
2426 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2427 	cm->cm_flags |= MFI_CMD_MAPPED;
2428 
2429 	/*
2430 	 * Compute the number of extra frames needed to hold the SG
2431 	 * list.  The first frame is always present, so rounding the
2432 	 * division down, rather than up, already accounts for that
2433 	 * first frame.
2434 	 */
2435 	cm->cm_total_frame_size += (sge_size * nsegs);
2436 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
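	/*
	 * e.g. (assuming 64-byte frames and 8-byte sg32 entries): a
	 * 40-byte I/O frame plus ten SG entries totals 120 bytes, so
	 * (120 - 1) / 64 = 1 extra frame.
	 */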
2437 
2438 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2439 		printf("error %d in callback from mfi_send_frame\n", error);
2440 		cm->cm_error = error;
2441 		mfi_complete(sc, cm);
2442 		goto out;
2443 	}
2444 
2445 out:
2446 	/* leave the lock in the state we found it */
2447 	if (locked == 0)
2448 		mtx_unlock(&sc->mfi_io_lock);
2449 
2450 	return;
2451 }
2452 
2453 static int
2454 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2455 {
2456 	int error;
2457 
2458 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2459 
2460 	if (sc->MFA_enabled)
2461 		error = mfi_tbolt_send_frame(sc, cm);
2462 	else
2463 		error = mfi_std_send_frame(sc, cm);
2464 
2465 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2466 		mfi_remove_busy(cm);
2467 
2468 	return (error);
2469 }
2470 
2471 static int
2472 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2473 {
2474 	struct mfi_frame_header *hdr;
2475 	int tm = mfi_polled_cmd_timeout * 1000;
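	/* mfi_polled_cmd_timeout is in seconds; tm counts 1ms polls. */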
2476 
2477 	hdr = &cm->cm_frame->header;
2478 
2479 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2480 		cm->cm_timestamp = time_uptime;
2481 		mfi_enqueue_busy(cm);
2482 	} else {
2483 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2484 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2485 	}
2486 
2487 	/*
2488 	 * The bus address of the command is aligned on a 64 byte boundary,
2489 	 * leaving the low 6 bits zero.  For whatever reason, the
2490 	 * hardware wants the address shifted right by three, leaving just
2491 	 * 3 zero bits.  These three bits are then used as a prefetching
2492 	 * hint for the hardware to predict how many frames need to be
2493 	 * fetched across the bus.  If a command has more than 8 frames
2494 	 * then the 3 bits are set to 0x7 and the firmware uses other
2495 	 * information in the command to determine the total amount to fetch.
2496 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2497 	 * is enough for both 32bit and 64bit systems.
2498 	 */
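	/*
	 * Illustration of the above: a 64 byte aligned frame at bus
	 * address 0x12340040 is issued by the hardware-specific
	 * mfi_issue_cmd routine as (0x12340040 >> 3) | min(extra, 7),
	 * i.e. 0x02468008 | 0x3 for a command with three extra frames.
	 */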
2499 	if (cm->cm_extra_frames > 7)
2500 		cm->cm_extra_frames = 7;
2501 
2502 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2503 
2504 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2505 		return (0);
2506 
2507 	/* This is a polled command, so busy-wait for it to complete. */
2508 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2509 		DELAY(1000);
2510 		tm -= 1;
2511 		if (tm <= 0)
2512 			break;
2513 	}
2514 
2515 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2516 		device_printf(sc->mfi_dev, "Frame %p timed out "
2517 		    "on command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2518 		return (ETIMEDOUT);
2519 	}
2520 
2521 	return (0);
2522 }
2523 
2524 void
2525 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2526 {
2527 	int dir;
2528 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2529 
2530 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2531 		dir = 0;
2532 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2533 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2534 			dir |= BUS_DMASYNC_POSTREAD;
2535 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2536 			dir |= BUS_DMASYNC_POSTWRITE;
2537 
2538 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2539 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2540 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2541 	}
2542 
2543 	cm->cm_flags |= MFI_CMD_COMPLETED;
2544 
2545 	if (cm->cm_complete != NULL)
2546 		cm->cm_complete(cm);
2547 	else
2548 		wakeup(cm);
2549 }
2550 
2551 static int
2552 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2553 {
2554 	struct mfi_command *cm;
2555 	struct mfi_abort_frame *abort;
2556 	int i = 0, error;
2557 	uint32_t context = 0;
2558 
2559 	mtx_lock(&sc->mfi_io_lock);
2560 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2561 		mtx_unlock(&sc->mfi_io_lock);
2562 		return (EBUSY);
2563 	}
2564 
2565 	/* Zero out the MFI frame */
2566 	context = cm->cm_frame->header.context;
2567 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2568 	cm->cm_frame->header.context = context;
2569 
2570 	abort = &cm->cm_frame->abort;
2571 	abort->header.cmd = MFI_CMD_ABORT;
2572 	abort->header.flags = 0;
2573 	abort->header.scsi_status = 0;
2574 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2575 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2576 	abort->abort_mfi_addr_hi =
2577 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2578 	cm->cm_data = NULL;
2579 	cm->cm_flags = MFI_CMD_POLLED;
2580 
2581 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2582 		device_printf(sc->mfi_dev, "failed to abort command\n");
2583 	mfi_release_command(cm);
2584 
2585 	mtx_unlock(&sc->mfi_io_lock);
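	/* Wait up to 25 seconds (5 x 5s) for the aborted command's
	 * completion handler to run and clear *cm_abort. */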
2586 	while (i < 5 && *cm_abort != NULL) {
2587 		tsleep(cm_abort, 0, "mfiabort",
2588 		    5 * hz);
2589 		i++;
2590 	}
2591 	if (*cm_abort != NULL) {
2592 		/* Force a complete if command didn't abort */
2593 		mtx_lock(&sc->mfi_io_lock);
2594 		(*cm_abort)->cm_complete(*cm_abort);
2595 		mtx_unlock(&sc->mfi_io_lock);
2596 	}
2597 
2598 	return (error);
2599 }
2600 
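/*
 * Polled single-shot block write, used by the kernel crash dump path,
 * where interrupts and the scheduler may no longer be available.
 */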
2601 int
2602 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2603      int len)
2604 {
2605 	struct mfi_command *cm;
2606 	struct mfi_io_frame *io;
2607 	int error;
2608 	uint32_t context = 0;
2609 
2610 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2611 		return (EBUSY);
2612 
2613 	/* Zero out the MFI frame */
2614 	context = cm->cm_frame->header.context;
2615 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2616 	cm->cm_frame->header.context = context;
2617 
2618 	io = &cm->cm_frame->io;
2619 	io->header.cmd = MFI_CMD_LD_WRITE;
2620 	io->header.target_id = id;
2621 	io->header.timeout = 0;
2622 	io->header.flags = 0;
2623 	io->header.scsi_status = 0;
2624 	io->header.sense_len = MFI_SENSE_LEN;
2625 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2626 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2627 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2628 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2629 	io->lba_lo = lba & 0xffffffff;
2630 	cm->cm_data = virt;
2631 	cm->cm_len = len;
2632 	cm->cm_sg = &io->sgl;
2633 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2634 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2635 
2636 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2637 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2638 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2639 	    BUS_DMASYNC_POSTWRITE);
2640 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2641 	mfi_release_command(cm);
2642 
2643 	return (error);
2644 }
2645 
2646 int
2647 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2648     int len)
2649 {
2650 	struct mfi_command *cm;
2651 	struct mfi_pass_frame *pass;
2652 	int error, readop, cdb_len;
2653 	uint32_t blkcount;
2654 
2655 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2656 		return (EBUSY);
2657 
2658 	pass = &cm->cm_frame->pass;
2659 	bzero(pass->cdb, 16);
2660 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2661 
2662 	readop = 0;
2663 	blkcount = howmany(len, MFI_SECTOR_LEN);
2664 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2665 	pass->header.target_id = id;
2666 	pass->header.timeout = 0;
2667 	pass->header.flags = 0;
2668 	pass->header.scsi_status = 0;
2669 	pass->header.sense_len = MFI_SENSE_LEN;
2670 	pass->header.data_len = len;
2671 	pass->header.cdb_len = cdb_len;
2672 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2673 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2674 	cm->cm_data = virt;
2675 	cm->cm_len = len;
2676 	cm->cm_sg = &pass->sgl;
2677 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2678 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2679 
2680 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2681 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2682 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2683 	    BUS_DMASYNC_POSTWRITE);
2684 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2685 	mfi_release_command(cm);
2686 
2687 	return (error);
2688 }
2689 
2690 static int
2691 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2692 {
2693 	struct mfi_softc *sc;
2694 	int error;
2695 
2696 	sc = dev->si_drv1;
2697 
2698 	mtx_lock(&sc->mfi_io_lock);
2699 	if (sc->mfi_detaching)
2700 		error = ENXIO;
2701 	else {
2702 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2703 		error = 0;
2704 	}
2705 	mtx_unlock(&sc->mfi_io_lock);
2706 
2707 	return (error);
2708 }
2709 
2710 static int
2711 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2712 {
2713 	struct mfi_softc *sc;
2714 	struct mfi_aen *mfi_aen_entry, *tmp;
2715 
2716 	sc = dev->si_drv1;
2717 
2718 	mtx_lock(&sc->mfi_io_lock);
2719 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2720 
2721 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2722 		if (mfi_aen_entry->p == curproc) {
2723 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2724 			    aen_link);
2725 			free(mfi_aen_entry, M_MFIBUF);
2726 		}
2727 	}
2728 	mtx_unlock(&sc->mfi_io_lock);
2729 	return (0);
2730 }
2731 
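/*
 * Opcodes that create or destroy volumes must be serialized against
 * attach and detach of the corresponding disk children; take the
 * config sx lock for those and report whether it was taken so that
 * mfi_config_unlock() can drop it.
 */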
2732 static int
2733 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2734 {
2735 
2736 	switch (opcode) {
2737 	case MFI_DCMD_LD_DELETE:
2738 	case MFI_DCMD_CFG_ADD:
2739 	case MFI_DCMD_CFG_CLEAR:
2740 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2741 		sx_xlock(&sc->mfi_config_lock);
2742 		return (1);
2743 	default:
2744 		return (0);
2745 	}
2746 }
2747 
2748 static void
2749 mfi_config_unlock(struct mfi_softc *sc, int locked)
2750 {
2751 
2752 	if (locked)
2753 		sx_xunlock(&sc->mfi_config_lock);
2754 }
2755 
2756 /*
2757  * Perform pre-issue checks on commands from userland and possibly veto
2758  * them.
2759  */
2760 static int
2761 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2762 {
2763 	struct mfi_disk *ld, *ld2;
2764 	int error;
2765 	struct mfi_system_pd *syspd = NULL;
2766 	uint16_t syspd_id;
2767 	uint16_t *mbox;
2768 
2769 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2770 	error = 0;
2771 	switch (cm->cm_frame->dcmd.opcode) {
2772 	case MFI_DCMD_LD_DELETE:
2773 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2774 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2775 				break;
2776 		}
2777 		if (ld == NULL)
2778 			error = ENOENT;
2779 		else
2780 			error = mfi_disk_disable(ld);
2781 		break;
2782 	case MFI_DCMD_CFG_CLEAR:
2783 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2784 			error = mfi_disk_disable(ld);
2785 			if (error)
2786 				break;
2787 		}
2788 		if (error) {
2789 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2790 				if (ld2 == ld)
2791 					break;
2792 				mfi_disk_enable(ld2);
2793 			}
2794 		}
2795 		break;
2796 	case MFI_DCMD_PD_STATE_SET:
2797 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2798 		syspd_id = mbox[0];
2799 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2800 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2801 				if (syspd->pd_id == syspd_id)
2802 					break;
2803 			}
2804 		}
2805 		} else
2807 		if (syspd)
2808 			error = mfi_syspd_disable(syspd);
2809 		break;
2810 	default:
2811 		break;
2812 	}
2813 	return (error);
2814 }
2815 
2816 /* Perform post-issue checks on commands from userland. */
2817 static void
2818 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2819 {
2820 	struct mfi_disk *ld, *ldn;
2821 	struct mfi_system_pd *syspd = NULL;
2822 	uint16_t syspd_id;
2823 	uint16_t *mbox;
2824 
2825 	switch (cm->cm_frame->dcmd.opcode) {
2826 	case MFI_DCMD_LD_DELETE:
2827 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2828 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2829 				break;
2830 		}
2831 		KASSERT(ld != NULL, ("volume disappeared"));
2832 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2833 			mtx_unlock(&sc->mfi_io_lock);
2834 			bus_topo_lock();
2835 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2836 			bus_topo_unlock();
2837 			mtx_lock(&sc->mfi_io_lock);
2838 		} else
2839 			mfi_disk_enable(ld);
2840 		break;
2841 	case MFI_DCMD_CFG_CLEAR:
2842 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2843 			mtx_unlock(&sc->mfi_io_lock);
2844 			bus_topo_lock();
2845 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2846 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2847 			}
2848 			bus_topo_unlock();
2849 			mtx_lock(&sc->mfi_io_lock);
2850 		} else {
2851 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2852 				mfi_disk_enable(ld);
2853 		}
2854 		break;
2855 	case MFI_DCMD_CFG_ADD:
2856 		mfi_ldprobe(sc);
2857 		break;
2858 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2859 		mfi_ldprobe(sc);
2860 		break;
2861 	case MFI_DCMD_PD_STATE_SET:
2862 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2863 		syspd_id = mbox[0];
2864 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2865 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2866 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2867 					break;
2868 			}
2869 		} else
2871 			break;
2872 		/* If the transition fails then enable the syspd again */
2873 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2874 			mfi_syspd_enable(syspd);
2875 		break;
2876 	}
2877 }
2878 
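/*
 * Determine whether a userland CFG_ADD or LD_DELETE targets an SSCD
 * (SSD cache) volume.  SSCDs have no mfid child device, so the usual
 * pre/post disable/enable checks must be skipped for them.
 */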
2879 static int
2880 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2881 {
2882 	struct mfi_config_data *conf_data;
2883 	struct mfi_command *ld_cm = NULL;
2884 	struct mfi_ld_info *ld_info = NULL;
2885 	struct mfi_ld_config *ld;
2886 	char *p;
2887 	int error = 0;
2888 
2889 	conf_data = (struct mfi_config_data *)cm->cm_data;
2890 
2891 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2892 		p = (char *)conf_data->array;
2893 		p += conf_data->array_size * conf_data->array_count;
2894 		ld = (struct mfi_ld_config *)p;
2895 		if (ld->params.isSSCD == 1)
2896 			error = 1;
2897 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2898 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2899 		    (void **)&ld_info, sizeof(*ld_info));
2900 		if (error) {
2901 			device_printf(sc->mfi_dev, "Failed to allocate "
2902 			    "MFI_DCMD_LD_GET_INFO: %d\n", error);
2903 			if (ld_info)
2904 				free(ld_info, M_MFIBUF);
2905 			return (0);
2906 		}
2907 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2908 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2909 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2910 		if (mfi_wait_command(sc, ld_cm) != 0) {
2911 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2912 			mfi_release_command(ld_cm);
2913 			free(ld_info, M_MFIBUF);
2914 			return (0);
2915 		}
2916 
2917 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2918 			free(ld_info, M_MFIBUF);
2919 			mfi_release_command(ld_cm);
2920 			return (0);
2921 		} else
2923 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2924 
2925 		if (ld_info->ld_config.params.isSSCD == 1)
2926 			error = 1;
2927 
2928 		mfi_release_command(ld_cm);
2929 		free(ld_info, M_MFIBUF);
2930 	}
2931 	return (error);
2932 }
2933 
2934 static int
2935 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2936 {
2937 	uint8_t i;
2938 	struct mfi_ioc_packet *ioc;
2939 	int sge_size, error;
2940 	struct megasas_sge *kern_sge;
2941 	ioc = (struct mfi_ioc_packet *)arg;
2942 
2943 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2944 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2945 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2946 
2947 	if (sizeof(bus_addr_t) == 8) {
2948 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2949 		cm->cm_extra_frames = 2;
2950 		sge_size = sizeof(struct mfi_sg64);
2951 	} else {
2952 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2953 		sge_size = sizeof(struct mfi_sg32);
2954 	}
2955 
2956 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
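	/*
	 * For each user SG element allocate a DMA-able kernel bounce
	 * buffer, record its bus address in both the kernel SGE array
	 * and the frame's SGL, and copy the user data in.
	 */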
2957 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2958 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2959 			1, 0,			/* algnmnt, boundary */
2960 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2961 			BUS_SPACE_MAXADDR,	/* highaddr */
2962 			NULL, NULL,		/* filter, filterarg */
2963 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2964 			2,			/* nsegments */
2965 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2966 			BUS_DMA_ALLOCNOW,	/* flags */
2967 			NULL, NULL,		/* lockfunc, lockarg */
2968 			&sc->mfi_kbuff_arr_dmat[i])) {
2969 			device_printf(sc->mfi_dev,
2970 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2971 			return (ENOMEM);
2972 		}
2973 
2974 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2975 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2976 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2977 			device_printf(sc->mfi_dev,
2978 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2979 			return (ENOMEM);
2980 		}
2981 
2982 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2983 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2984 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2985 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2986 
2987 		if (!sc->kbuff_arr[i]) {
2988 			device_printf(sc->mfi_dev,
2989 			    "Could not allocate memory for kbuff_arr info\n");
2990 			return (-1);
2991 		}
2992 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2993 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2994 
2995 		if (sizeof(bus_addr_t) == 8) {
2996 			cm->cm_frame->stp.sgl.sg64[i].addr =
2997 			    kern_sge[i].phys_addr;
2998 			cm->cm_frame->stp.sgl.sg64[i].len =
2999 			    ioc->mfi_sgl[i].iov_len;
3000 		} else {
3001 			cm->cm_frame->stp.sgl.sg32[i].addr =
3002 			    kern_sge[i].phys_addr;
3003 			cm->cm_frame->stp.sgl.sg32[i].len =
3004 			    ioc->mfi_sgl[i].iov_len;
3005 		}
3006 
3007 		error = copyin(ioc->mfi_sgl[i].iov_base,
3008 		    sc->kbuff_arr[i],
3009 		    ioc->mfi_sgl[i].iov_len);
3010 		if (error != 0) {
3011 			device_printf(sc->mfi_dev, "Copy in failed\n");
3012 			return (error);
3013 		}
3014 	}
3015 
3016 	cm->cm_flags |= MFI_CMD_MAPPED;
3017 	return (0);
3018 }
3019 
3020 static int
3021 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3022 {
3023 	struct mfi_command *cm;
3024 	struct mfi_dcmd_frame *dcmd;
3025 	void *ioc_buf = NULL;
3026 	uint32_t context;
3027 	int error = 0, locked;
3028 
3029 	if (ioc->buf_size > 0) {
3030 		if (ioc->buf_size > 1024 * 1024)
3031 			return (ENOMEM);
3032 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3033 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3034 		if (error) {
3035 			device_printf(sc->mfi_dev, "failed to copyin\n");
3036 			free(ioc_buf, M_MFIBUF);
3037 			return (error);
3038 		}
3039 	}
3040 
3041 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3042 
3043 	mtx_lock(&sc->mfi_io_lock);
3044 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3045 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3046 
3047 	/* Save context for later */
3048 	context = cm->cm_frame->header.context;
3049 
3050 	dcmd = &cm->cm_frame->dcmd;
3051 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3052 
3053 	cm->cm_sg = &dcmd->sgl;
3054 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3055 	cm->cm_data = ioc_buf;
3056 	cm->cm_len = ioc->buf_size;
3057 
3058 	/* restore context */
3059 	cm->cm_frame->header.context = context;
3060 
3061 	/* Cheat since we don't know if we're writing or reading */
3062 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3063 
3064 	error = mfi_check_command_pre(sc, cm);
3065 	if (error)
3066 		goto out;
3067 
3068 	error = mfi_wait_command(sc, cm);
3069 	if (error) {
3070 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3071 		goto out;
3072 	}
3073 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3074 	mfi_check_command_post(sc, cm);
3075 out:
3076 	mfi_release_command(cm);
3077 	mtx_unlock(&sc->mfi_io_lock);
3078 	mfi_config_unlock(sc, locked);
3079 	if (ioc->buf_size > 0)
3080 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3081 	if (ioc_buf)
3082 		free(ioc_buf, M_MFIBUF);
3083 	return (error);
3084 }
3085 
3086 static int
3087 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3088 {
3089 	struct mfi_softc *sc;
3090 	union mfi_statrequest *ms;
3091 	struct mfi_ioc_packet *ioc;
3092 #ifdef COMPAT_FREEBSD32
3093 	struct mfi_ioc_packet32 *ioc32;
3094 #endif
3095 	struct mfi_ioc_aen *aen;
3096 	struct mfi_command *cm = NULL;
3097 	uint32_t context = 0;
3098 	union mfi_sense_ptr sense_ptr;
3099 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3100 	size_t len;
3101 	int i, res;
3102 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3103 #ifdef COMPAT_FREEBSD32
3104 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3105 	struct mfi_ioc_passthru iop_swab;
3106 #endif
3107 	int error, locked;
3108 	sc = dev->si_drv1;
3109 	error = 0;
3110 
3111 	if (sc->adpreset)
3112 		return (EBUSY);
3113 
3114 	if (sc->hw_crit_error)
3115 		return (EBUSY);
3116 
3117 	if (sc->issuepend_done == 0)
3118 		return (EBUSY);
3119 
3120 	switch (cmd) {
3121 	case MFIIO_STATS:
3122 		ms = (union mfi_statrequest *)arg;
3123 		switch (ms->ms_item) {
3124 		case MFIQ_FREE:
3125 		case MFIQ_BIO:
3126 		case MFIQ_READY:
3127 		case MFIQ_BUSY:
3128 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3129 			    sizeof(struct mfi_qstat));
3130 			break;
3131 		default:
3132 			error = ENOIOCTL;
3133 			break;
3134 		}
3135 		break;
3136 	case MFIIO_QUERY_DISK:
3137 	{
3138 		struct mfi_query_disk *qd;
3139 		struct mfi_disk *ld;
3140 
3141 		qd = (struct mfi_query_disk *)arg;
3142 		mtx_lock(&sc->mfi_io_lock);
3143 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3144 			if (ld->ld_id == qd->array_id)
3145 				break;
3146 		}
3147 		if (ld == NULL) {
3148 			qd->present = 0;
3149 			mtx_unlock(&sc->mfi_io_lock);
3150 			return (0);
3151 		}
3152 		qd->present = 1;
3153 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3154 			qd->open = 1;
3155 		bzero(qd->devname, SPECNAMELEN + 1);
3156 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3157 		mtx_unlock(&sc->mfi_io_lock);
3158 		break;
3159 	}
3160 	case MFI_CMD:
3161 #ifdef COMPAT_FREEBSD32
3162 	case MFI_CMD32:
3163 #endif
3164 		{
3165 		devclass_t devclass;
3166 		ioc = (struct mfi_ioc_packet *)arg;
3167 		int adapter;
3168 
3169 		adapter = ioc->mfi_adapter_no;
3170 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3171 			devclass = devclass_find("mfi");
3172 			sc = devclass_get_softc(devclass, adapter);
3173 		}
3174 		mtx_lock(&sc->mfi_io_lock);
3175 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3176 			mtx_unlock(&sc->mfi_io_lock);
3177 			return (EBUSY);
3178 		}
3179 		mtx_unlock(&sc->mfi_io_lock);
3180 		locked = 0;
3181 
3182 		/*
3183 		 * save off original context since copying from user
3184 		 * will clobber some data
3185 		 */
3186 		context = cm->cm_frame->header.context;
3187 		cm->cm_frame->header.context = cm->cm_index;
3188 
3189 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3190 		    2 * MEGAMFI_FRAME_SIZE);
3191 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3192 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3193 		cm->cm_frame->header.scsi_status = 0;
3194 		cm->cm_frame->header.pad0 = 0;
3195 		if (ioc->mfi_sge_count) {
3196 			cm->cm_sg =
3197 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3198 		}
3199 		cm->cm_flags = 0;
3200 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3201 			cm->cm_flags |= MFI_CMD_DATAIN;
3202 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3203 			cm->cm_flags |= MFI_CMD_DATAOUT;
3204 		/* Legacy app shim */
3205 		if (cm->cm_flags == 0)
3206 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3207 		cm->cm_len = cm->cm_frame->header.data_len;
3208 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3209 #ifdef COMPAT_FREEBSD32
3210 			if (cmd == MFI_CMD) {
3211 #endif
3212 				/* Native */
3213 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3214 #ifdef COMPAT_FREEBSD32
3215 			} else {
3216 				/* 32bit on 64bit */
3217 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3218 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3219 			}
3220 #endif
3221 			cm->cm_len += cm->cm_stp_len;
3222 		}
3223 		if (cm->cm_len &&
3224 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3225 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3226 			    M_WAITOK | M_ZERO);
3227 		} else {
3228 			cm->cm_data = NULL;
3229 		}
3230 
3231 		/* restore header context */
3232 		cm->cm_frame->header.context = context;
3233 
3234 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3235 			res = mfi_stp_cmd(sc, cm, arg);
3236 			if (res != 0)
3237 				goto out;
3238 		} else {
3239 			temp = data;
3240 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3241 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3242 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3243 #ifdef COMPAT_FREEBSD32
3244 					if (cmd == MFI_CMD) {
3245 #endif
3246 						/* Native */
3247 						addr = ioc->mfi_sgl[i].iov_base;
3248 						len = ioc->mfi_sgl[i].iov_len;
3249 #ifdef COMPAT_FREEBSD32
3250 					} else {
3251 						/* 32bit on 64bit */
3252 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3253 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3254 						len = ioc32->mfi_sgl[i].iov_len;
3255 					}
3256 #endif
3257 					error = copyin(addr, temp, len);
3258 					if (error != 0) {
3259 						device_printf(sc->mfi_dev,
3260 						    "Copy in failed\n");
3261 						goto out;
3262 					}
3263 					temp = &temp[len];
3264 				}
3265 			}
3266 		}
3267 
3268 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3269 			locked = mfi_config_lock(sc,
3270 			     cm->cm_frame->dcmd.opcode);
3271 
3272 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3273 			cm->cm_frame->pass.sense_addr_lo =
3274 			    (uint32_t)cm->cm_sense_busaddr;
3275 			cm->cm_frame->pass.sense_addr_hi =
3276 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3277 		}
3278 		mtx_lock(&sc->mfi_io_lock);
3279 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3280 		if (!skip_pre_post) {
3281 			error = mfi_check_command_pre(sc, cm);
3282 			if (error) {
3283 				mtx_unlock(&sc->mfi_io_lock);
3284 				goto out;
3285 			}
3286 		}
3287 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3288 			device_printf(sc->mfi_dev,
3289 			    "Controller poll failed\n");
3290 			mtx_unlock(&sc->mfi_io_lock);
3291 			goto out;
3292 		}
3293 		if (!skip_pre_post) {
3294 			mfi_check_command_post(sc, cm);
3295 		}
3296 		mtx_unlock(&sc->mfi_io_lock);
3297 
3298 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3299 			temp = data;
3300 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3301 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3302 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3303 #ifdef COMPAT_FREEBSD32
3304 					if (cmd == MFI_CMD) {
3305 #endif
3306 						/* Native */
3307 						addr = ioc->mfi_sgl[i].iov_base;
3308 						len = ioc->mfi_sgl[i].iov_len;
3309 #ifdef COMPAT_FREEBSD32
3310 					} else {
3311 						/* 32bit on 64bit */
3312 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3313 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3314 						len = ioc32->mfi_sgl[i].iov_len;
3315 					}
3316 #endif
3317 					error = copyout(temp, addr, len);
3318 					if (error != 0) {
3319 						device_printf(sc->mfi_dev,
3320 						    "Copy out failed\n");
3321 						goto out;
3322 					}
3323 					temp = &temp[len];
3324 				}
3325 			}
3326 		}
3327 
3328 		if (ioc->mfi_sense_len) {
3329 			/* get user-space sense ptr then copy out sense */
3330 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3331 			    &sense_ptr.sense_ptr_data[0],
3332 			    sizeof(sense_ptr.sense_ptr_data));
3333 #ifdef COMPAT_FREEBSD32
3334 			if (cmd != MFI_CMD) {
3335 				/*
3336 				 * not 64-bit native, so zero out any
3337 				 * address over 32 bits */
3338 				sense_ptr.addr.high = 0;
3339 			}
3340 #endif
3341 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3342 			    ioc->mfi_sense_len);
3343 			if (error != 0) {
3344 				device_printf(sc->mfi_dev,
3345 				    "Copy out failed\n");
3346 				goto out;
3347 			}
3348 		}
3349 
3350 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3351 out:
3352 		mfi_config_unlock(sc, locked);
3353 		if (data)
3354 			free(data, M_MFIBUF);
3355 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3356 			for (i = 0; i < 2; i++) {
3357 				if (sc->kbuff_arr[i]) {
3358 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3359 						bus_dmamap_unload(
3360 						    sc->mfi_kbuff_arr_dmat[i],
3361 						    sc->mfi_kbuff_arr_dmamap[i]
3362 						    );
3363 					if (sc->kbuff_arr[i] != NULL)
3364 						bus_dmamem_free(
3365 						    sc->mfi_kbuff_arr_dmat[i],
3366 						    sc->kbuff_arr[i],
3367 						    sc->mfi_kbuff_arr_dmamap[i]
3368 						    );
3369 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3370 						bus_dma_tag_destroy(
3371 						    sc->mfi_kbuff_arr_dmat[i]);
3372 				}
3373 			}
3374 		}
3375 		if (cm) {
3376 			mtx_lock(&sc->mfi_io_lock);
3377 			mfi_release_command(cm);
3378 			mtx_unlock(&sc->mfi_io_lock);
3379 		}
3380 
3381 		break;
3382 		}
3383 	case MFI_SET_AEN:
3384 		aen = (struct mfi_ioc_aen *)arg;
3385 		mtx_lock(&sc->mfi_io_lock);
3386 		error = mfi_aen_register(sc, aen->aen_seq_num,
3387 		    aen->aen_class_locale);
3388 		mtx_unlock(&sc->mfi_io_lock);
3389 
3390 		break;
3391 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3392 		{
3393 			devclass_t devclass;
3394 			struct mfi_linux_ioc_packet l_ioc;
3395 			int adapter;
3396 
3397 			devclass = devclass_find("mfi");
3398 			if (devclass == NULL)
3399 				return (ENOENT);
3400 
3401 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3402 			if (error)
3403 				return (error);
3404 			adapter = l_ioc.lioc_adapter_no;
3405 			sc = devclass_get_softc(devclass, adapter);
3406 			if (sc == NULL)
3407 				return (ENOENT);
3408 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3409 			    cmd, arg, flag, td));
3410 			break;
3411 		}
3412 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3413 		{
3414 			devclass_t devclass;
3415 			struct mfi_linux_ioc_aen l_aen;
3416 			int adapter;
3417 
3418 			devclass = devclass_find("mfi");
3419 			if (devclass == NULL)
3420 				return (ENOENT);
3421 
3422 			error = copyin(arg, &l_aen, sizeof(l_aen));
3423 			if (error)
3424 				return (error);
3425 			adapter = l_aen.laen_adapter_no;
3426 			sc = devclass_get_softc(devclass, adapter);
3427 			if (sc == NULL)
3428 				return (ENOENT);
3429 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3430 			    cmd, arg, flag, td));
3431 			break;
3432 		}
3433 #ifdef COMPAT_FREEBSD32
3434 	case MFIIO_PASSTHRU32:
3435 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3436 			error = ENOTTY;
3437 			break;
3438 		}
3439 		iop_swab.ioc_frame	= iop32->ioc_frame;
3440 		iop_swab.buf_size	= iop32->buf_size;
3441 		iop_swab.buf		= PTRIN(iop32->buf);
3442 		iop			= &iop_swab;
3443 		/* FALLTHROUGH */
3444 #endif
3445 	case MFIIO_PASSTHRU:
3446 		error = mfi_user_command(sc, iop);
3447 #ifdef COMPAT_FREEBSD32
3448 		if (cmd == MFIIO_PASSTHRU32)
3449 			iop32->ioc_frame = iop_swab.ioc_frame;
3450 #endif
3451 		break;
3452 	default:
3453 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3454 		error = ENOTTY;
3455 		break;
3456 	}
3457 
3458 	return (error);
3459 }
3460 
3461 static int
3462 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3463 {
3464 	struct mfi_softc *sc;
3465 	struct mfi_linux_ioc_packet l_ioc;
3466 	struct mfi_linux_ioc_aen l_aen;
3467 	struct mfi_command *cm = NULL;
3468 	struct mfi_aen *mfi_aen_entry;
3469 	union mfi_sense_ptr sense_ptr;
3470 	uint32_t context = 0;
3471 	uint8_t *data = NULL, *temp;
3472 	int i;
3473 	int error, locked;
3474 
3475 	sc = dev->si_drv1;
3476 	error = 0;
3477 	switch (cmd) {
3478 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3479 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3480 		if (error != 0)
3481 			return (error);
3482 
3483 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3484 			return (EINVAL);
3485 		}
3486 
3487 		mtx_lock(&sc->mfi_io_lock);
3488 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3489 			mtx_unlock(&sc->mfi_io_lock);
3490 			return (EBUSY);
3491 		}
3492 		mtx_unlock(&sc->mfi_io_lock);
3493 		locked = 0;

		/*
		 * Save the original context; the frame copied in from
		 * user space below would otherwise clobber it.
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = NULL;
		}

		/* restore the saved header context */
		cm->cm_frame->header.context = context;
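
		/*
		 * For DATAOUT (write) commands, gather the user's
		 * scatter/gather segments into the single contiguous
		 * kernel buffer allocated above.
		 */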
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    temp,
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
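
		/*
		 * For SCSI pass-through commands, point the firmware at
		 * the driver's DMA-able sense buffer so that sense data
		 * can be copied back to the caller afterwards.
		 */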
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);
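
		/*
		 * For DATAIN (read) commands, scatter the kernel buffer
		 * back out to the user's scatter/gather segments.
		 */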
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get the user-space sense ptr, then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * Only 32-bit Linux is supported, so zero out
			 * any address above 32 bits.
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet *)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
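		/*
		 * Common exit path: drop the config lock if it was
		 * taken, free the bounce buffer, and return the command
		 * to the controller's free pool.
		 */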
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		device_printf(sc->mfi_dev, "Registering AEN for pid %d\n",
		    curproc->p_pid);
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}
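
/*
 * Poll handler for the mfi control device: report the device readable
 * once an asynchronous event notification (AEN) has triggered, raise
 * POLLERR when no AEN command is outstanding, and otherwise record the
 * thread for a later selwakeup().
 */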
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}
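
/*
 * Hypothetical userland sketch (not part of the driver; mfi_fd and
 * read_next_event() are made-up names): a management tool would
 * typically register for AENs through the ioctl interface above and
 * then block in poll(2) on the control device until an event fires:
 *
 *	struct pollfd pfd = { .fd = mfi_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *		read_next_event(mfi_fd);
 */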
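
/*
 * Debugging helper: walk every mfi adapter in the system and print any
 * busy command that has been outstanding longer than mfi_cmd_timeout.
 */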
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout __unused;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}
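
/*
 * Watchdog callout, rearmed every mfi_cmd_timeout seconds.  Gives
 * mfi_tbolt_reset() a chance to handle a pending adapter reset, then
 * scans the busy queue and logs any command that has exceeded the
 * timeout.  Timed-out commands are deliberately not forced to fail;
 * see the comment in the loop below.
 */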
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout __unused = 0;

	deadline = time_uptime - mfi_cmd_timeout;
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				/*
				 * An adapter reset is in progress;
				 * refresh the timestamp instead of
				 * reporting a timeout.
				 */
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever,
				 * we do not fail them, as there is no
				 * way to tell whether the controller
				 * has actually processed them or not.
				 *
				 * In addition, it is very likely that
				 * force-failing a command here would
				 * cause a panic, e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	/* Referenced here so the debugging helper stays compiled in. */
	if (0)
		mfi_dump_all();
}
3789