/* xref: /freebsd/sys/dev/mfi/mfi.c (revision d93a896ef95946b0bf1219866fcb324b78543444) */
/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	   0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	   &mfi_polled_cmd_timeout, 0,
	   "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

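/*
 * Controller-generation specific register access methods.  Each family
 * (xscale, 1078/gen2 "ppc", skinny, and the ThunderBolt variants defined
 * elsewhere) implements the same four operations -- enable interrupts,
 * read the firmware status register, check-and-acknowledge an interrupt,
 * and issue a command -- against slightly different doorbell registers,
 * so mfi_attach() binds the appropriate set of function pointers into
 * the softc.
 */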
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OMSG0));
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OSP0));
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return (1);

	MFI_WRITE4(sc, MFI_OSTS, status);
	return (0);
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return (1);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return (0);
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

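/*
 * Walk the firmware through its initialization states until it reports
 * READY.  Each state is allowed up to max_wait seconds to change; the
 * DEVICE_SCAN state is treated as making progress whenever the raw
 * status register value changes, since a scan can legitimately take a
 * long time.
 */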
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
		case MFI_FWSTATE_FW_INIT_2:
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

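/*
 * busdma callback used for the small contiguous allocations below; each
 * of those tags is created with nsegments = 1, so only the address of
 * the first (and only) segment needs to be recorded.
 */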
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: allocate the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate DMA memory for the MPI2 IOC Init descriptor.  It
		 * is kept separate from the request and reply descriptor
		 * allocations above to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the DMA tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

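/*
 * Slice the preallocated frame and sense buffers into an array of
 * mfi_command structures and create a DMA map for each.  The frame
 * header's context field is set to the command's index so that a
 * completed command can be looked up directly from the reply queue.
 */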
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues (e.g. the busy queue) depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure it's
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

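/*
 * Allocate a free command and initialize it as a DCMD with the given
 * opcode.  If the caller supplies a buffer pointer whose target is NULL,
 * a data buffer of bufsize bytes is allocated on its behalf and returned
 * through *bufp; the caller owns that buffer and must free it.
 */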
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

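/*
 * Issue the polled MFI INIT frame that hands the reply queue and its
 * producer/consumer indices (the mfi_hwcomms area) to the firmware.
 * This must succeed before any other frames can be posted.
 */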
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

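/*
 * Queue a command and sleep until it completes.  The caller must hold
 * mfi_io_lock; msleep() drops it while waiting.
 */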
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 as the status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

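/*
 * Release everything mfi_attach() set up, in roughly the reverse order.
 * Each resource is checked before it is freed, so this is safe to call
 * on a partially initialized softc after an attach failure.
 */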
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

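/*
 * Interrupt handler for non-ThunderBolt controllers.  Completions are
 * reaped from the shared reply queue between the firmware's producer
 * index (hw_pi) and the driver's consumer index (hw_ci); each queue
 * entry holds the context (index) of a completed command.
 */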
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

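/*
 * Quiesce the controller at shutdown: abort the long-running AEN and
 * map-sync commands, then send a polled MFI_DCMD_CTRL_SHUTDOWN frame.
 */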
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

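/*
 * Reconcile the driver's list of system PDs (JBOD disks) with the
 * firmware's view: attach children for newly exposed PDs and delete the
 * children of PDs that are no longer reported.
 */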
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PDs */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPDs whose state has changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			device_printf(sc->mfi_dev,
			    "syspd %d no longer present; deleting\n",
			    syspd->pd_id);
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

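/*
 * Probe the firmware for logical disks and attach a child device for
 * each volume that is neither attached nor already pending attach.
 */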
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

1495 
1496 /*
1497  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1498  * the bits in 24-31 are all set, then it is the number of seconds since
1499  * boot.
1500  */
1501 static const char *
1502 format_timestamp(uint32_t timestamp)
1503 {
1504 	static char buffer[32];
1505 
1506 	if ((timestamp & 0xff000000) == 0xff000000)
1507 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1508 		    0x00ffffff);
1509 	else
1510 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1511 	return (buffer);
1512 }
1513 
1514 static const char *
1515 format_class(int8_t class)
1516 {
1517 	static char buffer[6];
1518 
1519 	switch (class) {
1520 	case MFI_EVT_CLASS_DEBUG:
1521 		return ("debug");
1522 	case MFI_EVT_CLASS_PROGRESS:
1523 		return ("progress");
1524 	case MFI_EVT_CLASS_INFO:
1525 		return ("info");
1526 	case MFI_EVT_CLASS_WARNING:
1527 		return ("WARN");
1528 	case MFI_EVT_CLASS_CRITICAL:
1529 		return ("CRIT");
1530 	case MFI_EVT_CLASS_FATAL:
1531 		return ("FATAL");
1532 	case MFI_EVT_CLASS_DEAD:
1533 		return ("DEAD");
1534 	default:
1535 		snprintf(buffer, sizeof(buffer), "%d", class);
1536 		return (buffer);
1537 	}
1538 }
1539 
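/*
 * Log a decoded event and react to the hot-plug style events: host bus
 * scan requests and PD insert/remove trigger a SYSPD re-probe, and an
 * LD going offline causes its child device to be deleted.
 */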
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPDs and delete
				 * invalid SYSPDs.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all events starting from the
		 * one logged after the last shutdown; skip those old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPDs */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

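/*
 * Taskqueue handler that drains the event queue.  The queued elements
 * are spliced off under mfi_io_lock and then decoded unlocked, since
 * mfi_decode_evt() may need to acquire Giant to delete child devices.
 */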
1649 static void
1650 mfi_handle_evt(void *context, int pending)
1651 {
1652 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1653 	struct mfi_softc *sc;
1654 	struct mfi_evt_queue_elm *elm;
1655 
1656 	sc = context;
1657 	TAILQ_INIT(&queue);
1658 	mtx_lock(&sc->mfi_io_lock);
1659 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1660 	mtx_unlock(&sc->mfi_io_lock);
1661 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1662 		TAILQ_REMOVE(&queue, elm, link);
1663 		mfi_decode_evt(sc, &elm->detail);
1664 		free(elm, M_MFIBUF);
1665 	}
1666 }
1667 
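/*
 * Register for an asynchronous event notification (AEN) with the given
 * sequence number and locale.  If an AEN command is already outstanding
 * and covers the request, reuse it; otherwise widen the locale/class,
 * abort the old command, and issue a new MFI_DCMD_CTRL_EVENT_WAIT.
 */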
1668 static int
1669 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1670 {
1671 	struct mfi_command *cm;
1672 	struct mfi_dcmd_frame *dcmd;
1673 	union mfi_evt current_aen, prior_aen;
1674 	struct mfi_evt_detail *ed = NULL;
1675 	int error = 0;
1676 
1677 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1678 
1679 	current_aen.word = locale;
1680 	if (sc->mfi_aen_cm != NULL) {
1681 		prior_aen.word =
1682 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1683 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1684 		    !((prior_aen.members.locale & current_aen.members.locale)
1685 		    ^ current_aen.members.locale)) {
1686 			return (0);
1687 		} else {
1688 			prior_aen.members.locale |= current_aen.members.locale;
1689 			if (prior_aen.members.evt_class
1690 			    < current_aen.members.evt_class)
1691 				current_aen.members.evt_class =
1692 				    prior_aen.members.evt_class;
1693 			mfi_abort(sc, &sc->mfi_aen_cm);
1694 		}
1695 	}
1696 
1697 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1698 	    (void **)&ed, sizeof(*ed));
1699 	if (error)
1700 		goto out;
1701 
1702 	dcmd = &cm->cm_frame->dcmd;
1703 	((uint32_t *)&dcmd->mbox)[0] = seq;
1704 	((uint32_t *)&dcmd->mbox)[1] = locale;
1705 	cm->cm_flags = MFI_CMD_DATAIN;
1706 	cm->cm_complete = mfi_aen_complete;
1707 
1708 	sc->last_seq_num = seq;
1709 	sc->mfi_aen_cm = cm;
1710 
1711 	mfi_enqueue_ready(cm);
1712 	mfi_startio(sc);
1713 
1714 out:
1715 	return (error);
1716 }
1717 
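/*
 * Completion handler for the outstanding AEN command: queue the event
 * detail, wake poll(2) waiters and signal registered processes, then
 * re-arm the AEN with the next sequence number unless it was aborted.
 */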
1718 static void
1719 mfi_aen_complete(struct mfi_command *cm)
1720 {
1721 	struct mfi_frame_header *hdr;
1722 	struct mfi_softc *sc;
1723 	struct mfi_evt_detail *detail;
1724 	struct mfi_aen *mfi_aen_entry, *tmp;
1725 	int seq = 0, aborted = 0;
1726 
1727 	sc = cm->cm_sc;
1728 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1729 
1730 	if (sc->mfi_aen_cm == NULL)
1731 		return;
1732 
1733 	hdr = &cm->cm_frame->header;
1734 
1735 	if (sc->cm_aen_abort ||
1736 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1737 		sc->cm_aen_abort = 0;
1738 		aborted = 1;
1739 	} else {
1740 		sc->mfi_aen_triggered = 1;
1741 		if (sc->mfi_poll_waiting) {
1742 			sc->mfi_poll_waiting = 0;
1743 			selwakeup(&sc->mfi_select);
1744 		}
1745 		detail = cm->cm_data;
1746 		mfi_queue_evt(sc, detail);
1747 		seq = detail->seq + 1;
1748 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1749 		    tmp) {
1750 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1751 			    aen_link);
1752 			PROC_LOCK(mfi_aen_entry->p);
1753 			kern_psignal(mfi_aen_entry->p, SIGIO);
1754 			PROC_UNLOCK(mfi_aen_entry->p);
1755 			free(mfi_aen_entry, M_MFIBUF);
1756 		}
1757 	}
1758 
1759 	free(cm->cm_data, M_MFIBUF);
1760 	wakeup(&sc->mfi_aen_cm);
1761 	sc->mfi_aen_cm = NULL;
1762 	mfi_release_command(cm);
1763 
1764 	/* set it up again so the driver can catch more events */
1765 	if (!aborted)
1766 		mfi_aen_setup(sc, seq);
1767 }
1768 
1769 #define MAX_EVENTS 15
1770 
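/*
 * Fetch the event log entries between start_seq and stop_seq with
 * polled MFI_DCMD_CTRL_EVENT_GET commands, MAX_EVENTS at a time, and
 * queue each entry for processing.
 */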
1771 static int
1772 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1773 {
1774 	struct mfi_command *cm;
1775 	struct mfi_dcmd_frame *dcmd;
1776 	struct mfi_evt_list *el;
1777 	union mfi_evt class_locale;
1778 	int error, i, seq, size;
1779 
1780 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1781 
1782 	class_locale.members.reserved = 0;
1783 	class_locale.members.locale = mfi_event_locale;
1784 	class_locale.members.evt_class  = mfi_event_class;
1785 
1786 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1787 		* (MAX_EVENTS - 1);
1788 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1789 	if (el == NULL)
1790 		return (ENOMEM);
1791 
1792 	for (seq = start_seq;;) {
1793 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1794 			free(el, M_MFIBUF);
1795 			return (EBUSY);
1796 		}
1797 
1798 		dcmd = &cm->cm_frame->dcmd;
1799 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1800 		dcmd->header.cmd = MFI_CMD_DCMD;
1801 		dcmd->header.timeout = 0;
1802 		dcmd->header.data_len = size;
1803 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1804 		((uint32_t *)&dcmd->mbox)[0] = seq;
1805 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1806 		cm->cm_sg = &dcmd->sgl;
1807 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1808 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1809 		cm->cm_data = el;
1810 		cm->cm_len = size;
1811 
1812 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1813 			device_printf(sc->mfi_dev,
1814 			    "Failed to get controller entries\n");
1815 			mfi_release_command(cm);
1816 			break;
1817 		}
1818 
1819 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1820 		    BUS_DMASYNC_POSTREAD);
1821 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1822 
1823 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1824 			mfi_release_command(cm);
1825 			break;
1826 		}
1827 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1828 			device_printf(sc->mfi_dev,
1829 			    "Error %d fetching controller entries\n",
1830 			    dcmd->header.cmd_status);
1831 			mfi_release_command(cm);
1832 			error = EIO;
1833 			break;
1834 		}
1835 		mfi_release_command(cm);
1836 
1837 		for (i = 0; i < el->count; i++) {
1838 			/*
1839 			 * If this event is newer than 'stop_seq' then
1840 			 * break out of the loop.  Note that the log
1841 			 * is a circular buffer so we have to handle
1842 			 * the case that our stop point is earlier in
1843 			 * the buffer than our start point.
1844 			 */
1845 			if (el->event[i].seq >= stop_seq) {
1846 				if (start_seq <= stop_seq)
1847 					break;
1848 				else if (el->event[i].seq < start_seq)
1849 					break;
1850 			}
1851 			mfi_queue_evt(sc, &el->event[i]);
1852 		}
1853 		seq = el->event[el->count - 1].seq + 1;
1854 	}
1855 
1856 	free(el, M_MFIBUF);
1857 	return (error);
1858 }
1859 
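/*
 * Fetch the info for logical drive 'id' and attach an mfid child
 * device for it, unless the volume is an SSCD, which gets no disk
 * device.
 */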
1860 static int
1861 mfi_add_ld(struct mfi_softc *sc, int id)
1862 {
1863 	struct mfi_command *cm;
1864 	struct mfi_dcmd_frame *dcmd = NULL;
1865 	struct mfi_ld_info *ld_info = NULL;
1866 	struct mfi_disk_pending *ld_pend;
1867 	int error;
1868 
1869 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1870 
1871 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1872 	if (ld_pend != NULL) {
1873 		ld_pend->ld_id = id;
1874 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1875 	}
1876 
1877 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1878 	    (void **)&ld_info, sizeof(*ld_info));
1879 	if (error) {
1880 		device_printf(sc->mfi_dev,
1881 		    "Failed to allocate command for MFI_DCMD_LD_GET_INFO %d\n", error);
1882 		if (ld_info)
1883 			free(ld_info, M_MFIBUF);
1884 		return (error);
1885 	}
1886 	cm->cm_flags = MFI_CMD_DATAIN;
1887 	dcmd = &cm->cm_frame->dcmd;
1888 	dcmd->mbox[0] = id;
1889 	if (mfi_wait_command(sc, cm) != 0) {
1890 		device_printf(sc->mfi_dev,
1891 		    "Failed to get logical drive: %d\n", id);
1892 		free(ld_info, M_MFIBUF);
1893 		return (0);
1894 	}
1895 	if (ld_info->ld_config.params.isSSCD != 1)
1896 		mfi_add_ld_complete(cm);
1897 	else {
1898 		mfi_release_command(cm);
1899 		if (ld_info)		/* for SSCD drives, free ld_info here */
1900 			free(ld_info, M_MFIBUF);
1901 	}
1902 	return (0);
1903 }
1904 
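/*
 * Second half of adding a logical drive: hand the fetched ld_info to a
 * new mfid child device.  Giant must be held for the new-bus calls, so
 * the I/O lock is dropped around them.
 */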
1905 static void
1906 mfi_add_ld_complete(struct mfi_command *cm)
1907 {
1908 	struct mfi_frame_header *hdr;
1909 	struct mfi_ld_info *ld_info;
1910 	struct mfi_softc *sc;
1911 	device_t child;
1912 
1913 	sc = cm->cm_sc;
1914 	hdr = &cm->cm_frame->header;
1915 	ld_info = cm->cm_private;
1916 
1917 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1918 		free(ld_info, M_MFIBUF);
1919 		wakeup(&sc->mfi_map_sync_cm);
1920 		mfi_release_command(cm);
1921 		return;
1922 	}
1923 	wakeup(&sc->mfi_map_sync_cm);
1924 	mfi_release_command(cm);
1925 
1926 	mtx_unlock(&sc->mfi_io_lock);
1927 	mtx_lock(&Giant);
1928 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1929 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1930 		free(ld_info, M_MFIBUF);
1931 		mtx_unlock(&Giant);
1932 		mtx_lock(&sc->mfi_io_lock);
1933 		return;
1934 	}
1935 
1936 	device_set_ivars(child, ld_info);
1937 	device_set_desc(child, "MFI Logical Disk");
1938 	bus_generic_attach(sc->mfi_dev);
1939 	mtx_unlock(&Giant);
1940 	mtx_lock(&sc->mfi_io_lock);
1941 }
1942 
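/*
 * Fetch the info for physical drive 'id' with a polled command and, if
 * it is a system PD (JBOD), attach an mfisyspd child device for it.
 */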
1943 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1944 {
1945 	struct mfi_command *cm;
1946 	struct mfi_dcmd_frame *dcmd = NULL;
1947 	struct mfi_pd_info *pd_info = NULL;
1948 	struct mfi_system_pending *syspd_pend;
1949 	int error;
1950 
1951 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1952 
1953 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1954 	if (syspd_pend != NULL) {
1955 		syspd_pend->pd_id = id;
1956 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1957 	}
1958 
1959 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1960 		(void **)&pd_info, sizeof(*pd_info));
1961 	if (error) {
1962 		device_printf(sc->mfi_dev,
1963 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1964 		    error);
1965 		if (pd_info)
1966 			free(pd_info, M_MFIBUF);
1967 		return (error);
1968 	}
1969 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1970 	dcmd = &cm->cm_frame->dcmd;
1971 	dcmd->mbox[0] = id;
1972 	dcmd->header.scsi_status = 0;
1973 	dcmd->header.pad0 = 0;
1974 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1975 		device_printf(sc->mfi_dev,
1976 		    "Failed to get physical drive info %d\n", id);
1977 		free(pd_info, M_MFIBUF);
1978 		mfi_release_command(cm);
1979 		return (error);
1980 	}
1981 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1982 	    BUS_DMASYNC_POSTREAD);
1983 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1984 	mfi_add_sys_pd_complete(cm);
1985 	return (0);
1986 }
1987 
1988 static void
1989 mfi_add_sys_pd_complete(struct mfi_command *cm)
1990 {
1991 	struct mfi_frame_header *hdr;
1992 	struct mfi_pd_info *pd_info;
1993 	struct mfi_softc *sc;
1994 	device_t child;
1995 
1996 	sc = cm->cm_sc;
1997 	hdr = &cm->cm_frame->header;
1998 	pd_info = cm->cm_private;
1999 
2000 	if (hdr->cmd_status != MFI_STAT_OK) {
2001 		free(pd_info, M_MFIBUF);
2002 		mfi_release_command(cm);
2003 		return;
2004 	}
2005 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2006 		device_printf(sc->mfi_dev, "PD %x is not a SYSTEM PD\n",
2007 		    pd_info->ref.v.device_id);
2008 		free(pd_info, M_MFIBUF);
2009 		mfi_release_command(cm);
2010 		return;
2011 	}
2012 	mfi_release_command(cm);
2013 
2014 	mtx_unlock(&sc->mfi_io_lock);
2015 	mtx_lock(&Giant);
2016 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2017 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2018 		free(pd_info, M_MFIBUF);
2019 		mtx_unlock(&Giant);
2020 		mtx_lock(&sc->mfi_io_lock);
2021 		return;
2022 	}
2023 
2024 	device_set_ivars(child, pd_info);
2025 	device_set_desc(child, "MFI System PD");
2026 	bus_generic_attach(sc->mfi_dev);
2027 	mtx_unlock(&Giant);
2028 	mtx_lock(&sc->mfi_io_lock);
2029 }
2030 
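/*
 * Pull the next bio off the queue and build a command for it, either
 * logical drive I/O or system PD pass-through I/O depending on how the
 * bio was tagged.
 */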
2031 static struct mfi_command *
2032 mfi_bio_command(struct mfi_softc *sc)
2033 {
2034 	struct bio *bio;
2035 	struct mfi_command *cm = NULL;
2036 
2037 	/* Reserve two commands to avoid starving ioctl requests */
2038 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2039 		return (NULL);
2040 	}
2041 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2042 		return (NULL);
2043 	}
2044 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2045 		cm = mfi_build_ldio(sc, bio);
2046 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2047 		cm = mfi_build_syspdio(sc, bio);
2048 	}
2049 	if (cm == NULL)
2050 		mfi_enqueue_bio(sc, bio);
2051 	return (cm);
2052 }
2053 
2054 /*
2055  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2056  */
2057 
2058 int
2059 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2060 {
2061 	int cdb_len;
2062 
2063 	if (((lba & 0x1fffff) == lba)
2064 	    && ((block_count & 0xff) == block_count)
2065 	    && (byte2 == 0)) {
2066 		/* We can fit in a 6 byte cdb */
2067 		struct scsi_rw_6 *scsi_cmd;
2068 
2069 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2070 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2071 		scsi_ulto3b(lba, scsi_cmd->addr);
2072 		scsi_cmd->length = block_count & 0xff;
2073 		scsi_cmd->control = 0;
2074 		cdb_len = sizeof(*scsi_cmd);
2075 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2076 		/* Need a 10 byte CDB */
2077 		struct scsi_rw_10 *scsi_cmd;
2078 
2079 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2080 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2081 		scsi_cmd->byte2 = byte2;
2082 		scsi_ulto4b(lba, scsi_cmd->addr);
2083 		scsi_cmd->reserved = 0;
2084 		scsi_ulto2b(block_count, scsi_cmd->length);
2085 		scsi_cmd->control = 0;
2086 		cdb_len = sizeof(*scsi_cmd);
2087 	} else if (((block_count & 0xffffffff) == block_count) &&
2088 	    ((lba & 0xffffffff) == lba)) {
2089 		/* Block count too big for a 10 byte CDB; use a 12 byte CDB */
2090 		struct scsi_rw_12 *scsi_cmd;
2091 
2092 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2093 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2094 		scsi_cmd->byte2 = byte2;
2095 		scsi_ulto4b(lba, scsi_cmd->addr);
2096 		scsi_cmd->reserved = 0;
2097 		scsi_ulto4b(block_count, scsi_cmd->length);
2098 		scsi_cmd->control = 0;
2099 		cdb_len = sizeof(*scsi_cmd);
2100 	} else {
2101 		/*
2102 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2103 		 * 16 byte CDB.  We only get here if the LBA won't fit
2104 		 * in 32 bits.
2105 		struct scsi_rw_16 *scsi_cmd;
2106 
2107 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2108 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2109 		scsi_cmd->byte2 = byte2;
2110 		scsi_u64to8b(lba, scsi_cmd->addr);
2111 		scsi_cmd->reserved = 0;
2112 		scsi_ulto4b(block_count, scsi_cmd->length);
2113 		scsi_cmd->control = 0;
2114 		cdb_len = sizeof(*scsi_cmd);
2115 	}
2116 
2117 	return (cdb_len);
2118 }
2119 
2120 extern char *unmapped_buf;
2121 
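/*
 * Build a SCSI pass-through frame for system PD I/O, with a CDB sized
 * to fit the LBA and transfer length.
 */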
2122 static struct mfi_command *
2123 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2124 {
2125 	struct mfi_command *cm;
2126 	struct mfi_pass_frame *pass;
2127 	uint32_t context = 0;
2128 	int flags = 0, blkcount = 0, readop;
2129 	uint8_t cdb_len;
2130 
2131 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2132 
2133 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2134 	    return (NULL);
2135 
2136 	/* Zero out the MFI frame */
2137 	context = cm->cm_frame->header.context;
2138 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2139 	cm->cm_frame->header.context = context;
2140 	pass = &cm->cm_frame->pass;
2141 	bzero(pass->cdb, 16);
2142 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2143 	switch (bio->bio_cmd) {
2144 	case BIO_READ:
2145 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2146 		readop = 1;
2147 		break;
2148 	case BIO_WRITE:
2149 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2150 		readop = 0;
2151 		break;
2152 	default:
2153 		/* TODO: BIO_DELETE is not handled */
2154 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2155 	}
2156 
2157 	/* Cheat with the sector length to avoid a non-constant division */
2158 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2159 	/* Fill the LBA and Transfer length in CDB */
2160 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2161 	    pass->cdb);
2162 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2163 	pass->header.lun_id = 0;
2164 	pass->header.timeout = 0;
2165 	pass->header.flags = 0;
2166 	pass->header.scsi_status = 0;
2167 	pass->header.sense_len = MFI_SENSE_LEN;
2168 	pass->header.data_len = bio->bio_bcount;
2169 	pass->header.cdb_len = cdb_len;
2170 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2171 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2172 	cm->cm_complete = mfi_bio_complete;
2173 	cm->cm_private = bio;
2174 	cm->cm_data = unmapped_buf;
2175 	cm->cm_len = bio->bio_bcount;
2176 	cm->cm_sg = &pass->sgl;
2177 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2178 	cm->cm_flags = flags;
2179 
2180 	return (cm);
2181 }
2182 
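/*
 * Build a native LD read/write frame for a bio.
 */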
2183 static struct mfi_command *
2184 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2185 {
2186 	struct mfi_io_frame *io;
2187 	struct mfi_command *cm;
2188 	int flags;
2189 	uint32_t blkcount;
2190 	uint32_t context = 0;
2191 
2192 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2193 
2194 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2195 	    return (NULL);
2196 
2197 	/* Zero out the MFI frame */
2198 	context = cm->cm_frame->header.context;
2199 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2200 	cm->cm_frame->header.context = context;
2201 	io = &cm->cm_frame->io;
2202 	switch (bio->bio_cmd) {
2203 	case BIO_READ:
2204 		io->header.cmd = MFI_CMD_LD_READ;
2205 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2206 		break;
2207 	case BIO_WRITE:
2208 		io->header.cmd = MFI_CMD_LD_WRITE;
2209 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2210 		break;
2211 	default:
2212 		/* TODO: BIO_DELETE is not handled */
2213 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2214 	}
2215 
2216 	/* Cheat with the sector length to avoid a non-constant division */
2217 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2218 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2219 	io->header.timeout = 0;
2220 	io->header.flags = 0;
2221 	io->header.scsi_status = 0;
2222 	io->header.sense_len = MFI_SENSE_LEN;
2223 	io->header.data_len = blkcount;
2224 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2225 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2226 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2227 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2228 	cm->cm_complete = mfi_bio_complete;
2229 	cm->cm_private = bio;
2230 	cm->cm_data = unmapped_buf;
2231 	cm->cm_len = bio->bio_bcount;
2232 	cm->cm_sg = &io->sgl;
2233 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2234 	cm->cm_flags = flags;
2235 
2236 	return (cm);
2237 }
2238 
2239 static void
2240 mfi_bio_complete(struct mfi_command *cm)
2241 {
2242 	struct bio *bio;
2243 	struct mfi_frame_header *hdr;
2244 	struct mfi_softc *sc;
2245 
2246 	bio = cm->cm_private;
2247 	hdr = &cm->cm_frame->header;
2248 	sc = cm->cm_sc;
2249 
2250 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2251 		bio->bio_flags |= BIO_ERROR;
2252 		bio->bio_error = EIO;
2253 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2254 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2255 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2256 	} else if (cm->cm_error != 0) {
2257 		bio->bio_flags |= BIO_ERROR;
2258 		bio->bio_error = cm->cm_error;
2259 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2260 		    cm, cm->cm_error);
2261 	}
2262 
2263 	mfi_release_command(cm);
2264 	mfi_disk_complete(bio);
2265 }
2266 
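/*
 * Issue work to the controller: drain the ready queue, then CAM ccbs,
 * then queued bios, until the queues are empty, resources run out, or
 * a command fails to map.
 */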
2267 void
2268 mfi_startio(struct mfi_softc *sc)
2269 {
2270 	struct mfi_command *cm;
2271 	struct ccb_hdr *ccbh;
2272 
2273 	for (;;) {
2274 		/* Don't bother if we're short on resources */
2275 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2276 			break;
2277 
2278 		/* Try a command that has already been prepared */
2279 		cm = mfi_dequeue_ready(sc);
2280 
2281 		if (cm == NULL) {
2282 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2283 				cm = sc->mfi_cam_start(ccbh);
2284 		}
2285 
2286 		/* Nope, so look for work on the bioq */
2287 		if (cm == NULL)
2288 			cm = mfi_bio_command(sc);
2289 
2290 		/* No work available, so exit */
2291 		if (cm == NULL)
2292 			break;
2293 
2294 		/* Send the command to the controller */
2295 		if (mfi_mapcmd(sc, cm) != 0) {
2296 			device_printf(sc->mfi_dev, "Failed to startio\n");
2297 			mfi_requeue_ready(cm);
2298 			break;
2299 		}
2300 	}
2301 }
2302 
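/*
 * Map a command's data for DMA and send it to the controller.  The SGL
 * is filled in and the frame sent from the mfi_data_cb callback;
 * commands without data are sent directly.
 */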
2303 int
2304 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2305 {
2306 	int error, polled;
2307 
2308 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2309 
2310 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2311 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2312 		if (cm->cm_flags & MFI_CMD_CCB)
2313 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2314 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2315 			    polled);
2316 		else if (cm->cm_flags & MFI_CMD_BIO)
2317 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2318 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2319 			    polled);
2320 		else
2321 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2322 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2323 			    mfi_data_cb, cm, polled);
2324 		if (error == EINPROGRESS) {
2325 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2326 			return (0);
2327 		}
2328 	} else {
2329 		error = mfi_send_frame(sc, cm);
2330 	}
2331 
2332 	return (error);
2333 }
2334 
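/*
 * bus_dma callback: fill in the frame's scatter/gather list from the
 * DMA segments, sync the buffers, and send the frame.
 */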
2335 static void
2336 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2337 {
2338 	struct mfi_frame_header *hdr;
2339 	struct mfi_command *cm;
2340 	union mfi_sgl *sgl;
2341 	struct mfi_softc *sc;
2342 	int i, j, first, dir;
2343 	int sge_size, locked;
2344 
2345 	cm = (struct mfi_command *)arg;
2346 	sc = cm->cm_sc;
2347 	hdr = &cm->cm_frame->header;
2348 	sgl = cm->cm_sg;
2349 
2350 	/*
2351 	 * We need to check whether we already hold the lock: this is
2352 	 * an asynchronous callback, so even though our caller
2353 	 * mfi_mapcmd() asserts that it holds the lock, there is no
2354 	 * guarantee it hasn't been dropped if bus_dmamap_load()
2355 	 * returned before this callback completed.
2356 	 */
2357 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2358 		mtx_lock(&sc->mfi_io_lock);
2359 
2360 	if (error) {
2361 		printf("error %d in callback\n", error);
2362 		cm->cm_error = error;
2363 		mfi_complete(sc, cm);
2364 		goto out;
2365 	}
2366 	/* Use the IEEE SGL only for I/O on a SKINNY controller.
2367 	 * For other commands on a SKINNY controller use either
2368 	 * sg32 or sg64, based on sizeof(bus_addr_t).
2369 	 * Also calculate the total frame size based on the type
2370 	 * of SGL used.
2371 	 */
2372 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2373 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2374 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2375 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2376 		for (i = 0; i < nsegs; i++) {
2377 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2378 			sgl->sg_skinny[i].len = segs[i].ds_len;
2379 			sgl->sg_skinny[i].flag = 0;
2380 		}
2381 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2382 		sge_size = sizeof(struct mfi_sg_skinny);
2383 		hdr->sg_count = nsegs;
2384 	} else {
2385 		j = 0;
2386 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2387 			first = cm->cm_stp_len;
2388 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2389 				sgl->sg32[j].addr = segs[0].ds_addr;
2390 				sgl->sg32[j++].len = first;
2391 			} else {
2392 				sgl->sg64[j].addr = segs[0].ds_addr;
2393 				sgl->sg64[j++].len = first;
2394 			}
2395 		} else
2396 			first = 0;
2397 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2398 			for (i = 0; i < nsegs; i++) {
2399 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2400 				sgl->sg32[j++].len = segs[i].ds_len - first;
2401 				first = 0;
2402 			}
2403 		} else {
2404 			for (i = 0; i < nsegs; i++) {
2405 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2406 				sgl->sg64[j++].len = segs[i].ds_len - first;
2407 				first = 0;
2408 			}
2409 			hdr->flags |= MFI_FRAME_SGL64;
2410 		}
2411 		hdr->sg_count = j;
2412 		sge_size = sc->mfi_sge_size;
2413 	}
2414 
2415 	dir = 0;
2416 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2417 		dir |= BUS_DMASYNC_PREREAD;
2418 		hdr->flags |= MFI_FRAME_DIR_READ;
2419 	}
2420 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2421 		dir |= BUS_DMASYNC_PREWRITE;
2422 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2423 	}
2424 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2425 	cm->cm_flags |= MFI_CMD_MAPPED;
2426 
2427 	/*
2428 	 * Instead of calculating the total number of frames in the
2429 	 * compound frame, assume there will always be at least one
2430 	 * frame, so don't compensate for the remainder of the
2431 	 * following division.
2432 	 */
2433 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2434 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2435 
2436 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2437 		printf("error %d in callback from mfi_send_frame\n", error);
2438 		cm->cm_error = error;
2439 		mfi_complete(sc, cm);
2440 		goto out;
2441 	}
2442 
2443 out:
2444 	/* leave the lock in the state we found it */
2445 	if (locked == 0)
2446 		mtx_unlock(&sc->mfi_io_lock);
2447 
2448 	return;
2449 }
2450 
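/*
 * Dispatch a frame via the Thunderbolt (MFA) or standard path, backing
 * the command out of the busy queue on failure.
 */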
2451 static int
2452 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2453 {
2454 	int error;
2455 
2456 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2457 
2458 	if (sc->MFA_enabled)
2459 		error = mfi_tbolt_send_frame(sc, cm);
2460 	else
2461 		error = mfi_std_send_frame(sc, cm);
2462 
2463 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2464 		mfi_remove_busy(cm);
2465 
2466 	return (error);
2467 }
2468 
2469 static int
2470 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2471 {
2472 	struct mfi_frame_header *hdr;
2473 	int tm = mfi_polled_cmd_timeout * 1000;
2474 
2475 	hdr = &cm->cm_frame->header;
2476 
2477 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2478 		cm->cm_timestamp = time_uptime;
2479 		mfi_enqueue_busy(cm);
2480 	} else {
2481 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2482 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2483 	}
2484 
2485 	/*
2486 	 * The bus address of the command is aligned on a 64 byte boundary,
2487 	 * leaving the low 6 bits zero.  For whatever reason, the
2488 	 * hardware wants the address shifted right by three, leaving just
2489 	 * 3 zero bits.  These three bits are then used as a prefetching
2490 	 * hint for the hardware to predict how many frames need to be
2491 	 * fetched across the bus.  If a command has more than 8 frames
2492 	 * then the 3 bits are set to 0x7 and the firmware uses other
2493 	 * information in the command to determine the total amount to fetch.
2494 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2495 	 * is enough for both 32bit and 64bit systems.
2496 	 */
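	/*
	 * Illustrative (hypothetical) values: a frame at bus address
	 * 0x1000 with 3 extra frames would be posted to the hardware
	 * as (0x1000 >> 3) | 3 == 0x203.
	 */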
2497 	if (cm->cm_extra_frames > 7)
2498 		cm->cm_extra_frames = 7;
2499 
2500 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2501 
2502 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2503 		return (0);
2504 
2505 	/* This is a polled command, so busy-wait for it to complete. */
2506 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2507 		DELAY(1000);
2508 		tm -= 1;
2509 		if (tm <= 0)
2510 			break;
2511 	}
2512 
2513 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2514 		device_printf(sc->mfi_dev, "Frame %p timed out "
2515 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2516 		return (ETIMEDOUT);
2517 	}
2518 
2519 	return (0);
2520 }
2521 
2523 void
2524 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2525 {
2526 	int dir;
2527 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2528 
2529 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2530 		dir = 0;
2531 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2532 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2533 			dir |= BUS_DMASYNC_POSTREAD;
2534 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2535 			dir |= BUS_DMASYNC_POSTWRITE;
2536 
2537 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2538 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2539 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2540 	}
2541 
2542 	cm->cm_flags |= MFI_CMD_COMPLETED;
2543 
2544 	if (cm->cm_complete != NULL)
2545 		cm->cm_complete(cm);
2546 	else
2547 		wakeup(cm);
2548 }
2549 
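/*
 * Abort the command pointed to by cm_abort with an MFI_CMD_ABORT frame,
 * then wait up to 25 seconds (5 x 5s) for it to complete, forcing a
 * completion if it never does.
 */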
2550 static int
2551 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2552 {
2553 	struct mfi_command *cm;
2554 	struct mfi_abort_frame *abort;
2555 	int i = 0, error;
2556 	uint32_t context = 0;
2557 
2558 	mtx_lock(&sc->mfi_io_lock);
2559 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2560 		mtx_unlock(&sc->mfi_io_lock);
2561 		return (EBUSY);
2562 	}
2563 
2564 	/* Zero out the MFI frame */
2565 	context = cm->cm_frame->header.context;
2566 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2567 	cm->cm_frame->header.context = context;
2568 
2569 	abort = &cm->cm_frame->abort;
2570 	abort->header.cmd = MFI_CMD_ABORT;
2571 	abort->header.flags = 0;
2572 	abort->header.scsi_status = 0;
2573 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2574 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2575 	abort->abort_mfi_addr_hi =
2576 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2577 	cm->cm_data = NULL;
2578 	cm->cm_flags = MFI_CMD_POLLED;
2579 
2580 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2581 		device_printf(sc->mfi_dev, "failed to abort command\n");
2582 	mfi_release_command(cm);
2583 
2584 	mtx_unlock(&sc->mfi_io_lock);
2585 	while (i < 5 && *cm_abort != NULL) {
2586 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2588 		i++;
2589 	}
2590 	if (*cm_abort != NULL) {
2591 		/* Force a complete if command didn't abort */
2592 		mtx_lock(&sc->mfi_io_lock);
2593 		(*cm_abort)->cm_complete(*cm_abort);
2594 		mtx_unlock(&sc->mfi_io_lock);
2595 	}
2596 
2597 	return (error);
2598 }
2599 
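/*
 * Write a range of blocks to a logical drive with a polled command;
 * meant for contexts such as kernel crash dumps where interrupts and
 * sleeping are unavailable.
 */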
2600 int
2601 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2602      int len)
2603 {
2604 	struct mfi_command *cm;
2605 	struct mfi_io_frame *io;
2606 	int error;
2607 	uint32_t context = 0;
2608 
2609 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2610 		return (EBUSY);
2611 
2612 	/* Zero out the MFI frame */
2613 	context = cm->cm_frame->header.context;
2614 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2615 	cm->cm_frame->header.context = context;
2616 
2617 	io = &cm->cm_frame->io;
2618 	io->header.cmd = MFI_CMD_LD_WRITE;
2619 	io->header.target_id = id;
2620 	io->header.timeout = 0;
2621 	io->header.flags = 0;
2622 	io->header.scsi_status = 0;
2623 	io->header.sense_len = MFI_SENSE_LEN;
2624 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2625 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2626 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2627 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2628 	io->lba_lo = lba & 0xffffffff;
2629 	cm->cm_data = virt;
2630 	cm->cm_len = len;
2631 	cm->cm_sg = &io->sgl;
2632 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2633 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2634 
2635 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2636 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2637 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2638 	    BUS_DMASYNC_POSTWRITE);
2639 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2640 	mfi_release_command(cm);
2641 
2642 	return (error);
2643 }
2644 
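/*
 * As above, but write the blocks to a system PD via a polled SCSI
 * pass-through command.
 */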
2645 int
2646 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2647     int len)
2648 {
2649 	struct mfi_command *cm;
2650 	struct mfi_pass_frame *pass;
2651 	int error, readop, cdb_len;
2652 	uint32_t blkcount;
2653 
2654 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2655 		return (EBUSY);
2656 
2657 	pass = &cm->cm_frame->pass;
2658 	bzero(pass->cdb, 16);
2659 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2660 
2661 	readop = 0;
2662 	blkcount = howmany(len, MFI_SECTOR_LEN);
2663 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2664 	pass->header.target_id = id;
2665 	pass->header.timeout = 0;
2666 	pass->header.flags = 0;
2667 	pass->header.scsi_status = 0;
2668 	pass->header.sense_len = MFI_SENSE_LEN;
2669 	pass->header.data_len = len;
2670 	pass->header.cdb_len = cdb_len;
2671 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2672 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2673 	cm->cm_data = virt;
2674 	cm->cm_len = len;
2675 	cm->cm_sg = &pass->sgl;
2676 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2677 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2678 
2679 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2680 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2681 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2682 	    BUS_DMASYNC_POSTWRITE);
2683 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2684 	mfi_release_command(cm);
2685 
2686 	return (error);
2687 }
2688 
2689 static int
2690 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2691 {
2692 	struct mfi_softc *sc;
2693 	int error;
2694 
2695 	sc = dev->si_drv1;
2696 
2697 	mtx_lock(&sc->mfi_io_lock);
2698 	if (sc->mfi_detaching)
2699 		error = ENXIO;
2700 	else {
2701 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2702 		error = 0;
2703 	}
2704 	mtx_unlock(&sc->mfi_io_lock);
2705 
2706 	return (error);
2707 }
2708 
2709 static int
2710 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2711 {
2712 	struct mfi_softc *sc;
2713 	struct mfi_aen *mfi_aen_entry, *tmp;
2714 
2715 	sc = dev->si_drv1;
2716 
2717 	mtx_lock(&sc->mfi_io_lock);
2718 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2719 
2720 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2721 		if (mfi_aen_entry->p == curproc) {
2722 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2723 			    aen_link);
2724 			free(mfi_aen_entry, M_MFIBUF);
2725 		}
2726 	}
2727 	mtx_unlock(&sc->mfi_io_lock);
2728 	return (0);
2729 }
2730 
2731 static int
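/*
 * DCMDs that change the logical drive configuration must hold the
 * config sx lock.  Returns nonzero if the lock was taken so the caller
 * can pass it to mfi_config_unlock() later.
 */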
2732 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2733 {
2734 
2735 	switch (opcode) {
2736 	case MFI_DCMD_LD_DELETE:
2737 	case MFI_DCMD_CFG_ADD:
2738 	case MFI_DCMD_CFG_CLEAR:
2739 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2740 		sx_xlock(&sc->mfi_config_lock);
2741 		return (1);
2742 	default:
2743 		return (0);
2744 	}
2745 }
2746 
2747 static void
2748 mfi_config_unlock(struct mfi_softc *sc, int locked)
2749 {
2750 
2751 	if (locked)
2752 		sx_xunlock(&sc->mfi_config_lock);
2753 }
2754 
2755 /*
2756  * Perform pre-issue checks on commands from userland and possibly veto
2757  * them.
2758  */
2759 static int
2760 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2761 {
2762 	struct mfi_disk *ld, *ld2;
2763 	int error;
2764 	struct mfi_system_pd *syspd = NULL;
2765 	uint16_t syspd_id;
2766 	uint16_t *mbox;
2767 
2768 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2769 	error = 0;
2770 	switch (cm->cm_frame->dcmd.opcode) {
2771 	case MFI_DCMD_LD_DELETE:
2772 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2773 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2774 				break;
2775 		}
2776 		if (ld == NULL)
2777 			error = ENOENT;
2778 		else
2779 			error = mfi_disk_disable(ld);
2780 		break;
2781 	case MFI_DCMD_CFG_CLEAR:
2782 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2783 			error = mfi_disk_disable(ld);
2784 			if (error)
2785 				break;
2786 		}
2787 		if (error) {
2788 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2789 				if (ld2 == ld)
2790 					break;
2791 				mfi_disk_enable(ld2);
2792 			}
2793 		}
2794 		break;
2795 	case MFI_DCMD_PD_STATE_SET:
2796 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2797 		syspd_id = mbox[0];
2798 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2799 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2800 				if (syspd->pd_id == syspd_id)
2801 					break;
2802 			}
2803 		}
2804 		} else
2806 		if (syspd)
2807 			error = mfi_syspd_disable(syspd);
2808 		break;
2809 	default:
2810 		break;
2811 	}
2812 	return (error);
2813 }
2814 
2815 /* Perform post-issue checks on commands from userland. */
2816 static void
2817 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2818 {
2819 	struct mfi_disk *ld, *ldn;
2820 	struct mfi_system_pd *syspd = NULL;
2821 	uint16_t syspd_id;
2822 	uint16_t *mbox;
2823 
2824 	switch (cm->cm_frame->dcmd.opcode) {
2825 	case MFI_DCMD_LD_DELETE:
2826 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2827 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2828 				break;
2829 		}
2830 		KASSERT(ld != NULL, ("volume disappeared"));
2831 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2832 			mtx_unlock(&sc->mfi_io_lock);
2833 			mtx_lock(&Giant);
2834 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2835 			mtx_unlock(&Giant);
2836 			mtx_lock(&sc->mfi_io_lock);
2837 		} else
2838 			mfi_disk_enable(ld);
2839 		break;
2840 	case MFI_DCMD_CFG_CLEAR:
2841 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2842 			mtx_unlock(&sc->mfi_io_lock);
2843 			mtx_lock(&Giant);
2844 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2845 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2846 			}
2847 			mtx_unlock(&Giant);
2848 			mtx_lock(&sc->mfi_io_lock);
2849 		} else {
2850 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2851 				mfi_disk_enable(ld);
2852 		}
2853 		break;
2854 	case MFI_DCMD_CFG_ADD:
2855 		mfi_ldprobe(sc);
2856 		break;
2857 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2858 		mfi_ldprobe(sc);
2859 		break;
2860 	case MFI_DCMD_PD_STATE_SET:
2861 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2862 		syspd_id = mbox[0];
2863 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2864 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2865 				if (syspd->pd_id == syspd_id)
2866 					break;
2867 			}
2868 		} else
2870 			break;
2871 		/* If the transition fails then enable the syspd again */
2872 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2873 			mfi_syspd_enable(syspd);
2874 		break;
2875 	}
2876 }
2877 
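/*
 * Determine whether a userland CFG_ADD or LD_DELETE command targets an
 * SSCD (SSD cache) volume; if so, the caller skips the usual pre/post
 * command checks.
 */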
2878 static int
2879 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2880 {
2881 	struct mfi_config_data *conf_data;
2882 	struct mfi_command *ld_cm = NULL;
2883 	struct mfi_ld_info *ld_info = NULL;
2884 	struct mfi_ld_config *ld;
2885 	char *p;
2886 	int error = 0;
2887 
2888 	conf_data = (struct mfi_config_data *)cm->cm_data;
2889 
2890 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2891 		p = (char *)conf_data->array;
2892 		p += conf_data->array_size * conf_data->array_count;
2893 		ld = (struct mfi_ld_config *)p;
2894 		if (ld->params.isSSCD == 1)
2895 			error = 1;
2896 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2897 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2898 		    (void **)&ld_info, sizeof(*ld_info));
2899 		if (error) {
2900 			device_printf(sc->mfi_dev, "Failed to allocate "
2901 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2902 			if (ld_info)
2903 				free(ld_info, M_MFIBUF);
2904 			return (0);
2905 		}
2906 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2907 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2908 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2909 		if (mfi_wait_command(sc, ld_cm) != 0) {
2910 			device_printf(sc->mfi_dev, "failed to get logical drive\n");
2911 			mfi_release_command(ld_cm);
2912 			free(ld_info, M_MFIBUF);
2913 			return (0);
2914 		}
2915 
2916 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2917 			free(ld_info, M_MFIBUF);
2918 			mfi_release_command(ld_cm);
2919 			return (0);
2920 		} else
2922 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2923 
2924 		if (ld_info->ld_config.params.isSSCD == 1)
2925 			error = 1;
2926 
2927 		mfi_release_command(ld_cm);
2928 		free(ld_info, M_MFIBUF);
2929 
2930 	}
2931 	return (error);
2932 }
2933 
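/*
 * Set up kernel bounce buffers and an SGL for an MFI_CMD_STP frame from
 * userland: allocate DMA-able memory per user segment and copy each
 * segment in.
 */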
2934 static int
2935 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2936 {
2937 	uint8_t i;
2938 	struct mfi_ioc_packet *ioc;
2939 	struct megasas_sge *kern_sge;
2940 	int sge_size, error;
2941 	ioc = (struct mfi_ioc_packet *)arg;
2942 
2943 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2944 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2945 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2946 
2947 	if (sizeof(bus_addr_t) == 8) {
2948 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2949 		cm->cm_extra_frames = 2;
2950 		sge_size = sizeof(struct mfi_sg64);
2951 	} else {
2952 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2953 		sge_size = sizeof(struct mfi_sg32);
2954 	}
2955 
2956 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2957 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2958 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2959 			1, 0,			/* algnmnt, boundary */
2960 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2961 			BUS_SPACE_MAXADDR,	/* highaddr */
2962 			NULL, NULL,		/* filter, filterarg */
2963 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2964 			2,			/* nsegments */
2965 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2966 			BUS_DMA_ALLOCNOW,	/* flags */
2967 			NULL, NULL,		/* lockfunc, lockarg */
2968 			&sc->mfi_kbuff_arr_dmat[i])) {
2969 			device_printf(sc->mfi_dev,
2970 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2971 			return (ENOMEM);
2972 		}
2973 
2974 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2975 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2976 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2977 			device_printf(sc->mfi_dev,
2978 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2979 			return (ENOMEM);
2980 		}
2981 
2982 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2983 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2984 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2985 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2986 
2987 		if (!sc->kbuff_arr[i]) {
2988 			device_printf(sc->mfi_dev,
2989 			    "Could not allocate memory for kbuff_arr info\n");
2990 			return (-1);
2991 		}
2992 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2993 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2994 
2995 		if (sizeof(bus_addr_t) == 8) {
2996 			cm->cm_frame->stp.sgl.sg64[i].addr =
2997 			    kern_sge[i].phys_addr;
2998 			cm->cm_frame->stp.sgl.sg64[i].len =
2999 			    ioc->mfi_sgl[i].iov_len;
3000 		} else {
3001 			cm->cm_frame->stp.sgl.sg32[i].addr =
3002 			    kern_sge[i].phys_addr;
3003 			cm->cm_frame->stp.sgl.sg32[i].len =
3004 			    ioc->mfi_sgl[i].iov_len;
3005 		}
3006 
3007 		error = copyin(ioc->mfi_sgl[i].iov_base,
3008 		    sc->kbuff_arr[i],
3009 		    ioc->mfi_sgl[i].iov_len);
3010 		if (error != 0) {
3011 			device_printf(sc->mfi_dev, "Copy in failed\n");
3012 			return (error);
3013 		}
3014 	}
3015 
3016 	cm->cm_flags |= MFI_CMD_MAPPED;
3017 	return (0);
3018 }
3019 
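/*
 * Execute a DCMD pass-through from the MFIIO_PASSTHRU ioctl: copy in
 * the user buffer, run the command with the pre/post checks, and copy
 * the result and frame status back out.
 */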
3020 static int
3021 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3022 {
3023 	struct mfi_command *cm;
3024 	struct mfi_dcmd_frame *dcmd;
3025 	void *ioc_buf = NULL;
3026 	uint32_t context;
3027 	int error = 0, locked;
3028 
3030 	if (ioc->buf_size > 0) {
3031 		if (ioc->buf_size > 1024 * 1024)
3032 			return (ENOMEM);
3033 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3034 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3035 		if (error) {
3036 			device_printf(sc->mfi_dev, "failed to copyin\n");
3037 			free(ioc_buf, M_MFIBUF);
3038 			return (error);
3039 		}
3040 	}
3041 
3042 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3043 
3044 	mtx_lock(&sc->mfi_io_lock);
3045 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3046 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3047 
3048 	/* Save context for later */
3049 	context = cm->cm_frame->header.context;
3050 
3051 	dcmd = &cm->cm_frame->dcmd;
3052 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3053 
3054 	cm->cm_sg = &dcmd->sgl;
3055 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3056 	cm->cm_data = ioc_buf;
3057 	cm->cm_len = ioc->buf_size;
3058 
3059 	/* restore context */
3060 	cm->cm_frame->header.context = context;
3061 
3062 	/* Cheat since we don't know if we're writing or reading */
3063 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3064 
3065 	error = mfi_check_command_pre(sc, cm);
3066 	if (error)
3067 		goto out;
3068 
3069 	error = mfi_wait_command(sc, cm);
3070 	if (error) {
3071 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3072 		goto out;
3073 	}
3074 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3075 	mfi_check_command_post(sc, cm);
3076 out:
3077 	mfi_release_command(cm);
3078 	mtx_unlock(&sc->mfi_io_lock);
3079 	mfi_config_unlock(sc, locked);
3080 	if (ioc->buf_size > 0)
3081 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3082 	if (ioc_buf)
3083 		free(ioc_buf, M_MFIBUF);
3084 	return (error);
3085 }
3086 
3087 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3088 
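/*
 * Native ioctl entry point: statistics, disk queries, raw MFI command
 * pass-through (with 32-bit compat handling), AEN registration, and
 * forwarding of the Linux ioctl shims.
 */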
3089 static int
3090 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3091 {
3092 	struct mfi_softc *sc;
3093 	union mfi_statrequest *ms;
3094 	struct mfi_ioc_packet *ioc;
3095 #ifdef COMPAT_FREEBSD32
3096 	struct mfi_ioc_packet32 *ioc32;
3097 #endif
3098 	struct mfi_ioc_aen *aen;
3099 	struct mfi_command *cm = NULL;
3100 	uint32_t context = 0;
3101 	union mfi_sense_ptr sense_ptr;
3102 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3103 	size_t len;
3104 	int i, res;
3105 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3106 #ifdef COMPAT_FREEBSD32
3107 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3108 	struct mfi_ioc_passthru iop_swab;
3109 #endif
3110 	int error, locked;
3111 	union mfi_sgl *sgl;
3112 	sc = dev->si_drv1;
3113 	error = 0;
3114 
3115 	if (sc->adpreset)
3116 		return EBUSY;
3117 
3118 	if (sc->hw_crit_error)
3119 		return EBUSY;
3120 
3121 	if (sc->issuepend_done == 0)
3122 		return EBUSY;
3123 
3124 	switch (cmd) {
3125 	case MFIIO_STATS:
3126 		ms = (union mfi_statrequest *)arg;
3127 		switch (ms->ms_item) {
3128 		case MFIQ_FREE:
3129 		case MFIQ_BIO:
3130 		case MFIQ_READY:
3131 		case MFIQ_BUSY:
3132 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3133 			    sizeof(struct mfi_qstat));
3134 			break;
3135 		default:
3136 			error = ENOIOCTL;
3137 			break;
3138 		}
3139 		break;
3140 	case MFIIO_QUERY_DISK:
3141 	{
3142 		struct mfi_query_disk *qd;
3143 		struct mfi_disk *ld;
3144 
3145 		qd = (struct mfi_query_disk *)arg;
3146 		mtx_lock(&sc->mfi_io_lock);
3147 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3148 			if (ld->ld_id == qd->array_id)
3149 				break;
3150 		}
3151 		if (ld == NULL) {
3152 			qd->present = 0;
3153 			mtx_unlock(&sc->mfi_io_lock);
3154 			return (0);
3155 		}
3156 		qd->present = 1;
3157 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3158 			qd->open = 1;
3159 		bzero(qd->devname, SPECNAMELEN + 1);
3160 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3161 		mtx_unlock(&sc->mfi_io_lock);
3162 		break;
3163 	}
3164 	case MFI_CMD:
3165 #ifdef COMPAT_FREEBSD32
3166 	case MFI_CMD32:
3167 #endif
3168 		{
3169 		devclass_t devclass;
3170 		int adapter;
3171 		ioc = (struct mfi_ioc_packet *)arg;
3172 
3173 		adapter = ioc->mfi_adapter_no;
3174 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3175 			devclass = devclass_find("mfi");
3176 			sc = devclass_get_softc(devclass, adapter);
3177 		}
3178 		mtx_lock(&sc->mfi_io_lock);
3179 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3180 			mtx_unlock(&sc->mfi_io_lock);
3181 			return (EBUSY);
3182 		}
3183 		mtx_unlock(&sc->mfi_io_lock);
3184 		locked = 0;
3185 
3186 		/*
3187 		 * save off original context since copying from user
3188 		 * will clobber some data
3189 		 */
3190 		context = cm->cm_frame->header.context;
3191 		cm->cm_frame->header.context = cm->cm_index;
3192 
3193 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3194 		    2 * MEGAMFI_FRAME_SIZE);
3195 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3196 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3197 		cm->cm_frame->header.scsi_status = 0;
3198 		cm->cm_frame->header.pad0 = 0;
3199 		if (ioc->mfi_sge_count) {
3200 			cm->cm_sg =
3201 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3202 		}
3203 		sgl = cm->cm_sg;
3204 		cm->cm_flags = 0;
3205 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3206 			cm->cm_flags |= MFI_CMD_DATAIN;
3207 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3208 			cm->cm_flags |= MFI_CMD_DATAOUT;
3209 		/* Legacy app shim */
3210 		if (cm->cm_flags == 0)
3211 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3212 		cm->cm_len = cm->cm_frame->header.data_len;
3213 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3214 #ifdef COMPAT_FREEBSD32
3215 			if (cmd == MFI_CMD) {
3216 #endif
3217 				/* Native */
3218 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3219 #ifdef COMPAT_FREEBSD32
3220 			} else {
3221 				/* 32bit on 64bit */
3222 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3223 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3224 			}
3225 #endif
3226 			cm->cm_len += cm->cm_stp_len;
3227 		}
3228 		if (cm->cm_len &&
3229 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3230 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3231 			    M_WAITOK | M_ZERO);
3232 		} else {
3233 			cm->cm_data = 0;
3234 		}
3235 
3236 		/* restore header context */
3237 		cm->cm_frame->header.context = context;
3238 
3239 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3240 			res = mfi_stp_cmd(sc, cm, arg);
3241 			if (res != 0)
3242 				goto out;
3243 		} else {
3244 			temp = data;
3245 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3246 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3247 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3248 #ifdef COMPAT_FREEBSD32
3249 					if (cmd == MFI_CMD) {
3250 #endif
3251 						/* Native */
3252 						addr = ioc->mfi_sgl[i].iov_base;
3253 						len = ioc->mfi_sgl[i].iov_len;
3254 #ifdef COMPAT_FREEBSD32
3255 					} else {
3256 						/* 32bit on 64bit */
3257 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3258 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3259 						len = ioc32->mfi_sgl[i].iov_len;
3260 					}
3261 #endif
3262 					error = copyin(addr, temp, len);
3263 					if (error != 0) {
3264 						device_printf(sc->mfi_dev,
3265 						    "Copy in failed\n");
3266 						goto out;
3267 					}
3268 					temp = &temp[len];
3269 				}
3270 			}
3271 		}
3272 
3273 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3274 			locked = mfi_config_lock(sc,
3275 			     cm->cm_frame->dcmd.opcode);
3276 
3277 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3278 			cm->cm_frame->pass.sense_addr_lo =
3279 			    (uint32_t)cm->cm_sense_busaddr;
3280 			cm->cm_frame->pass.sense_addr_hi =
3281 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3282 		}
3283 		mtx_lock(&sc->mfi_io_lock);
3284 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3285 		if (!skip_pre_post) {
3286 			error = mfi_check_command_pre(sc, cm);
3287 			if (error) {
3288 				mtx_unlock(&sc->mfi_io_lock);
3289 				goto out;
3290 			}
3291 		}
3292 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3293 			device_printf(sc->mfi_dev,
3294 			    "Controller polled command failed\n");
3295 			mtx_unlock(&sc->mfi_io_lock);
3296 			goto out;
3297 		}
3298 		if (!skip_pre_post) {
3299 			mfi_check_command_post(sc, cm);
3300 		}
3301 		mtx_unlock(&sc->mfi_io_lock);
3302 
3303 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3304 			temp = data;
3305 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3306 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3307 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3308 #ifdef COMPAT_FREEBSD32
3309 					if (cmd == MFI_CMD) {
3310 #endif
3311 						/* Native */
3312 						addr = ioc->mfi_sgl[i].iov_base;
3313 						len = ioc->mfi_sgl[i].iov_len;
3314 #ifdef COMPAT_FREEBSD32
3315 					} else {
3316 						/* 32bit on 64bit */
3317 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3318 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3319 						len = ioc32->mfi_sgl[i].iov_len;
3320 					}
3321 #endif
3322 					error = copyout(temp, addr, len);
3323 					if (error != 0) {
3324 						device_printf(sc->mfi_dev,
3325 						    "Copy out failed\n");
3326 						goto out;
3327 					}
3328 					temp = &temp[len];
3329 				}
3330 			}
3331 		}
3332 
3333 		if (ioc->mfi_sense_len) {
3334 			/* get user-space sense ptr then copy out sense */
3335 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3336 			    &sense_ptr.sense_ptr_data[0],
3337 			    sizeof(sense_ptr.sense_ptr_data));
3338 #ifdef COMPAT_FREEBSD32
3339 			if (cmd != MFI_CMD) {
3340 				/*
3341 				 * not 64-bit native, so zero out any
3342 				 * address over 32 bits.
3343 				 */
3343 				sense_ptr.addr.high = 0;
3344 			}
3345 #endif
3346 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3347 			    ioc->mfi_sense_len);
3348 			if (error != 0) {
3349 				device_printf(sc->mfi_dev,
3350 				    "Copy out failed\n");
3351 				goto out;
3352 			}
3353 		}
3354 
3355 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3356 out:
3357 		mfi_config_unlock(sc, locked);
3358 		if (data)
3359 			free(data, M_MFIBUF);
3360 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3361 			for (i = 0; i < 2; i++) {
3362 				if (sc->kbuff_arr[i]) {
3363 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3364 						bus_dmamap_unload(
3365 						    sc->mfi_kbuff_arr_dmat[i],
3366 						    sc->mfi_kbuff_arr_dmamap[i]
3367 						    );
3368 					if (sc->kbuff_arr[i] != NULL)
3369 						bus_dmamem_free(
3370 						    sc->mfi_kbuff_arr_dmat[i],
3371 						    sc->kbuff_arr[i],
3372 						    sc->mfi_kbuff_arr_dmamap[i]
3373 						    );
3374 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3375 						bus_dma_tag_destroy(
3376 						    sc->mfi_kbuff_arr_dmat[i]);
3377 				}
3378 			}
3379 		}
3380 		if (cm) {
3381 			mtx_lock(&sc->mfi_io_lock);
3382 			mfi_release_command(cm);
3383 			mtx_unlock(&sc->mfi_io_lock);
3384 		}
3385 
3386 		break;
3387 		}
3388 	case MFI_SET_AEN:
3389 		aen = (struct mfi_ioc_aen *)arg;
3390 		mtx_lock(&sc->mfi_io_lock);
3391 		error = mfi_aen_register(sc, aen->aen_seq_num,
3392 		    aen->aen_class_locale);
3393 		mtx_unlock(&sc->mfi_io_lock);
3394 
3395 		break;
3396 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3397 		{
3398 			devclass_t devclass;
3399 			struct mfi_linux_ioc_packet l_ioc;
3400 			int adapter;
3401 
3402 			devclass = devclass_find("mfi");
3403 			if (devclass == NULL)
3404 				return (ENOENT);
3405 
3406 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3407 			if (error)
3408 				return (error);
3409 			adapter = l_ioc.lioc_adapter_no;
3410 			sc = devclass_get_softc(devclass, adapter);
3411 			if (sc == NULL)
3412 				return (ENOENT);
3413 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3414 			    cmd, arg, flag, td));
3415 			break;
3416 		}
3417 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3418 		{
3419 			devclass_t devclass;
3420 			struct mfi_linux_ioc_aen l_aen;
3421 			int adapter;
3422 
3423 			devclass = devclass_find("mfi");
3424 			if (devclass == NULL)
3425 				return (ENOENT);
3426 
3427 			error = copyin(arg, &l_aen, sizeof(l_aen));
3428 			if (error)
3429 				return (error);
3430 			adapter = l_aen.laen_adapter_no;
3431 			sc = devclass_get_softc(devclass, adapter);
3432 			if (sc == NULL)
3433 				return (ENOENT);
3434 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3435 			    cmd, arg, flag, td));
3436 			break;
3437 		}
3438 #ifdef COMPAT_FREEBSD32
3439 	case MFIIO_PASSTHRU32:
3440 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3441 			error = ENOTTY;
3442 			break;
3443 		}
3444 		iop_swab.ioc_frame	= iop32->ioc_frame;
3445 		iop_swab.buf_size	= iop32->buf_size;
3446 		iop_swab.buf		= PTRIN(iop32->buf);
3447 		iop			= &iop_swab;
3448 		/* FALLTHROUGH */
3449 #endif
3450 	case MFIIO_PASSTHRU:
3451 		error = mfi_user_command(sc, iop);
3452 #ifdef COMPAT_FREEBSD32
3453 		if (cmd == MFIIO_PASSTHRU32)
3454 			iop32->ioc_frame = iop_swab.ioc_frame;
3455 #endif
3456 		break;
3457 	default:
3458 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3459 		error = ENOTTY;
3460 		break;
3461 	}
3462 
3463 	return (error);
3464 }
3465 
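/*
 * Back end for the Linux ioctl shim: like the native MFI_CMD path but
 * using the Linux ioctl packet layout and 32-bit user pointers.
 */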
3466 static int
3467 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3468 {
3469 	struct mfi_softc *sc;
3470 	struct mfi_linux_ioc_packet l_ioc;
3471 	struct mfi_linux_ioc_aen l_aen;
3472 	struct mfi_command *cm = NULL;
3473 	struct mfi_aen *mfi_aen_entry;
3474 	union mfi_sense_ptr sense_ptr;
3475 	uint32_t context = 0;
3476 	uint8_t *data = NULL, *temp;
3477 	int i;
3478 	int error, locked;
3479 
3480 	sc = dev->si_drv1;
3481 	error = 0;
3482 	switch (cmd) {
3483 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3484 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3485 		if (error != 0)
3486 			return (error);
3487 
3488 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3489 			return (EINVAL);
3490 		}
3491 
3492 		mtx_lock(&sc->mfi_io_lock);
3493 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3494 			mtx_unlock(&sc->mfi_io_lock);
3495 			return (EBUSY);
3496 		}
3497 		mtx_unlock(&sc->mfi_io_lock);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* XXX: always copies two
						   frames' worth; this isn't
						   quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = NULL;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

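		/*
		 * For DATAOUT commands, gather the user's scatter/gather
		 * segments into the single contiguous kernel buffer.
		 */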
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
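		/* Issue the command synchronously and wait for completion. */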
		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller poll failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

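		/*
		 * For DATAIN commands, scatter the results back out to the
		 * user's scatter/gather segments.
		 */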
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		/*
		 * M_WAITOK allocations cannot fail, so the entry can be
		 * linked in unconditionally.
		 */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}

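/*
 * poll(2) handler for the control device.  A pending AEN marks the device
 * readable; if no AEN command is outstanding at all, report POLLERR since
 * no event can arrive.
 */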
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
			revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}

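/*
 * Debugging aid: walk every mfi(4) instance in the system and report any
 * busy command that has been outstanding longer than mfi_cmd_timeout
 * seconds.
 */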
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}

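/*
 * Watchdog callout.  Scans the busy queue for commands that have been
 * outstanding longer than mfi_cmd_timeout seconds, reports them, and
 * rearms itself.  Timed-out commands are deliberately not failed; see
 * the comment in the loop below.
 */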
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout = 0;

	deadline = time_uptime - mfi_cmd_timeout;
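	/*
	 * If no adapter reset is in progress, give the Thunderbolt reset
	 * handler a chance to run first; when it reports success (returns
	 * zero), just rearm the watchdog and skip this scan.
	 */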
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever we do
				 * not fail them as there is no way to tell if
				 * the controller has actually processed them
				 * or not.
				 *
				 * In addition, it is very likely that force-
				 * failing a command here would cause a panic,
				 * e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

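	/* Debug hook: change to 1 to dump command state for all adapters. */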
	if (0)
		mfi_dump_all();
}