xref: /freebsd/sys/dev/mfi/mfi.c (revision 80ff58b89dcacfe07fe20b045890df9db5ca0af0)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
55 
56 #include "opt_compat.h"
57 #include "opt_mfi.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
64 #include <sys/poll.h>
65 #include <sys/selinfo.h>
66 #include <sys/bus.h>
67 #include <sys/conf.h>
68 #include <sys/eventhandler.h>
69 #include <sys/rman.h>
70 #include <sys/bus_dma.h>
71 #include <sys/bio.h>
72 #include <sys/ioccom.h>
73 #include <sys/uio.h>
74 #include <sys/proc.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
78 
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81 
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
87 
88 static int	mfi_alloc_commands(struct mfi_softc *);
89 static int	mfi_comms_init(struct mfi_softc *);
90 static int	mfi_get_controller_info(struct mfi_softc *);
91 static int	mfi_get_log_state(struct mfi_softc *,
92 		    struct mfi_evt_log_state **);
93 static int	mfi_parse_entries(struct mfi_softc *, int, int);
94 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void	mfi_startup(void *arg);
96 static void	mfi_intr(void *arg);
97 static void	mfi_ldprobe(struct mfi_softc *sc);
98 static void	mfi_syspdprobe(struct mfi_softc *sc);
99 static void	mfi_handle_evt(void *context, int pending);
100 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void	mfi_aen_complete(struct mfi_command *);
102 static int	mfi_add_ld(struct mfi_softc *sc, int);
103 static void	mfi_add_ld_complete(struct mfi_command *);
104 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void	mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void	mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void	mfi_timeout(void *);
115 static int	mfi_user_command(struct mfi_softc *,
116 		    struct mfi_ioc_passthru *);
117 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124 		    uint32_t frame_cnt);
125 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126 		    uint32_t frame_cnt);
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137            0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
142            0, "event message class");
143 
144 static int	mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
147 	   0, "Max commands limit (-1 = controller limit)");
148 
149 static int	mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
152 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153 
154 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
155 TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
157 	   &mfi_polled_cmd_timeout, 0,
158 	   "Polled command timeout - used for firmware flash etc (in seconds)");
159 
160 /* Management interface */
161 static d_open_t		mfi_open;
162 static d_close_t	mfi_close;
163 static d_ioctl_t	mfi_ioctl;
164 static d_poll_t		mfi_poll;
165 
166 static struct cdevsw mfi_cdevsw = {
167 	.d_version = 	D_VERSION,
168 	.d_flags =	0,
169 	.d_open = 	mfi_open,
170 	.d_close =	mfi_close,
171 	.d_ioctl =	mfi_ioctl,
172 	.d_poll =	mfi_poll,
173 	.d_name =	"mfi",
174 };
175 
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
179 struct mfi_skinny_dma_info mfi_skinny;
180 
181 static void
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 {
184 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
185 }
186 
187 static void
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 {
190 	if (sc->mfi_flags & MFI_FLAGS_1078) {
191 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 	}
194 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 	}
198 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
200 	}
201 }
202 
203 static int32_t
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 {
206 	return MFI_READ4(sc, MFI_OMSG0);
207 }
208 
209 static int32_t
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 {
212 	return MFI_READ4(sc, MFI_OSP0);
213 }
214 
215 static int
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
217 {
218 	int32_t status;
219 
220 	status = MFI_READ4(sc, MFI_OSTS);
221 	if ((status & MFI_OSTS_INTR_VALID) == 0)
222 		return 1;
223 
224 	MFI_WRITE4(sc, MFI_OSTS, status);
225 	return 0;
226 }
227 
228 static int
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
230 {
231 	int32_t status;
232 
233 	status = MFI_READ4(sc, MFI_OSTS);
234 	if (sc->mfi_flags & MFI_FLAGS_1078) {
235 		if (!(status & MFI_1078_RM)) {
236 			return 1;
237 		}
238 	}
239 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 		if (!(status & MFI_GEN2_RM)) {
241 			return 1;
242 		}
243 	}
244 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 		if (!(status & MFI_SKINNY_RM)) {
246 			return 1;
247 		}
248 	}
249 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 		MFI_WRITE4(sc, MFI_OSTS, status);
251 	else
252 		MFI_WRITE4(sc, MFI_ODCR0, status);
253 	return 0;
254 }
255 
256 static void
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 {
259 	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
260 }
261 
262 static void
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 {
265 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
267 	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 	} else {
269 	    MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
270 	}
271 }
272 
273 int
274 mfi_transition_firmware(struct mfi_softc *sc)
275 {
276 	uint32_t fw_state, cur_state;
277 	int max_wait, i;
278 	uint32_t cur_abs_reg_val = 0;
279 	uint32_t prev_abs_reg_val = 0;
280 
281 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 	while (fw_state != MFI_FWSTATE_READY) {
284 		if (bootverbose)
285 			device_printf(sc->mfi_dev, "Waiting for firmware to "
286 			"become ready\n");
287 		cur_state = fw_state;
288 		switch (fw_state) {
289 		case MFI_FWSTATE_FAULT:
290 			device_printf(sc->mfi_dev, "Firmware fault\n");
291 			return (ENXIO);
292 		case MFI_FWSTATE_WAIT_HANDSHAKE:
293 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 			else
296 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 			max_wait = MFI_RESET_WAIT_TIME;
298 			break;
299 		case MFI_FWSTATE_OPERATIONAL:
300 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
301 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 			else
303 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 			max_wait = MFI_RESET_WAIT_TIME;
305 			break;
306 		case MFI_FWSTATE_UNDEFINED:
307 		case MFI_FWSTATE_BB_INIT:
308 			max_wait = MFI_RESET_WAIT_TIME;
309 			break;
310 		case MFI_FWSTATE_FW_INIT_2:
311 			max_wait = MFI_RESET_WAIT_TIME;
312 			break;
313 		case MFI_FWSTATE_FW_INIT:
314 		case MFI_FWSTATE_FLUSH_CACHE:
315 			max_wait = MFI_RESET_WAIT_TIME;
316 			break;
317 		case MFI_FWSTATE_DEVICE_SCAN:
318 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 			prev_abs_reg_val = cur_abs_reg_val;
320 			break;
321 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 			else
325 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 			max_wait = MFI_RESET_WAIT_TIME;
327 			break;
328 		default:
329 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
330 			    fw_state);
331 			return (ENXIO);
332 		}
333 		for (i = 0; i < (max_wait * 10); i++) {
334 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 			if (fw_state == cur_state)
337 				DELAY(100000);
338 			else
339 				break;
340 		}
341 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 			/* Check the device scanning progress */
343 			if (prev_abs_reg_val != cur_abs_reg_val) {
344 				continue;
345 			}
346 		}
347 		if (fw_state == cur_state) {
348 			device_printf(sc->mfi_dev, "Firmware stuck in state "
349 			    "%#x\n", fw_state);
350 			return (ENXIO);
351 		}
352 	}
353 	return (0);
354 }
355 
356 static void
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
358 {
359 	bus_addr_t *addr;
360 
361 	addr = arg;
362 	*addr = segs[0].ds_addr;
363 }
364 
365 
366 int
367 mfi_attach(struct mfi_softc *sc)
368 {
369 	uint32_t status;
370 	int error, commsz, framessz, sensesz;
371 	int frames, unit, max_fw_sge, max_fw_cmds;
372 	uint32_t tb_mem_size = 0;
373 
374 	if (sc == NULL)
375 		return EINVAL;
376 
377 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
378 	    MEGASAS_VERSION);
379 
380 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
381 	sx_init(&sc->mfi_config_lock, "MFI config");
382 	TAILQ_INIT(&sc->mfi_ld_tqh);
383 	TAILQ_INIT(&sc->mfi_syspd_tqh);
384 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
385 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
386 	TAILQ_INIT(&sc->mfi_evt_queue);
387 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
388 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
389 	TAILQ_INIT(&sc->mfi_aen_pids);
390 	TAILQ_INIT(&sc->mfi_cam_ccbq);
391 
392 	mfi_initq_free(sc);
393 	mfi_initq_ready(sc);
394 	mfi_initq_busy(sc);
395 	mfi_initq_bio(sc);
396 
397 	sc->adpreset = 0;
398 	sc->last_seq_num = 0;
399 	sc->disableOnlineCtrlReset = 1;
400 	sc->issuepend_done = 1;
401 	sc->hw_crit_error = 0;
402 
403 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
404 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
405 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
406 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
407 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
408 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
409 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
410 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
411 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
412 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
413 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
414 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
415 		sc->mfi_tbolt = 1;
416 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
417 	} else {
418 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
419 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
420 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
421 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
422 	}
423 
424 
425 	/* Before we get too far, see if the firmware is working */
426 	if ((error = mfi_transition_firmware(sc)) != 0) {
427 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
428 		    "error %d\n", error);
429 		return (ENXIO);
430 	}
431 
432 	/* Start: LSIP200113393 */
433 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
434 				1, 0,			/* algnmnt, boundary */
435 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
436 				BUS_SPACE_MAXADDR,	/* highaddr */
437 				NULL, NULL,		/* filter, filterarg */
438 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
439 				1,			/* msegments */
440 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
441 				0,			/* flags */
442 				NULL, NULL,		/* lockfunc, lockarg */
443 				&sc->verbuf_h_dmat)) {
444 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
445 		return (ENOMEM);
446 	}
447 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
448 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
449 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
450 		return (ENOMEM);
451 	}
452 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
453 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
454 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
455 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
456 	/* End: LSIP200113393 */
457 
458 	/*
459 	 * Get information needed for sizing the contiguous memory for the
460 	 * frame pool.  Size down the sgl parameter since we know that
461 	 * we will never need more than what's required for MAXPHYS.
462 	 * It would be nice if these constants were available at runtime
463 	 * instead of compile time.
464 	 */
465 	status = sc->mfi_read_fw_status(sc);
466 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
467 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
468 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
469 		    max_fw_cmds, mfi_max_cmds);
470 		sc->mfi_max_fw_cmds = mfi_max_cmds;
471 	} else {
472 		sc->mfi_max_fw_cmds = max_fw_cmds;
473 	}
474 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
475 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
476 
477 	/* ThunderBolt Support get the contiguous memory */
478 
479 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
480 		mfi_tbolt_init_globals(sc);
481 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
482 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
483 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
484 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
485 
486 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
487 				1, 0,			/* algnmnt, boundary */
488 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
489 				BUS_SPACE_MAXADDR,	/* highaddr */
490 				NULL, NULL,		/* filter, filterarg */
491 				tb_mem_size,		/* maxsize */
492 				1,			/* msegments */
493 				tb_mem_size,		/* maxsegsize */
494 				0,			/* flags */
495 				NULL, NULL,		/* lockfunc, lockarg */
496 				&sc->mfi_tb_dmat)) {
497 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
498 			return (ENOMEM);
499 		}
500 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
501 		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
503 			return (ENOMEM);
504 		}
505 		bzero(sc->request_message_pool, tb_mem_size);
506 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
507 		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
508 
509 		/* For ThunderBolt memory init */
510 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
511 				0x100, 0,		/* alignmnt, boundary */
512 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
513 				BUS_SPACE_MAXADDR,	/* highaddr */
514 				NULL, NULL,		/* filter, filterarg */
515 				MFI_FRAME_SIZE,		/* maxsize */
516 				1,			/* msegments */
517 				MFI_FRAME_SIZE,		/* maxsegsize */
518 				0,			/* flags */
519 				NULL, NULL,		/* lockfunc, lockarg */
520 				&sc->mfi_tb_init_dmat)) {
521 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
522 			return (ENOMEM);
523 		}
524 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
525 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
526 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
527 			return (ENOMEM);
528 		}
529 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
530 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
531 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
532 		    &sc->mfi_tb_init_busaddr, 0);
533 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
534 		    tb_mem_size)) {
535 			device_printf(sc->mfi_dev,
536 			    "Thunderbolt pool preparation error\n");
537 			return 0;
538 		}
539 
540 		/*
541 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
542 		  we are taking it diffrent from what we have allocated for Request
543 		  and reply descriptors to avoid confusion later
544 		*/
545 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
546 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
547 				1, 0,			/* algnmnt, boundary */
548 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
549 				BUS_SPACE_MAXADDR,	/* highaddr */
550 				NULL, NULL,		/* filter, filterarg */
551 				tb_mem_size,		/* maxsize */
552 				1,			/* msegments */
553 				tb_mem_size,		/* maxsegsize */
554 				0,			/* flags */
555 				NULL, NULL,		/* lockfunc, lockarg */
556 				&sc->mfi_tb_ioc_init_dmat)) {
557 			device_printf(sc->mfi_dev,
558 			    "Cannot allocate comms DMA tag\n");
559 			return (ENOMEM);
560 		}
561 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
562 		    (void **)&sc->mfi_tb_ioc_init_desc,
563 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
564 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
565 			return (ENOMEM);
566 		}
567 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
568 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
569 		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
570 		    &sc->mfi_tb_ioc_init_busaddr, 0);
571 	}
572 	/*
573 	 * Create the dma tag for data buffers.  Used both for block I/O
574 	 * and for various internal data queries.
575 	 */
576 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
577 				1, 0,			/* algnmnt, boundary */
578 				BUS_SPACE_MAXADDR,	/* lowaddr */
579 				BUS_SPACE_MAXADDR,	/* highaddr */
580 				NULL, NULL,		/* filter, filterarg */
581 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
582 				sc->mfi_max_sge,	/* nsegments */
583 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
584 				BUS_DMA_ALLOCNOW,	/* flags */
585 				busdma_lock_mutex,	/* lockfunc */
586 				&sc->mfi_io_lock,	/* lockfuncarg */
587 				&sc->mfi_buffer_dmat)) {
588 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
589 		return (ENOMEM);
590 	}
591 
592 	/*
593 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
594 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
595 	 * entry, so the calculated size here will be will be 1 more than
596 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
597 	 */
598 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
599 	    sizeof(struct mfi_hwcomms);
600 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
601 				1, 0,			/* algnmnt, boundary */
602 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
603 				BUS_SPACE_MAXADDR,	/* highaddr */
604 				NULL, NULL,		/* filter, filterarg */
605 				commsz,			/* maxsize */
606 				1,			/* msegments */
607 				commsz,			/* maxsegsize */
608 				0,			/* flags */
609 				NULL, NULL,		/* lockfunc, lockarg */
610 				&sc->mfi_comms_dmat)) {
611 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
612 		return (ENOMEM);
613 	}
614 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
615 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
616 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
617 		return (ENOMEM);
618 	}
619 	bzero(sc->mfi_comms, commsz);
620 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
621 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
622 	/*
623 	 * Allocate DMA memory for the command frames.  Keep them in the
624 	 * lower 4GB for efficiency.  Calculate the size of the commands at
625 	 * the same time; each command is one 64 byte frame plus a set of
626          * additional frames for holding sg lists or other data.
627 	 * The assumption here is that the SG list will start at the second
628 	 * frame and not use the unused bytes in the first frame.  While this
629 	 * isn't technically correct, it simplifies the calculation and allows
630 	 * for command frames that might be larger than an mfi_io_frame.
631 	 */
632 	if (sizeof(bus_addr_t) == 8) {
633 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
634 		sc->mfi_flags |= MFI_FLAGS_SG64;
635 	} else {
636 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
637 	}
638 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
639 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
640 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
641 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
642 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
643 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
644 				64, 0,			/* algnmnt, boundary */
645 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
646 				BUS_SPACE_MAXADDR,	/* highaddr */
647 				NULL, NULL,		/* filter, filterarg */
648 				framessz,		/* maxsize */
649 				1,			/* nsegments */
650 				framessz,		/* maxsegsize */
651 				0,			/* flags */
652 				NULL, NULL,		/* lockfunc, lockarg */
653 				&sc->mfi_frames_dmat)) {
654 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
655 		return (ENOMEM);
656 	}
657 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
658 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
659 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
660 		return (ENOMEM);
661 	}
662 	bzero(sc->mfi_frames, framessz);
663 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
664 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
665 	/*
666 	 * Allocate DMA memory for the frame sense data.  Keep them in the
667 	 * lower 4GB for efficiency
668 	 */
669 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
670 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
671 				4, 0,			/* algnmnt, boundary */
672 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
673 				BUS_SPACE_MAXADDR,	/* highaddr */
674 				NULL, NULL,		/* filter, filterarg */
675 				sensesz,		/* maxsize */
676 				1,			/* nsegments */
677 				sensesz,		/* maxsegsize */
678 				0,			/* flags */
679 				NULL, NULL,		/* lockfunc, lockarg */
680 				&sc->mfi_sense_dmat)) {
681 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
682 		return (ENOMEM);
683 	}
684 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
685 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
686 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
687 		return (ENOMEM);
688 	}
689 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
690 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
691 	if ((error = mfi_alloc_commands(sc)) != 0)
692 		return (error);
693 
694 	/* Before moving the FW to operational state, check whether
695 	 * hostmemory is required by the FW or not
696 	 */
697 
698 	/* ThunderBolt MFI_IOC2 INIT */
699 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
700 		sc->mfi_disable_intr(sc);
701 		mtx_lock(&sc->mfi_io_lock);
702 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
703 			device_printf(sc->mfi_dev,
704 			    "TB Init has failed with error %d\n",error);
705 			mtx_unlock(&sc->mfi_io_lock);
706 			return error;
707 		}
708 		mtx_unlock(&sc->mfi_io_lock);
709 
710 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
711 			return error;
712 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
713 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
714 		    &sc->mfi_intr)) {
715 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
716 			return (EINVAL);
717 		}
718 		sc->mfi_intr_ptr = mfi_intr_tbolt;
719 		sc->mfi_enable_intr(sc);
720 	} else {
721 		if ((error = mfi_comms_init(sc)) != 0)
722 			return (error);
723 
724 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
725 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
726 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
727 			return (EINVAL);
728 		}
729 		sc->mfi_intr_ptr = mfi_intr;
730 		sc->mfi_enable_intr(sc);
731 	}
732 	if ((error = mfi_get_controller_info(sc)) != 0)
733 		return (error);
734 	sc->disableOnlineCtrlReset = 0;
735 
736 	/* Register a config hook to probe the bus for arrays */
737 	sc->mfi_ich.ich_func = mfi_startup;
738 	sc->mfi_ich.ich_arg = sc;
739 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
740 		device_printf(sc->mfi_dev, "Cannot establish configuration "
741 		    "hook\n");
742 		return (EINVAL);
743 	}
744 	mtx_lock(&sc->mfi_io_lock);
745 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
746 		mtx_unlock(&sc->mfi_io_lock);
747 		return (error);
748 	}
749 	mtx_unlock(&sc->mfi_io_lock);
750 
751 	/*
752 	 * Register a shutdown handler.
753 	 */
754 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
755 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
756 		device_printf(sc->mfi_dev, "Warning: shutdown event "
757 		    "registration failed\n");
758 	}
759 
760 	/*
761 	 * Create the control device for doing management
762 	 */
763 	unit = device_get_unit(sc->mfi_dev);
764 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
765 	    0640, "mfi%d", unit);
766 	if (unit == 0)
767 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
768 	if (sc->mfi_cdev != NULL)
769 		sc->mfi_cdev->si_drv1 = sc;
770 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
771 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
772 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
773 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
774 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
775 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
776 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
777 	    &sc->mfi_keep_deleted_volumes, 0,
778 	    "Don't detach the mfid device for a busy volume that is deleted");
779 
780 	device_add_child(sc->mfi_dev, "mfip", -1);
781 	bus_generic_attach(sc->mfi_dev);
782 
783 	/* Start the timeout watchdog */
784 	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
785 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
786 	    mfi_timeout, sc);
787 
788 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
789 		mtx_lock(&sc->mfi_io_lock);
790 		mfi_tbolt_sync_map_info(sc);
791 		mtx_unlock(&sc->mfi_io_lock);
792 	}
793 
794 	return (0);
795 }
796 
797 static int
798 mfi_alloc_commands(struct mfi_softc *sc)
799 {
800 	struct mfi_command *cm;
801 	int i, j;
802 
803 	/*
804 	 * XXX Should we allocate all the commands up front, or allocate on
805 	 * demand later like 'aac' does?
806 	 */
807 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
808 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
809 
810 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
811 		cm = &sc->mfi_commands[i];
812 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
813 		    sc->mfi_cmd_size * i);
814 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
815 		    sc->mfi_cmd_size * i;
816 		cm->cm_frame->header.context = i;
817 		cm->cm_sense = &sc->mfi_sense[i];
818 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
819 		cm->cm_sc = sc;
820 		cm->cm_index = i;
821 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
822 		    &cm->cm_dmamap) == 0) {
823 			mtx_lock(&sc->mfi_io_lock);
824 			mfi_release_command(cm);
825 			mtx_unlock(&sc->mfi_io_lock);
826 		} else {
827 			device_printf(sc->mfi_dev, "Failed to allocate %d "
828 			   "command blocks, only allocated %d\n",
829 			    sc->mfi_max_fw_cmds, i - 1);
830 			for (j = 0; j < i; j++) {
831 				cm = &sc->mfi_commands[i];
832 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
833 				    cm->cm_dmamap);
834 			}
835 			free(sc->mfi_commands, M_MFIBUF);
836 			sc->mfi_commands = NULL;
837 
838 			return (ENOMEM);
839 		}
840 	}
841 
842 	return (0);
843 }
844 
845 void
846 mfi_release_command(struct mfi_command *cm)
847 {
848 	struct mfi_frame_header *hdr;
849 	uint32_t *hdr_data;
850 
851 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
852 
853 	/*
854 	 * Zero out the important fields of the frame, but make sure the
855 	 * context field is preserved.  For efficiency, handle the fields
856 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
857 	 */
858 	hdr = &cm->cm_frame->header;
859 	if (cm->cm_data != NULL && hdr->sg_count) {
860 		cm->cm_sg->sg32[0].len = 0;
861 		cm->cm_sg->sg32[0].addr = 0;
862 	}
863 
864 	/*
865 	 * Command may be on other queues e.g. busy queue depending on the
866 	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
867 	 * properly
868 	 */
869 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
870 		mfi_remove_busy(cm);
871 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
872 		mfi_remove_ready(cm);
873 
874 	/* We're not expecting it to be on any other queue but check */
875 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
876 		panic("Command %p is still on another queue, flags = %#x",
877 		    cm, cm->cm_flags);
878 	}
879 
880 	/* tbolt cleanup */
881 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
882 		mfi_tbolt_return_cmd(cm->cm_sc,
883 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
884 		    cm);
885 	}
886 
887 	hdr_data = (uint32_t *)cm->cm_frame;
888 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
889 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
890 	hdr_data[4] = 0;	/* flags, timeout */
891 	hdr_data[5] = 0;	/* data_len */
892 
893 	cm->cm_extra_frames = 0;
894 	cm->cm_flags = 0;
895 	cm->cm_complete = NULL;
896 	cm->cm_private = NULL;
897 	cm->cm_data = NULL;
898 	cm->cm_sg = 0;
899 	cm->cm_total_frame_size = 0;
900 	cm->retry_for_fw_reset = 0;
901 
902 	mfi_enqueue_free(cm);
903 }
904 
905 int
906 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
907     uint32_t opcode, void **bufp, size_t bufsize)
908 {
909 	struct mfi_command *cm;
910 	struct mfi_dcmd_frame *dcmd;
911 	void *buf = NULL;
912 	uint32_t context = 0;
913 
914 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
915 
916 	cm = mfi_dequeue_free(sc);
917 	if (cm == NULL)
918 		return (EBUSY);
919 
920 	/* Zero out the MFI frame */
921 	context = cm->cm_frame->header.context;
922 	bzero(cm->cm_frame, sizeof(union mfi_frame));
923 	cm->cm_frame->header.context = context;
924 
925 	if ((bufsize > 0) && (bufp != NULL)) {
926 		if (*bufp == NULL) {
927 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
928 			if (buf == NULL) {
929 				mfi_release_command(cm);
930 				return (ENOMEM);
931 			}
932 			*bufp = buf;
933 		} else {
934 			buf = *bufp;
935 		}
936 	}
937 
938 	dcmd =  &cm->cm_frame->dcmd;
939 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
940 	dcmd->header.cmd = MFI_CMD_DCMD;
941 	dcmd->header.timeout = 0;
942 	dcmd->header.flags = 0;
943 	dcmd->header.data_len = bufsize;
944 	dcmd->header.scsi_status = 0;
945 	dcmd->opcode = opcode;
946 	cm->cm_sg = &dcmd->sgl;
947 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
948 	cm->cm_flags = 0;
949 	cm->cm_data = buf;
950 	cm->cm_private = buf;
951 	cm->cm_len = bufsize;
952 
953 	*cmp = cm;
954 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
955 		*bufp = buf;
956 	return (0);
957 }
958 
959 static int
960 mfi_comms_init(struct mfi_softc *sc)
961 {
962 	struct mfi_command *cm;
963 	struct mfi_init_frame *init;
964 	struct mfi_init_qinfo *qinfo;
965 	int error;
966 	uint32_t context = 0;
967 
968 	mtx_lock(&sc->mfi_io_lock);
969 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
970 		mtx_unlock(&sc->mfi_io_lock);
971 		return (EBUSY);
972 	}
973 
974 	/* Zero out the MFI frame */
975 	context = cm->cm_frame->header.context;
976 	bzero(cm->cm_frame, sizeof(union mfi_frame));
977 	cm->cm_frame->header.context = context;
978 
979 	/*
980 	 * Abuse the SG list area of the frame to hold the init_qinfo
981 	 * object;
982 	 */
983 	init = &cm->cm_frame->init;
984 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
985 
986 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
987 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
988 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
989 	    offsetof(struct mfi_hwcomms, hw_reply_q);
990 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
991 	    offsetof(struct mfi_hwcomms, hw_pi);
992 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
993 	    offsetof(struct mfi_hwcomms, hw_ci);
994 
995 	init->header.cmd = MFI_CMD_INIT;
996 	init->header.data_len = sizeof(struct mfi_init_qinfo);
997 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
998 	cm->cm_data = NULL;
999 	cm->cm_flags = MFI_CMD_POLLED;
1000 
1001 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1002 		device_printf(sc->mfi_dev, "failed to send init command\n");
1003 	mfi_release_command(cm);
1004 	mtx_unlock(&sc->mfi_io_lock);
1005 
1006 	return (error);
1007 }
1008 
1009 static int
1010 mfi_get_controller_info(struct mfi_softc *sc)
1011 {
1012 	struct mfi_command *cm = NULL;
1013 	struct mfi_ctrl_info *ci = NULL;
1014 	uint32_t max_sectors_1, max_sectors_2;
1015 	int error;
1016 
1017 	mtx_lock(&sc->mfi_io_lock);
1018 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1019 	    (void **)&ci, sizeof(*ci));
1020 	if (error)
1021 		goto out;
1022 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1023 
1024 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1025 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1026 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1027 		    MFI_SECTOR_LEN;
1028 		error = 0;
1029 		goto out;
1030 	}
1031 
1032 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1033 	    BUS_DMASYNC_POSTREAD);
1034 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1035 
1036 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1037 	max_sectors_2 = ci->max_request_size;
1038 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1039 	sc->disableOnlineCtrlReset =
1040 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1041 
1042 out:
1043 	if (ci)
1044 		free(ci, M_MFIBUF);
1045 	if (cm)
1046 		mfi_release_command(cm);
1047 	mtx_unlock(&sc->mfi_io_lock);
1048 	return (error);
1049 }
1050 
1051 static int
1052 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1053 {
1054 	struct mfi_command *cm = NULL;
1055 	int error;
1056 
1057 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1058 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1059 	    (void **)log_state, sizeof(**log_state));
1060 	if (error)
1061 		goto out;
1062 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1063 
1064 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1065 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1066 		goto out;
1067 	}
1068 
1069 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1070 	    BUS_DMASYNC_POSTREAD);
1071 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1072 
1073 out:
1074 	if (cm)
1075 		mfi_release_command(cm);
1076 
1077 	return (error);
1078 }
1079 
1080 int
1081 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1082 {
1083 	struct mfi_evt_log_state *log_state = NULL;
1084 	union mfi_evt class_locale;
1085 	int error = 0;
1086 	uint32_t seq;
1087 
1088 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1089 
1090 	class_locale.members.reserved = 0;
1091 	class_locale.members.locale = mfi_event_locale;
1092 	class_locale.members.evt_class  = mfi_event_class;
1093 
1094 	if (seq_start == 0) {
1095 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1096 			goto out;
1097 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1098 
1099 		/*
1100 		 * Walk through any events that fired since the last
1101 		 * shutdown.
1102 		 */
1103 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1104 		    log_state->newest_seq_num)) != 0)
1105 			goto out;
1106 		seq = log_state->newest_seq_num;
1107 	} else
1108 		seq = seq_start;
1109 	error = mfi_aen_register(sc, seq, class_locale.word);
1110 out:
1111 	free(log_state, M_MFIBUF);
1112 
1113 	return (error);
1114 }
1115 
1116 int
1117 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1118 {
1119 
1120 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1121 	cm->cm_complete = NULL;
1122 
1123 	/*
1124 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1125 	 * and return 0 to it as status
1126 	 */
1127 	if (cm->cm_frame->dcmd.opcode == 0) {
1128 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1129 		cm->cm_error = 0;
1130 		return (cm->cm_error);
1131 	}
1132 	mfi_enqueue_ready(cm);
1133 	mfi_startio(sc);
1134 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1135 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1136 	return (cm->cm_error);
1137 }
1138 
1139 void
1140 mfi_free(struct mfi_softc *sc)
1141 {
1142 	struct mfi_command *cm;
1143 	int i;
1144 
1145 	callout_drain(&sc->mfi_watchdog_callout);
1146 
1147 	if (sc->mfi_cdev != NULL)
1148 		destroy_dev(sc->mfi_cdev);
1149 
1150 	if (sc->mfi_commands != NULL) {
1151 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1152 			cm = &sc->mfi_commands[i];
1153 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1154 		}
1155 		free(sc->mfi_commands, M_MFIBUF);
1156 		sc->mfi_commands = NULL;
1157 	}
1158 
1159 	if (sc->mfi_intr)
1160 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1161 	if (sc->mfi_irq != NULL)
1162 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1163 		    sc->mfi_irq);
1164 
1165 	if (sc->mfi_sense_busaddr != 0)
1166 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1167 	if (sc->mfi_sense != NULL)
1168 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1169 		    sc->mfi_sense_dmamap);
1170 	if (sc->mfi_sense_dmat != NULL)
1171 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1172 
1173 	if (sc->mfi_frames_busaddr != 0)
1174 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1175 	if (sc->mfi_frames != NULL)
1176 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1177 		    sc->mfi_frames_dmamap);
1178 	if (sc->mfi_frames_dmat != NULL)
1179 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1180 
1181 	if (sc->mfi_comms_busaddr != 0)
1182 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1183 	if (sc->mfi_comms != NULL)
1184 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1185 		    sc->mfi_comms_dmamap);
1186 	if (sc->mfi_comms_dmat != NULL)
1187 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1188 
1189 	/* ThunderBolt contiguous memory free here */
1190 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1191 		if (sc->mfi_tb_busaddr != 0)
1192 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1193 		if (sc->request_message_pool != NULL)
1194 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1195 			    sc->mfi_tb_dmamap);
1196 		if (sc->mfi_tb_dmat != NULL)
1197 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1198 
1199 		/* Version buffer memory free */
1200 		/* Start LSIP200113393 */
1201 		if (sc->verbuf_h_busaddr != 0)
1202 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1203 		if (sc->verbuf != NULL)
1204 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1205 			    sc->verbuf_h_dmamap);
1206 		if (sc->verbuf_h_dmat != NULL)
1207 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1208 
1209 		/* End LSIP200113393 */
1210 		/* ThunderBolt INIT packet memory Free */
1211 		if (sc->mfi_tb_init_busaddr != 0)
1212 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1213 			    sc->mfi_tb_init_dmamap);
1214 		if (sc->mfi_tb_init != NULL)
1215 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1216 			    sc->mfi_tb_init_dmamap);
1217 		if (sc->mfi_tb_init_dmat != NULL)
1218 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1219 
1220 		/* ThunderBolt IOC Init Desc memory free here */
1221 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1222 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1223 			    sc->mfi_tb_ioc_init_dmamap);
1224 		if (sc->mfi_tb_ioc_init_desc != NULL)
1225 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1226 			    sc->mfi_tb_ioc_init_desc,
1227 			    sc->mfi_tb_ioc_init_dmamap);
1228 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1229 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1230 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1231 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1232 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1233 					free(sc->mfi_cmd_pool_tbolt[i],
1234 					    M_MFIBUF);
1235 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1236 				}
1237 			}
1238 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1239 			sc->mfi_cmd_pool_tbolt = NULL;
1240 		}
1241 		if (sc->request_desc_pool != NULL) {
1242 			free(sc->request_desc_pool, M_MFIBUF);
1243 			sc->request_desc_pool = NULL;
1244 		}
1245 	}
1246 	if (sc->mfi_buffer_dmat != NULL)
1247 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1248 	if (sc->mfi_parent_dmat != NULL)
1249 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1250 
1251 	if (mtx_initialized(&sc->mfi_io_lock)) {
1252 		mtx_destroy(&sc->mfi_io_lock);
1253 		sx_destroy(&sc->mfi_config_lock);
1254 	}
1255 
1256 	return;
1257 }
1258 
1259 static void
1260 mfi_startup(void *arg)
1261 {
1262 	struct mfi_softc *sc;
1263 
1264 	sc = (struct mfi_softc *)arg;
1265 
1266 	config_intrhook_disestablish(&sc->mfi_ich);
1267 
1268 	sc->mfi_enable_intr(sc);
1269 	sx_xlock(&sc->mfi_config_lock);
1270 	mtx_lock(&sc->mfi_io_lock);
1271 	mfi_ldprobe(sc);
1272 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 	    mfi_syspdprobe(sc);
1274 	mtx_unlock(&sc->mfi_io_lock);
1275 	sx_xunlock(&sc->mfi_config_lock);
1276 }
1277 
1278 static void
1279 mfi_intr(void *arg)
1280 {
1281 	struct mfi_softc *sc;
1282 	struct mfi_command *cm;
1283 	uint32_t pi, ci, context;
1284 
1285 	sc = (struct mfi_softc *)arg;
1286 
1287 	if (sc->mfi_check_clear_intr(sc))
1288 		return;
1289 
1290 restart:
1291 	pi = sc->mfi_comms->hw_pi;
1292 	ci = sc->mfi_comms->hw_ci;
1293 	mtx_lock(&sc->mfi_io_lock);
1294 	while (ci != pi) {
1295 		context = sc->mfi_comms->hw_reply_q[ci];
1296 		if (context < sc->mfi_max_fw_cmds) {
1297 			cm = &sc->mfi_commands[context];
1298 			mfi_remove_busy(cm);
1299 			cm->cm_error = 0;
1300 			mfi_complete(sc, cm);
1301 		}
1302 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1303 			ci = 0;
1304 	}
1305 
1306 	sc->mfi_comms->hw_ci = ci;
1307 
1308 	/* Give defered I/O a chance to run */
1309 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1310 	mfi_startio(sc);
1311 	mtx_unlock(&sc->mfi_io_lock);
1312 
1313 	/*
1314 	 * Dummy read to flush the bus; this ensures that the indexes are up
1315 	 * to date.  Restart processing if more commands have come it.
1316 	 */
1317 	(void)sc->mfi_read_fw_status(sc);
1318 	if (pi != sc->mfi_comms->hw_pi)
1319 		goto restart;
1320 
1321 	return;
1322 }
1323 
1324 int
1325 mfi_shutdown(struct mfi_softc *sc)
1326 {
1327 	struct mfi_dcmd_frame *dcmd;
1328 	struct mfi_command *cm;
1329 	int error;
1330 
1331 
1332 	if (sc->mfi_aen_cm != NULL) {
1333 		sc->cm_aen_abort = 1;
1334 		mfi_abort(sc, &sc->mfi_aen_cm);
1335 	}
1336 
1337 	if (sc->mfi_map_sync_cm != NULL) {
1338 		sc->cm_map_abort = 1;
1339 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1340 	}
1341 
1342 	mtx_lock(&sc->mfi_io_lock);
1343 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1344 	if (error) {
1345 		mtx_unlock(&sc->mfi_io_lock);
1346 		return (error);
1347 	}
1348 
1349 	dcmd = &cm->cm_frame->dcmd;
1350 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1351 	cm->cm_flags = MFI_CMD_POLLED;
1352 	cm->cm_data = NULL;
1353 
1354 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1355 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1356 
1357 	mfi_release_command(cm);
1358 	mtx_unlock(&sc->mfi_io_lock);
1359 	return (error);
1360 }
1361 
1362 static void
1363 mfi_syspdprobe(struct mfi_softc *sc)
1364 {
1365 	struct mfi_frame_header *hdr;
1366 	struct mfi_command *cm = NULL;
1367 	struct mfi_pd_list *pdlist = NULL;
1368 	struct mfi_system_pd *syspd, *tmp;
1369 	struct mfi_system_pending *syspd_pend;
1370 	int error, i, found;
1371 
1372 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1373 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1374 	/* Add SYSTEM PD's */
1375 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1376 	    (void **)&pdlist, sizeof(*pdlist));
1377 	if (error) {
1378 		device_printf(sc->mfi_dev,
1379 		    "Error while forming SYSTEM PD list\n");
1380 		goto out;
1381 	}
1382 
1383 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1384 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1385 	cm->cm_frame->dcmd.mbox[1] = 0;
1386 	if (mfi_mapcmd(sc, cm) != 0) {
1387 		device_printf(sc->mfi_dev,
1388 		    "Failed to get syspd device listing\n");
1389 		goto out;
1390 	}
1391 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1392 	    BUS_DMASYNC_POSTREAD);
1393 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1394 	hdr = &cm->cm_frame->header;
1395 	if (hdr->cmd_status != MFI_STAT_OK) {
1396 		device_printf(sc->mfi_dev,
1397 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1398 		goto out;
1399 	}
1400 	/* Get each PD and add it to the system */
1401 	for (i = 0; i < pdlist->count; i++) {
1402 		if (pdlist->addr[i].device_id ==
1403 		    pdlist->addr[i].encl_device_id)
1404 			continue;
1405 		found = 0;
1406 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1407 			if (syspd->pd_id == pdlist->addr[i].device_id)
1408 				found = 1;
1409 		}
1410 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1411 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1412 				found = 1;
1413 		}
1414 		if (found == 0)
1415 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1416 	}
1417 	/* Delete SYSPD's whose state has been changed */
1418 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1419 		found = 0;
1420 		for (i = 0; i < pdlist->count; i++) {
1421 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1422 				found = 1;
1423 				break;
1424 			}
1425 		}
1426 		if (found == 0) {
1427 			printf("DELETE\n");
1428 			mtx_unlock(&sc->mfi_io_lock);
1429 			mtx_lock(&Giant);
1430 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1431 			mtx_unlock(&Giant);
1432 			mtx_lock(&sc->mfi_io_lock);
1433 		}
1434 	}
1435 out:
1436 	if (pdlist)
1437 	    free(pdlist, M_MFIBUF);
1438 	if (cm)
1439 	    mfi_release_command(cm);
1440 
1441 	return;
1442 }
1443 
1444 static void
1445 mfi_ldprobe(struct mfi_softc *sc)
1446 {
1447 	struct mfi_frame_header *hdr;
1448 	struct mfi_command *cm = NULL;
1449 	struct mfi_ld_list *list = NULL;
1450 	struct mfi_disk *ld;
1451 	struct mfi_disk_pending *ld_pend;
1452 	int error, i;
1453 
1454 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1455 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1456 
1457 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1458 	    (void **)&list, sizeof(*list));
1459 	if (error)
1460 		goto out;
1461 
1462 	cm->cm_flags = MFI_CMD_DATAIN;
1463 	if (mfi_wait_command(sc, cm) != 0) {
1464 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1465 		goto out;
1466 	}
1467 
1468 	hdr = &cm->cm_frame->header;
1469 	if (hdr->cmd_status != MFI_STAT_OK) {
1470 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1471 		    hdr->cmd_status);
1472 		goto out;
1473 	}
1474 
1475 	for (i = 0; i < list->ld_count; i++) {
1476 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1477 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1478 				goto skip_add;
1479 		}
1480 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1481 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1482 				goto skip_add;
1483 		}
1484 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1485 	skip_add:;
1486 	}
1487 out:
1488 	if (list)
1489 		free(list, M_MFIBUF);
1490 	if (cm)
1491 		mfi_release_command(cm);
1492 
1493 	return;
1494 }
1495 
1496 /*
1497  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1498  * the bits in 24-31 are all set, then it is the number of seconds since
1499  * boot.
1500  */
1501 static const char *
1502 format_timestamp(uint32_t timestamp)
1503 {
1504 	static char buffer[32];
1505 
1506 	if ((timestamp & 0xff000000) == 0xff000000)
1507 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1508 		    0x00ffffff);
1509 	else
1510 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1511 	return (buffer);
1512 }
1513 
1514 static const char *
1515 format_class(int8_t class)
1516 {
1517 	static char buffer[6];
1518 
1519 	switch (class) {
1520 	case MFI_EVT_CLASS_DEBUG:
1521 		return ("debug");
1522 	case MFI_EVT_CLASS_PROGRESS:
1523 		return ("progress");
1524 	case MFI_EVT_CLASS_INFO:
1525 		return ("info");
1526 	case MFI_EVT_CLASS_WARNING:
1527 		return ("WARN");
1528 	case MFI_EVT_CLASS_CRITICAL:
1529 		return ("CRIT");
1530 	case MFI_EVT_CLASS_FATAL:
1531 		return ("FATAL");
1532 	case MFI_EVT_CLASS_DEAD:
1533 		return ("DEAD");
1534 	default:
1535 		snprintf(buffer, sizeof(buffer), "%d", class);
1536 		return (buffer);
1537 	}
1538 }
1539 
1540 static void
1541 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1542 {
1543 	struct mfi_system_pd *syspd = NULL;
1544 
1545 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1546 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1547 	    format_class(detail->evt_class.members.evt_class),
1548 	    detail->description);
1549 
1550         /* Don't act on old AEN's or while shutting down */
1551         if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1552                 return;
1553 
1554 	switch (detail->arg_type) {
1555 	case MR_EVT_ARGS_NONE:
1556 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1557 		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
1558 			if (mfi_detect_jbod_change) {
1559 				/*
1560 				 * Probe for new SYSPD's and Delete
1561 				 * invalid SYSPD's
1562 				 */
1563 				sx_xlock(&sc->mfi_config_lock);
1564 				mtx_lock(&sc->mfi_io_lock);
1565 				mfi_syspdprobe(sc);
1566 				mtx_unlock(&sc->mfi_io_lock);
1567 				sx_xunlock(&sc->mfi_config_lock);
1568 			}
1569 		}
1570 		break;
1571 	case MR_EVT_ARGS_LD_STATE:
1572 		/* During load time driver reads all the events starting
1573 		 * from the one that has been logged after shutdown. Avoid
1574 		 * these old events.
1575 		 */
1576 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1577 			/* Remove the LD */
1578 			struct mfi_disk *ld;
1579 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1580 				if (ld->ld_id ==
1581 				    detail->args.ld_state.ld.target_id)
1582 					break;
1583 			}
1584 			/*
1585 			Fix: for kernel panics when SSCD is removed
1586 			KASSERT(ld != NULL, ("volume dissappeared"));
1587 			*/
1588 			if (ld != NULL) {
1589 				mtx_lock(&Giant);
1590 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1591 				mtx_unlock(&Giant);
1592 			}
1593 		}
1594 		break;
1595 	case MR_EVT_ARGS_PD:
1596 		if (detail->code == MR_EVT_PD_REMOVED) {
1597 			if (mfi_detect_jbod_change) {
1598 				/*
1599 				 * If the removed device is a SYSPD then
1600 				 * delete it
1601 				 */
1602 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1603 				    pd_link) {
1604 					if (syspd->pd_id ==
1605 					    detail->args.pd.device_id) {
1606 						mtx_lock(&Giant);
1607 						device_delete_child(
1608 						    sc->mfi_dev,
1609 						    syspd->pd_dev);
1610 						mtx_unlock(&Giant);
1611 						break;
1612 					}
1613 				}
1614 			}
1615 		}
1616 		if (detail->code == MR_EVT_PD_INSERTED) {
1617 			if (mfi_detect_jbod_change) {
1618 				/* Probe for new SYSPD's */
1619 				sx_xlock(&sc->mfi_config_lock);
1620 				mtx_lock(&sc->mfi_io_lock);
1621 				mfi_syspdprobe(sc);
1622 				mtx_unlock(&sc->mfi_io_lock);
1623 				sx_xunlock(&sc->mfi_config_lock);
1624 			}
1625 		}
1626 		if (sc->mfi_cam_rescan_cb != NULL &&
1627 		    (detail->code == MR_EVT_PD_INSERTED ||
1628 		    detail->code == MR_EVT_PD_REMOVED)) {
1629 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1630 		}
1631 		break;
1632 	}
1633 }
1634 
1635 static void
1636 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1637 {
1638 	struct mfi_evt_queue_elm *elm;
1639 
1640 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1641 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1642 	if (elm == NULL)
1643 		return;
1644 	memcpy(&elm->detail, detail, sizeof(*detail));
1645 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1646 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1647 }
1648 
1649 static void
1650 mfi_handle_evt(void *context, int pending)
1651 {
1652 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1653 	struct mfi_softc *sc;
1654 	struct mfi_evt_queue_elm *elm;
1655 
1656 	sc = context;
1657 	TAILQ_INIT(&queue);
1658 	mtx_lock(&sc->mfi_io_lock);
1659 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1660 	mtx_unlock(&sc->mfi_io_lock);
1661 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1662 		TAILQ_REMOVE(&queue, elm, link);
1663 		mfi_decode_evt(sc, &elm->detail);
1664 		free(elm, M_MFIBUF);
1665 	}
1666 }
1667 
1668 static int
1669 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1670 {
1671 	struct mfi_command *cm;
1672 	struct mfi_dcmd_frame *dcmd;
1673 	union mfi_evt current_aen, prior_aen;
1674 	struct mfi_evt_detail *ed = NULL;
1675 	int error = 0;
1676 
1677 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1678 
1679 	current_aen.word = locale;
1680 	if (sc->mfi_aen_cm != NULL) {
1681 		prior_aen.word =
1682 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1683 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1684 		    !((prior_aen.members.locale & current_aen.members.locale)
1685 		    ^current_aen.members.locale)) {
1686 			return (0);
1687 		} else {
1688 			prior_aen.members.locale |= current_aen.members.locale;
1689 			if (prior_aen.members.evt_class
1690 			    < current_aen.members.evt_class)
1691 				current_aen.members.evt_class =
1692 				    prior_aen.members.evt_class;
1693 			mfi_abort(sc, &sc->mfi_aen_cm);
1694 		}
1695 	}
1696 
1697 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1698 	    (void **)&ed, sizeof(*ed));
1699 	if (error)
1700 		goto out;
1701 
1702 	dcmd = &cm->cm_frame->dcmd;
1703 	((uint32_t *)&dcmd->mbox)[0] = seq;
1704 	((uint32_t *)&dcmd->mbox)[1] = locale;
1705 	cm->cm_flags = MFI_CMD_DATAIN;
1706 	cm->cm_complete = mfi_aen_complete;
1707 
1708 	sc->last_seq_num = seq;
1709 	sc->mfi_aen_cm = cm;
1710 
1711 	mfi_enqueue_ready(cm);
1712 	mfi_startio(sc);
1713 
1714 out:
1715 	return (error);
1716 }
1717 
1718 static void
1719 mfi_aen_complete(struct mfi_command *cm)
1720 {
1721 	struct mfi_frame_header *hdr;
1722 	struct mfi_softc *sc;
1723 	struct mfi_evt_detail *detail;
1724 	struct mfi_aen *mfi_aen_entry, *tmp;
1725 	int seq = 0, aborted = 0;
1726 
1727 	sc = cm->cm_sc;
1728 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1729 
1730 	if (sc->mfi_aen_cm == NULL)
1731 		return;
1732 
1733 	hdr = &cm->cm_frame->header;
1734 
1735 	if (sc->cm_aen_abort ||
1736 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1737 		sc->cm_aen_abort = 0;
1738 		aborted = 1;
1739 	} else {
1740 		sc->mfi_aen_triggered = 1;
1741 		if (sc->mfi_poll_waiting) {
1742 			sc->mfi_poll_waiting = 0;
1743 			selwakeup(&sc->mfi_select);
1744 		}
1745 		detail = cm->cm_data;
1746 		mfi_queue_evt(sc, detail);
1747 		seq = detail->seq + 1;
1748 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1749 		    tmp) {
1750 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1751 			    aen_link);
1752 			PROC_LOCK(mfi_aen_entry->p);
1753 			kern_psignal(mfi_aen_entry->p, SIGIO);
1754 			PROC_UNLOCK(mfi_aen_entry->p);
1755 			free(mfi_aen_entry, M_MFIBUF);
1756 		}
1757 	}
1758 
1759 	free(cm->cm_data, M_MFIBUF);
1760 	wakeup(&sc->mfi_aen_cm);
1761 	sc->mfi_aen_cm = NULL;
1762 	mfi_release_command(cm);
1763 
1764 	/* set it up again so the driver can catch more events */
1765 	if (!aborted)
1766 		mfi_aen_setup(sc, seq);
1767 }
1768 
1769 #define MAX_EVENTS 15
1770 
1771 static int
1772 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1773 {
1774 	struct mfi_command *cm;
1775 	struct mfi_dcmd_frame *dcmd;
1776 	struct mfi_evt_list *el;
1777 	union mfi_evt class_locale;
1778 	int error, i, seq, size;
1779 
1780 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1781 
1782 	class_locale.members.reserved = 0;
1783 	class_locale.members.locale = mfi_event_locale;
1784 	class_locale.members.evt_class  = mfi_event_class;
1785 
1786 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1787 		* (MAX_EVENTS - 1);
1788 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1789 	if (el == NULL)
1790 		return (ENOMEM);
1791 
1792 	for (seq = start_seq;;) {
1793 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1794 			free(el, M_MFIBUF);
1795 			return (EBUSY);
1796 		}
1797 
1798 		dcmd = &cm->cm_frame->dcmd;
1799 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1800 		dcmd->header.cmd = MFI_CMD_DCMD;
1801 		dcmd->header.timeout = 0;
1802 		dcmd->header.data_len = size;
1803 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1804 		((uint32_t *)&dcmd->mbox)[0] = seq;
1805 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1806 		cm->cm_sg = &dcmd->sgl;
1807 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1808 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1809 		cm->cm_data = el;
1810 		cm->cm_len = size;
1811 
1812 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1813 			device_printf(sc->mfi_dev,
1814 			    "Failed to get controller entries\n");
1815 			mfi_release_command(cm);
1816 			break;
1817 		}
1818 
1819 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1820 		    BUS_DMASYNC_POSTREAD);
1821 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1822 
1823 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1824 			mfi_release_command(cm);
1825 			break;
1826 		}
1827 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1828 			device_printf(sc->mfi_dev,
1829 			    "Error %d fetching controller entries\n",
1830 			    dcmd->header.cmd_status);
1831 			mfi_release_command(cm);
1832 			error = EIO;
1833 			break;
1834 		}
1835 		mfi_release_command(cm);
1836 
1837 		for (i = 0; i < el->count; i++) {
1838 			/*
1839 			 * If this event is newer than 'stop_seq' then
1840 			 * break out of the loop.  Note that the log
1841 			 * is a circular buffer so we have to handle
1842 			 * the case that our stop point is earlier in
1843 			 * the buffer than our start point.
1844 			 */
1845 			if (el->event[i].seq >= stop_seq) {
1846 				if (start_seq <= stop_seq)
1847 					break;
1848 				else if (el->event[i].seq < start_seq)
1849 					break;
1850 			}
1851 			mfi_queue_evt(sc, &el->event[i]);
1852 		}
1853 		seq = el->event[el->count - 1].seq + 1;
1854 	}
1855 
1856 	free(el, M_MFIBUF);
1857 	return (error);
1858 }
1859 
1860 static int
1861 mfi_add_ld(struct mfi_softc *sc, int id)
1862 {
1863 	struct mfi_command *cm;
1864 	struct mfi_dcmd_frame *dcmd = NULL;
1865 	struct mfi_ld_info *ld_info = NULL;
1866 	struct mfi_disk_pending *ld_pend;
1867 	int error;
1868 
1869 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1870 
1871 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1872 	if (ld_pend != NULL) {
1873 		ld_pend->ld_id = id;
1874 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1875 	}
1876 
1877 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1878 	    (void **)&ld_info, sizeof(*ld_info));
1879 	if (error) {
1880 		device_printf(sc->mfi_dev,
1881 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1882 		if (ld_info)
1883 			free(ld_info, M_MFIBUF);
1884 		return (error);
1885 	}
1886 	cm->cm_flags = MFI_CMD_DATAIN;
1887 	dcmd = &cm->cm_frame->dcmd;
1888 	dcmd->mbox[0] = id;
1889 	if (mfi_wait_command(sc, cm) != 0) {
1890 		device_printf(sc->mfi_dev,
1891 		    "Failed to get logical drive: %d\n", id);
1892 		free(ld_info, M_MFIBUF);
1893 		return (0);
1894 	}
1895 	if (ld_info->ld_config.params.isSSCD != 1)
1896 		mfi_add_ld_complete(cm);
1897 	else {
1898 		mfi_release_command(cm);
1899 		if (ld_info)		/* SSCD drives ld_info free here */
1900 			free(ld_info, M_MFIBUF);
1901 	}
1902 	return (0);
1903 }
1904 
1905 static void
1906 mfi_add_ld_complete(struct mfi_command *cm)
1907 {
1908 	struct mfi_frame_header *hdr;
1909 	struct mfi_ld_info *ld_info;
1910 	struct mfi_softc *sc;
1911 	device_t child;
1912 
1913 	sc = cm->cm_sc;
1914 	hdr = &cm->cm_frame->header;
1915 	ld_info = cm->cm_private;
1916 
1917 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1918 		free(ld_info, M_MFIBUF);
1919 		wakeup(&sc->mfi_map_sync_cm);
1920 		mfi_release_command(cm);
1921 		return;
1922 	}
1923 	wakeup(&sc->mfi_map_sync_cm);
1924 	mfi_release_command(cm);
1925 
1926 	mtx_unlock(&sc->mfi_io_lock);
1927 	mtx_lock(&Giant);
1928 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1929 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1930 		free(ld_info, M_MFIBUF);
1931 		mtx_unlock(&Giant);
1932 		mtx_lock(&sc->mfi_io_lock);
1933 		return;
1934 	}
1935 
1936 	device_set_ivars(child, ld_info);
1937 	device_set_desc(child, "MFI Logical Disk");
1938 	bus_generic_attach(sc->mfi_dev);
1939 	mtx_unlock(&Giant);
1940 	mtx_lock(&sc->mfi_io_lock);
1941 }
1942 
1943 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1944 {
1945 	struct mfi_command *cm;
1946 	struct mfi_dcmd_frame *dcmd = NULL;
1947 	struct mfi_pd_info *pd_info = NULL;
1948 	struct mfi_system_pending *syspd_pend;
1949 	int error;
1950 
1951 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1952 
1953 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1954 	if (syspd_pend != NULL) {
1955 		syspd_pend->pd_id = id;
1956 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1957 	}
1958 
1959 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1960 		(void **)&pd_info, sizeof(*pd_info));
1961 	if (error) {
1962 		device_printf(sc->mfi_dev,
1963 		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1964 		    error);
1965 		if (pd_info)
1966 			free(pd_info, M_MFIBUF);
1967 		return (error);
1968 	}
1969 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1970 	dcmd = &cm->cm_frame->dcmd;
1971 	dcmd->mbox[0]=id;
1972 	dcmd->header.scsi_status = 0;
1973 	dcmd->header.pad0 = 0;
1974 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1975 		device_printf(sc->mfi_dev,
1976 		    "Failed to get physical drive info %d\n", id);
1977 		free(pd_info, M_MFIBUF);
1978 		mfi_release_command(cm);
1979 		return (error);
1980 	}
1981 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1982 	    BUS_DMASYNC_POSTREAD);
1983 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1984 	mfi_add_sys_pd_complete(cm);
1985 	return (0);
1986 }
1987 
1988 static void
1989 mfi_add_sys_pd_complete(struct mfi_command *cm)
1990 {
1991 	struct mfi_frame_header *hdr;
1992 	struct mfi_pd_info *pd_info;
1993 	struct mfi_softc *sc;
1994 	device_t child;
1995 
1996 	sc = cm->cm_sc;
1997 	hdr = &cm->cm_frame->header;
1998 	pd_info = cm->cm_private;
1999 
2000 	if (hdr->cmd_status != MFI_STAT_OK) {
2001 		free(pd_info, M_MFIBUF);
2002 		mfi_release_command(cm);
2003 		return;
2004 	}
2005 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2006 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2007 		    pd_info->ref.v.device_id);
2008 		free(pd_info, M_MFIBUF);
2009 		mfi_release_command(cm);
2010 		return;
2011 	}
2012 	mfi_release_command(cm);
2013 
2014 	mtx_unlock(&sc->mfi_io_lock);
2015 	mtx_lock(&Giant);
2016 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2017 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2018 		free(pd_info, M_MFIBUF);
2019 		mtx_unlock(&Giant);
2020 		mtx_lock(&sc->mfi_io_lock);
2021 		return;
2022 	}
2023 
2024 	device_set_ivars(child, pd_info);
2025 	device_set_desc(child, "MFI System PD");
2026 	bus_generic_attach(sc->mfi_dev);
2027 	mtx_unlock(&Giant);
2028 	mtx_lock(&sc->mfi_io_lock);
2029 }
2030 
2031 static struct mfi_command *
2032 mfi_bio_command(struct mfi_softc *sc)
2033 {
2034 	struct bio *bio;
2035 	struct mfi_command *cm = NULL;
2036 
2037 	/*reserving two commands to avoid starvation for IOCTL*/
2038 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2039 		return (NULL);
2040 	}
2041 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2042 		return (NULL);
2043 	}
2044 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2045 		cm = mfi_build_ldio(sc, bio);
2046 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2047 		cm = mfi_build_syspdio(sc, bio);
2048 	}
2049 	if (!cm)
2050 	    mfi_enqueue_bio(sc, bio);
2051 	return cm;
2052 }
2053 
2054 /*
2055  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2056  */
2057 
2058 int
2059 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2060 {
2061 	int cdb_len;
2062 
2063 	if (((lba & 0x1fffff) == lba)
2064          && ((block_count & 0xff) == block_count)
2065          && (byte2 == 0)) {
2066 		/* We can fit in a 6 byte cdb */
2067 		struct scsi_rw_6 *scsi_cmd;
2068 
2069 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2070 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2071 		scsi_ulto3b(lba, scsi_cmd->addr);
2072 		scsi_cmd->length = block_count & 0xff;
2073 		scsi_cmd->control = 0;
2074 		cdb_len = sizeof(*scsi_cmd);
2075 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2076 		/* Need a 10 byte CDB */
2077 		struct scsi_rw_10 *scsi_cmd;
2078 
2079 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2080 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2081 		scsi_cmd->byte2 = byte2;
2082 		scsi_ulto4b(lba, scsi_cmd->addr);
2083 		scsi_cmd->reserved = 0;
2084 		scsi_ulto2b(block_count, scsi_cmd->length);
2085 		scsi_cmd->control = 0;
2086 		cdb_len = sizeof(*scsi_cmd);
2087 	} else if (((block_count & 0xffffffff) == block_count) &&
2088 	    ((lba & 0xffffffff) == lba)) {
2089 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2090 		struct scsi_rw_12 *scsi_cmd;
2091 
2092 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2093 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2094 		scsi_cmd->byte2 = byte2;
2095 		scsi_ulto4b(lba, scsi_cmd->addr);
2096 		scsi_cmd->reserved = 0;
2097 		scsi_ulto4b(block_count, scsi_cmd->length);
2098 		scsi_cmd->control = 0;
2099 		cdb_len = sizeof(*scsi_cmd);
2100 	} else {
2101 		/*
2102 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2103 		 * than 2^32
2104 		 */
2105 		struct scsi_rw_16 *scsi_cmd;
2106 
2107 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2108 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2109 		scsi_cmd->byte2 = byte2;
2110 		scsi_u64to8b(lba, scsi_cmd->addr);
2111 		scsi_cmd->reserved = 0;
2112 		scsi_ulto4b(block_count, scsi_cmd->length);
2113 		scsi_cmd->control = 0;
2114 		cdb_len = sizeof(*scsi_cmd);
2115 	}
2116 
2117 	return cdb_len;
2118 }
2119 
2120 static struct mfi_command *
2121 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2122 {
2123 	struct mfi_command *cm;
2124 	struct mfi_pass_frame *pass;
2125 	uint32_t context = 0;
2126 	int flags = 0, blkcount = 0, readop;
2127 	uint8_t cdb_len;
2128 
2129 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2130 
2131 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2132 	    return (NULL);
2133 
2134 	/* Zero out the MFI frame */
2135 	context = cm->cm_frame->header.context;
2136 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2137 	cm->cm_frame->header.context = context;
2138 	pass = &cm->cm_frame->pass;
2139 	bzero(pass->cdb, 16);
2140 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2141 	switch (bio->bio_cmd & 0x03) {
2142 	case BIO_READ:
2143 		flags = MFI_CMD_DATAIN;
2144 		readop = 1;
2145 		break;
2146 	case BIO_WRITE:
2147 		flags = MFI_CMD_DATAOUT;
2148 		readop = 0;
2149 		break;
2150 	default:
2151 		/* TODO: what about BIO_DELETE??? */
2152 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2153 	}
2154 
2155 	/* Cheat with the sector length to avoid a non-constant division */
2156 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2157 	/* Fill the LBA and Transfer length in CDB */
2158 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2159 	    pass->cdb);
2160 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2161 	pass->header.lun_id = 0;
2162 	pass->header.timeout = 0;
2163 	pass->header.flags = 0;
2164 	pass->header.scsi_status = 0;
2165 	pass->header.sense_len = MFI_SENSE_LEN;
2166 	pass->header.data_len = bio->bio_bcount;
2167 	pass->header.cdb_len = cdb_len;
2168 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2169 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2170 	cm->cm_complete = mfi_bio_complete;
2171 	cm->cm_private = bio;
2172 	cm->cm_data = bio->bio_data;
2173 	cm->cm_len = bio->bio_bcount;
2174 	cm->cm_sg = &pass->sgl;
2175 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2176 	cm->cm_flags = flags;
2177 
2178 	return (cm);
2179 }
2180 
2181 static struct mfi_command *
2182 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2183 {
2184 	struct mfi_io_frame *io;
2185 	struct mfi_command *cm;
2186 	int flags;
2187 	uint32_t blkcount;
2188 	uint32_t context = 0;
2189 
2190 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2191 
2192 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2193 	    return (NULL);
2194 
2195 	/* Zero out the MFI frame */
2196 	context = cm->cm_frame->header.context;
2197 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2198 	cm->cm_frame->header.context = context;
2199 	io = &cm->cm_frame->io;
2200 	switch (bio->bio_cmd & 0x03) {
2201 	case BIO_READ:
2202 		io->header.cmd = MFI_CMD_LD_READ;
2203 		flags = MFI_CMD_DATAIN;
2204 		break;
2205 	case BIO_WRITE:
2206 		io->header.cmd = MFI_CMD_LD_WRITE;
2207 		flags = MFI_CMD_DATAOUT;
2208 		break;
2209 	default:
2210 		/* TODO: what about BIO_DELETE??? */
2211 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2212 	}
2213 
2214 	/* Cheat with the sector length to avoid a non-constant division */
2215 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2216 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2217 	io->header.timeout = 0;
2218 	io->header.flags = 0;
2219 	io->header.scsi_status = 0;
2220 	io->header.sense_len = MFI_SENSE_LEN;
2221 	io->header.data_len = blkcount;
2222 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2223 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2224 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2225 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2226 	cm->cm_complete = mfi_bio_complete;
2227 	cm->cm_private = bio;
2228 	cm->cm_data = bio->bio_data;
2229 	cm->cm_len = bio->bio_bcount;
2230 	cm->cm_sg = &io->sgl;
2231 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2232 	cm->cm_flags = flags;
2233 
2234 	return (cm);
2235 }
2236 
2237 static void
2238 mfi_bio_complete(struct mfi_command *cm)
2239 {
2240 	struct bio *bio;
2241 	struct mfi_frame_header *hdr;
2242 	struct mfi_softc *sc;
2243 
2244 	bio = cm->cm_private;
2245 	hdr = &cm->cm_frame->header;
2246 	sc = cm->cm_sc;
2247 
2248 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2249 		bio->bio_flags |= BIO_ERROR;
2250 		bio->bio_error = EIO;
2251 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2252 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2253 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2254 	} else if (cm->cm_error != 0) {
2255 		bio->bio_flags |= BIO_ERROR;
2256 		bio->bio_error = cm->cm_error;
2257 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2258 		    cm, cm->cm_error);
2259 	}
2260 
2261 	mfi_release_command(cm);
2262 	mfi_disk_complete(bio);
2263 }
2264 
2265 void
2266 mfi_startio(struct mfi_softc *sc)
2267 {
2268 	struct mfi_command *cm;
2269 	struct ccb_hdr *ccbh;
2270 
2271 	for (;;) {
2272 		/* Don't bother if we're short on resources */
2273 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2274 			break;
2275 
2276 		/* Try a command that has already been prepared */
2277 		cm = mfi_dequeue_ready(sc);
2278 
2279 		if (cm == NULL) {
2280 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2281 				cm = sc->mfi_cam_start(ccbh);
2282 		}
2283 
2284 		/* Nope, so look for work on the bioq */
2285 		if (cm == NULL)
2286 			cm = mfi_bio_command(sc);
2287 
2288 		/* No work available, so exit */
2289 		if (cm == NULL)
2290 			break;
2291 
2292 		/* Send the command to the controller */
2293 		if (mfi_mapcmd(sc, cm) != 0) {
2294 			device_printf(sc->mfi_dev, "Failed to startio\n");
2295 			mfi_requeue_ready(cm);
2296 			break;
2297 		}
2298 	}
2299 }
2300 
2301 int
2302 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2303 {
2304 	int error, polled;
2305 
2306 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2307 
2308 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
2309 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2310 		if (cm->cm_flags & MFI_CMD_CCB)
2311 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2312 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2313 			    polled);
2314 		else
2315 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2316 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2317 			    mfi_data_cb, cm, polled);
2318 		if (error == EINPROGRESS) {
2319 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2320 			return (0);
2321 		}
2322 	} else {
2323 		error = mfi_send_frame(sc, cm);
2324 	}
2325 
2326 	return (error);
2327 }
2328 
2329 static void
2330 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2331 {
2332 	struct mfi_frame_header *hdr;
2333 	struct mfi_command *cm;
2334 	union mfi_sgl *sgl;
2335 	struct mfi_softc *sc;
2336 	int i, j, first, dir;
2337 	int sge_size, locked;
2338 
2339 	cm = (struct mfi_command *)arg;
2340 	sc = cm->cm_sc;
2341 	hdr = &cm->cm_frame->header;
2342 	sgl = cm->cm_sg;
2343 
2344 	/*
2345 	 * We need to check if we have the lock as this is async
2346 	 * callback so even though our caller mfi_mapcmd asserts
2347 	 * it has the lock, there is no garantee that hasn't been
2348 	 * dropped if bus_dmamap_load returned prior to our
2349 	 * completion.
2350 	 */
2351 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2352 		mtx_lock(&sc->mfi_io_lock);
2353 
2354 	if (error) {
2355 		printf("error %d in callback\n", error);
2356 		cm->cm_error = error;
2357 		mfi_complete(sc, cm);
2358 		goto out;
2359 	}
2360 	/* Use IEEE sgl only for IO's on a SKINNY controller
2361 	 * For other commands on a SKINNY controller use either
2362 	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2363 	 * Also calculate the total frame size based on the type
2364 	 * of SGL used.
2365 	 */
2366 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2367 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2368 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2369 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2370 		for (i = 0; i < nsegs; i++) {
2371 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2372 			sgl->sg_skinny[i].len = segs[i].ds_len;
2373 			sgl->sg_skinny[i].flag = 0;
2374 		}
2375 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2376 		sge_size = sizeof(struct mfi_sg_skinny);
2377 		hdr->sg_count = nsegs;
2378 	} else {
2379 		j = 0;
2380 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2381 			first = cm->cm_stp_len;
2382 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2383 				sgl->sg32[j].addr = segs[0].ds_addr;
2384 				sgl->sg32[j++].len = first;
2385 			} else {
2386 				sgl->sg64[j].addr = segs[0].ds_addr;
2387 				sgl->sg64[j++].len = first;
2388 			}
2389 		} else
2390 			first = 0;
2391 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2392 			for (i = 0; i < nsegs; i++) {
2393 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2394 				sgl->sg32[j++].len = segs[i].ds_len - first;
2395 				first = 0;
2396 			}
2397 		} else {
2398 			for (i = 0; i < nsegs; i++) {
2399 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2400 				sgl->sg64[j++].len = segs[i].ds_len - first;
2401 				first = 0;
2402 			}
2403 			hdr->flags |= MFI_FRAME_SGL64;
2404 		}
2405 		hdr->sg_count = j;
2406 		sge_size = sc->mfi_sge_size;
2407 	}
2408 
2409 	dir = 0;
2410 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2411 		dir |= BUS_DMASYNC_PREREAD;
2412 		hdr->flags |= MFI_FRAME_DIR_READ;
2413 	}
2414 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2415 		dir |= BUS_DMASYNC_PREWRITE;
2416 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2417 	}
2418 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2419 	cm->cm_flags |= MFI_CMD_MAPPED;
2420 
2421 	/*
2422 	 * Instead of calculating the total number of frames in the
2423 	 * compound frame, it's already assumed that there will be at
2424 	 * least 1 frame, so don't compensate for the modulo of the
2425 	 * following division.
2426 	 */
2427 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2428 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2429 
2430 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2431 		printf("error %d in callback from mfi_send_frame\n", error);
2432 		cm->cm_error = error;
2433 		mfi_complete(sc, cm);
2434 		goto out;
2435 	}
2436 
2437 out:
2438 	/* leave the lock in the state we found it */
2439 	if (locked == 0)
2440 		mtx_unlock(&sc->mfi_io_lock);
2441 
2442 	return;
2443 }
2444 
2445 static int
2446 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2447 {
2448 	int error;
2449 
2450 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2451 
2452 	if (sc->MFA_enabled)
2453 		error = mfi_tbolt_send_frame(sc, cm);
2454 	else
2455 		error = mfi_std_send_frame(sc, cm);
2456 
2457 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2458 		mfi_remove_busy(cm);
2459 
2460 	return (error);
2461 }
2462 
2463 static int
2464 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2465 {
2466 	struct mfi_frame_header *hdr;
2467 	int tm = mfi_polled_cmd_timeout * 1000;
2468 
2469 	hdr = &cm->cm_frame->header;
2470 
2471 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2472 		cm->cm_timestamp = time_uptime;
2473 		mfi_enqueue_busy(cm);
2474 	} else {
2475 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2476 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2477 	}
2478 
2479 	/*
2480 	 * The bus address of the command is aligned on a 64 byte boundary,
2481 	 * leaving the least 6 bits as zero.  For whatever reason, the
2482 	 * hardware wants the address shifted right by three, leaving just
2483 	 * 3 zero bits.  These three bits are then used as a prefetching
2484 	 * hint for the hardware to predict how many frames need to be
2485 	 * fetched across the bus.  If a command has more than 8 frames
2486 	 * then the 3 bits are set to 0x7 and the firmware uses other
2487 	 * information in the command to determine the total amount to fetch.
2488 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2489 	 * is enough for both 32bit and 64bit systems.
2490 	 */
2491 	if (cm->cm_extra_frames > 7)
2492 		cm->cm_extra_frames = 7;
2493 
2494 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2495 
2496 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2497 		return (0);
2498 
2499 	/* This is a polled command, so busy-wait for it to complete. */
2500 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2501 		DELAY(1000);
2502 		tm -= 1;
2503 		if (tm <= 0)
2504 			break;
2505 	}
2506 
2507 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2508 		device_printf(sc->mfi_dev, "Frame %p timed out "
2509 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2510 		return (ETIMEDOUT);
2511 	}
2512 
2513 	return (0);
2514 }
2515 
2516 
2517 void
2518 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2519 {
2520 	int dir;
2521 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2522 
2523 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2524 		dir = 0;
2525 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2526 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2527 			dir |= BUS_DMASYNC_POSTREAD;
2528 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2529 			dir |= BUS_DMASYNC_POSTWRITE;
2530 
2531 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2532 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2533 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2534 	}
2535 
2536 	cm->cm_flags |= MFI_CMD_COMPLETED;
2537 
2538 	if (cm->cm_complete != NULL)
2539 		cm->cm_complete(cm);
2540 	else
2541 		wakeup(cm);
2542 }
2543 
2544 static int
2545 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2546 {
2547 	struct mfi_command *cm;
2548 	struct mfi_abort_frame *abort;
2549 	int i = 0, error;
2550 	uint32_t context = 0;
2551 
2552 	mtx_lock(&sc->mfi_io_lock);
2553 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2554 		mtx_unlock(&sc->mfi_io_lock);
2555 		return (EBUSY);
2556 	}
2557 
2558 	/* Zero out the MFI frame */
2559 	context = cm->cm_frame->header.context;
2560 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2561 	cm->cm_frame->header.context = context;
2562 
2563 	abort = &cm->cm_frame->abort;
2564 	abort->header.cmd = MFI_CMD_ABORT;
2565 	abort->header.flags = 0;
2566 	abort->header.scsi_status = 0;
2567 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2568 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2569 	abort->abort_mfi_addr_hi =
2570 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2571 	cm->cm_data = NULL;
2572 	cm->cm_flags = MFI_CMD_POLLED;
2573 
2574 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2575 		device_printf(sc->mfi_dev, "failed to abort command\n");
2576 	mfi_release_command(cm);
2577 
2578 	mtx_unlock(&sc->mfi_io_lock);
2579 	while (i < 5 && *cm_abort != NULL) {
2580 		tsleep(cm_abort, 0, "mfiabort",
2581 		    5 * hz);
2582 		i++;
2583 	}
2584 	if (*cm_abort != NULL) {
2585 		/* Force a complete if command didn't abort */
2586 		mtx_lock(&sc->mfi_io_lock);
2587 		(*cm_abort)->cm_complete(*cm_abort);
2588 		mtx_unlock(&sc->mfi_io_lock);
2589 	}
2590 
2591 	return (error);
2592 }
2593 
2594 int
2595 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2596      int len)
2597 {
2598 	struct mfi_command *cm;
2599 	struct mfi_io_frame *io;
2600 	int error;
2601 	uint32_t context = 0;
2602 
2603 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2604 		return (EBUSY);
2605 
2606 	/* Zero out the MFI frame */
2607 	context = cm->cm_frame->header.context;
2608 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2609 	cm->cm_frame->header.context = context;
2610 
2611 	io = &cm->cm_frame->io;
2612 	io->header.cmd = MFI_CMD_LD_WRITE;
2613 	io->header.target_id = id;
2614 	io->header.timeout = 0;
2615 	io->header.flags = 0;
2616 	io->header.scsi_status = 0;
2617 	io->header.sense_len = MFI_SENSE_LEN;
2618 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2619 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2620 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2621 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2622 	io->lba_lo = lba & 0xffffffff;
2623 	cm->cm_data = virt;
2624 	cm->cm_len = len;
2625 	cm->cm_sg = &io->sgl;
2626 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2627 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2628 
2629 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2630 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2631 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2632 	    BUS_DMASYNC_POSTWRITE);
2633 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2634 	mfi_release_command(cm);
2635 
2636 	return (error);
2637 }
2638 
2639 int
2640 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2641     int len)
2642 {
2643 	struct mfi_command *cm;
2644 	struct mfi_pass_frame *pass;
2645 	int error, readop, cdb_len;
2646 	uint32_t blkcount;
2647 
2648 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2649 		return (EBUSY);
2650 
2651 	pass = &cm->cm_frame->pass;
2652 	bzero(pass->cdb, 16);
2653 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2654 
2655 	readop = 0;
2656 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2657 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2658 	pass->header.target_id = id;
2659 	pass->header.timeout = 0;
2660 	pass->header.flags = 0;
2661 	pass->header.scsi_status = 0;
2662 	pass->header.sense_len = MFI_SENSE_LEN;
2663 	pass->header.data_len = len;
2664 	pass->header.cdb_len = cdb_len;
2665 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2666 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2667 	cm->cm_data = virt;
2668 	cm->cm_len = len;
2669 	cm->cm_sg = &pass->sgl;
2670 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2671 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2672 
2673 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2674 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2675 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2676 	    BUS_DMASYNC_POSTWRITE);
2677 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2678 	mfi_release_command(cm);
2679 
2680 	return (error);
2681 }
2682 
2683 static int
2684 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2685 {
2686 	struct mfi_softc *sc;
2687 	int error;
2688 
2689 	sc = dev->si_drv1;
2690 
2691 	mtx_lock(&sc->mfi_io_lock);
2692 	if (sc->mfi_detaching)
2693 		error = ENXIO;
2694 	else {
2695 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2696 		error = 0;
2697 	}
2698 	mtx_unlock(&sc->mfi_io_lock);
2699 
2700 	return (error);
2701 }
2702 
2703 static int
2704 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2705 {
2706 	struct mfi_softc *sc;
2707 	struct mfi_aen *mfi_aen_entry, *tmp;
2708 
2709 	sc = dev->si_drv1;
2710 
2711 	mtx_lock(&sc->mfi_io_lock);
2712 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2713 
2714 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2715 		if (mfi_aen_entry->p == curproc) {
2716 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2717 			    aen_link);
2718 			free(mfi_aen_entry, M_MFIBUF);
2719 		}
2720 	}
2721 	mtx_unlock(&sc->mfi_io_lock);
2722 	return (0);
2723 }
2724 
2725 static int
2726 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2727 {
2728 
2729 	switch (opcode) {
2730 	case MFI_DCMD_LD_DELETE:
2731 	case MFI_DCMD_CFG_ADD:
2732 	case MFI_DCMD_CFG_CLEAR:
2733 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2734 		sx_xlock(&sc->mfi_config_lock);
2735 		return (1);
2736 	default:
2737 		return (0);
2738 	}
2739 }
2740 
2741 static void
2742 mfi_config_unlock(struct mfi_softc *sc, int locked)
2743 {
2744 
2745 	if (locked)
2746 		sx_xunlock(&sc->mfi_config_lock);
2747 }
2748 
2749 /*
2750  * Perform pre-issue checks on commands from userland and possibly veto
2751  * them.
2752  */
2753 static int
2754 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2755 {
2756 	struct mfi_disk *ld, *ld2;
2757 	int error;
2758 	struct mfi_system_pd *syspd = NULL;
2759 	uint16_t syspd_id;
2760 	uint16_t *mbox;
2761 
2762 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2763 	error = 0;
2764 	switch (cm->cm_frame->dcmd.opcode) {
2765 	case MFI_DCMD_LD_DELETE:
2766 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2767 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2768 				break;
2769 		}
2770 		if (ld == NULL)
2771 			error = ENOENT;
2772 		else
2773 			error = mfi_disk_disable(ld);
2774 		break;
2775 	case MFI_DCMD_CFG_CLEAR:
2776 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2777 			error = mfi_disk_disable(ld);
2778 			if (error)
2779 				break;
2780 		}
2781 		if (error) {
2782 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2783 				if (ld2 == ld)
2784 					break;
2785 				mfi_disk_enable(ld2);
2786 			}
2787 		}
2788 		break;
2789 	case MFI_DCMD_PD_STATE_SET:
2790 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2791 		syspd_id = mbox[0];
2792 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2793 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2794 				if (syspd->pd_id == syspd_id)
2795 					break;
2796 			}
2797 		}
2798 		else
2799 			break;
2800 		if (syspd)
2801 			error = mfi_syspd_disable(syspd);
2802 		break;
2803 	default:
2804 		break;
2805 	}
2806 	return (error);
2807 }
2808 
2809 /* Perform post-issue checks on commands from userland. */
2810 static void
2811 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2812 {
2813 	struct mfi_disk *ld, *ldn;
2814 	struct mfi_system_pd *syspd = NULL;
2815 	uint16_t syspd_id;
2816 	uint16_t *mbox;
2817 
2818 	switch (cm->cm_frame->dcmd.opcode) {
2819 	case MFI_DCMD_LD_DELETE:
2820 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2821 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2822 				break;
2823 		}
2824 		KASSERT(ld != NULL, ("volume dissappeared"));
2825 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2826 			mtx_unlock(&sc->mfi_io_lock);
2827 			mtx_lock(&Giant);
2828 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2829 			mtx_unlock(&Giant);
2830 			mtx_lock(&sc->mfi_io_lock);
2831 		} else
2832 			mfi_disk_enable(ld);
2833 		break;
2834 	case MFI_DCMD_CFG_CLEAR:
2835 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2836 			mtx_unlock(&sc->mfi_io_lock);
2837 			mtx_lock(&Giant);
2838 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2839 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2840 			}
2841 			mtx_unlock(&Giant);
2842 			mtx_lock(&sc->mfi_io_lock);
2843 		} else {
2844 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2845 				mfi_disk_enable(ld);
2846 		}
2847 		break;
2848 	case MFI_DCMD_CFG_ADD:
2849 		mfi_ldprobe(sc);
2850 		break;
2851 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2852 		mfi_ldprobe(sc);
2853 		break;
2854 	case MFI_DCMD_PD_STATE_SET:
2855 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2856 		syspd_id = mbox[0];
2857 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2858 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2859 				if (syspd->pd_id == syspd_id)
2860 					break;
2861 			}
2862 		}
2863 		else
2864 			break;
2865 		/* If the transition fails then enable the syspd again */
2866 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2867 			mfi_syspd_enable(syspd);
2868 		break;
2869 	}
2870 }
2871 
2872 static int
2873 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2874 {
2875 	struct mfi_config_data *conf_data;
2876 	struct mfi_command *ld_cm = NULL;
2877 	struct mfi_ld_info *ld_info = NULL;
2878 	struct mfi_ld_config *ld;
2879 	char *p;
2880 	int error = 0;
2881 
2882 	conf_data = (struct mfi_config_data *)cm->cm_data;
2883 
2884 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2885 		p = (char *)conf_data->array;
2886 		p += conf_data->array_size * conf_data->array_count;
2887 		ld = (struct mfi_ld_config *)p;
2888 		if (ld->params.isSSCD == 1)
2889 			error = 1;
2890 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2891 		error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2892 		    (void **)&ld_info, sizeof(*ld_info));
2893 		if (error) {
2894 			device_printf(sc->mfi_dev, "Failed to allocate"
2895 			    "MFI_DCMD_LD_GET_INFO %d", error);
2896 			if (ld_info)
2897 				free(ld_info, M_MFIBUF);
2898 			return 0;
2899 		}
2900 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2901 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2902 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2903 		if (mfi_wait_command(sc, ld_cm) != 0) {
2904 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2905 			mfi_release_command(ld_cm);
2906 			free(ld_info, M_MFIBUF);
2907 			return 0;
2908 		}
2909 
2910 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2911 			free(ld_info, M_MFIBUF);
2912 			mfi_release_command(ld_cm);
2913 			return 0;
2914 		}
2915 		else
2916 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2917 
2918 		if (ld_info->ld_config.params.isSSCD == 1)
2919 			error = 1;
2920 
2921 		mfi_release_command(ld_cm);
2922 		free(ld_info, M_MFIBUF);
2923 
2924 	}
2925 	return error;
2926 }
2927 
2928 static int
2929 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2930 {
2931 	uint8_t i;
2932 	struct mfi_ioc_packet *ioc;
2933 	ioc = (struct mfi_ioc_packet *)arg;
2934 	int sge_size, error;
2935 	struct megasas_sge *kern_sge;
2936 
2937 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2938 	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2939 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2940 
2941 	if (sizeof(bus_addr_t) == 8) {
2942 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2943 		cm->cm_extra_frames = 2;
2944 		sge_size = sizeof(struct mfi_sg64);
2945 	} else {
2946 		cm->cm_extra_frames =  (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2947 		sge_size = sizeof(struct mfi_sg32);
2948 	}
2949 
2950 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2951 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2952 			if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
2953 			1, 0,			/* algnmnt, boundary */
2954 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2955 			BUS_SPACE_MAXADDR,	/* highaddr */
2956 			NULL, NULL,		/* filter, filterarg */
2957 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2958 			2,			/* nsegments */
2959 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2960 			BUS_DMA_ALLOCNOW,	/* flags */
2961 			NULL, NULL,		/* lockfunc, lockarg */
2962 			&sc->mfi_kbuff_arr_dmat[i])) {
2963 			device_printf(sc->mfi_dev,
2964 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2965 			return (ENOMEM);
2966 		}
2967 
2968 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2969 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2970 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2971 			device_printf(sc->mfi_dev,
2972 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2973 			return (ENOMEM);
2974 		}
2975 
2976 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2977 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2978 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2979 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2980 
2981 		if (!sc->kbuff_arr[i]) {
2982 			device_printf(sc->mfi_dev,
2983 			    "Could not allocate memory for kbuff_arr info\n");
2984 			return -1;
2985 		}
2986 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2987 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2988 
2989 		if (sizeof(bus_addr_t) == 8) {
2990 			cm->cm_frame->stp.sgl.sg64[i].addr =
2991 			    kern_sge[i].phys_addr;
2992 			cm->cm_frame->stp.sgl.sg64[i].len =
2993 			    ioc->mfi_sgl[i].iov_len;
2994 		} else {
2995 			cm->cm_frame->stp.sgl.sg32[i].len =
2996 			    kern_sge[i].phys_addr;
2997 			cm->cm_frame->stp.sgl.sg32[i].len =
2998 			    ioc->mfi_sgl[i].iov_len;
2999 		}
3000 
3001 		error = copyin(ioc->mfi_sgl[i].iov_base,
3002 		    sc->kbuff_arr[i],
3003 		    ioc->mfi_sgl[i].iov_len);
3004 		if (error != 0) {
3005 			device_printf(sc->mfi_dev, "Copy in failed\n");
3006 			return error;
3007 		}
3008 	}
3009 
3010 	cm->cm_flags |=MFI_CMD_MAPPED;
3011 	return 0;
3012 }
3013 
3014 static int
3015 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3016 {
3017 	struct mfi_command *cm;
3018 	struct mfi_dcmd_frame *dcmd;
3019 	void *ioc_buf = NULL;
3020 	uint32_t context;
3021 	int error = 0, locked;
3022 
3023 
3024 	if (ioc->buf_size > 0) {
3025 		if (ioc->buf_size > 1024 * 1024)
3026 			return (ENOMEM);
3027 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3028 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3029 		if (error) {
3030 			device_printf(sc->mfi_dev, "failed to copyin\n");
3031 			free(ioc_buf, M_MFIBUF);
3032 			return (error);
3033 		}
3034 	}
3035 
3036 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3037 
3038 	mtx_lock(&sc->mfi_io_lock);
3039 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3040 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3041 
3042 	/* Save context for later */
3043 	context = cm->cm_frame->header.context;
3044 
3045 	dcmd = &cm->cm_frame->dcmd;
3046 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3047 
3048 	cm->cm_sg = &dcmd->sgl;
3049 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3050 	cm->cm_data = ioc_buf;
3051 	cm->cm_len = ioc->buf_size;
3052 
3053 	/* restore context */
3054 	cm->cm_frame->header.context = context;
3055 
3056 	/* Cheat since we don't know if we're writing or reading */
3057 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3058 
3059 	error = mfi_check_command_pre(sc, cm);
3060 	if (error)
3061 		goto out;
3062 
3063 	error = mfi_wait_command(sc, cm);
3064 	if (error) {
3065 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3066 		goto out;
3067 	}
3068 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3069 	mfi_check_command_post(sc, cm);
3070 out:
3071 	mfi_release_command(cm);
3072 	mtx_unlock(&sc->mfi_io_lock);
3073 	mfi_config_unlock(sc, locked);
3074 	if (ioc->buf_size > 0)
3075 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3076 	if (ioc_buf)
3077 		free(ioc_buf, M_MFIBUF);
3078 	return (error);
3079 }
3080 
3081 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3082 
3083 static int
3084 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3085 {
3086 	struct mfi_softc *sc;
3087 	union mfi_statrequest *ms;
3088 	struct mfi_ioc_packet *ioc;
3089 #ifdef COMPAT_FREEBSD32
3090 	struct mfi_ioc_packet32 *ioc32;
3091 #endif
3092 	struct mfi_ioc_aen *aen;
3093 	struct mfi_command *cm = NULL;
3094 	uint32_t context = 0;
3095 	union mfi_sense_ptr sense_ptr;
3096 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3097 	size_t len;
3098 	int i, res;
3099 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3100 #ifdef COMPAT_FREEBSD32
3101 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3102 	struct mfi_ioc_passthru iop_swab;
3103 #endif
3104 	int error, locked;
3105 	union mfi_sgl *sgl;
3106 	sc = dev->si_drv1;
3107 	error = 0;
3108 
3109 	if (sc->adpreset)
3110 		return EBUSY;
3111 
3112 	if (sc->hw_crit_error)
3113 		return EBUSY;
3114 
3115 	if (sc->issuepend_done == 0)
3116 		return EBUSY;
3117 
3118 	switch (cmd) {
3119 	case MFIIO_STATS:
3120 		ms = (union mfi_statrequest *)arg;
3121 		switch (ms->ms_item) {
3122 		case MFIQ_FREE:
3123 		case MFIQ_BIO:
3124 		case MFIQ_READY:
3125 		case MFIQ_BUSY:
3126 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3127 			    sizeof(struct mfi_qstat));
3128 			break;
3129 		default:
3130 			error = ENOIOCTL;
3131 			break;
3132 		}
3133 		break;
3134 	case MFIIO_QUERY_DISK:
3135 	{
3136 		struct mfi_query_disk *qd;
3137 		struct mfi_disk *ld;
3138 
3139 		qd = (struct mfi_query_disk *)arg;
3140 		mtx_lock(&sc->mfi_io_lock);
3141 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3142 			if (ld->ld_id == qd->array_id)
3143 				break;
3144 		}
3145 		if (ld == NULL) {
3146 			qd->present = 0;
3147 			mtx_unlock(&sc->mfi_io_lock);
3148 			return (0);
3149 		}
3150 		qd->present = 1;
3151 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3152 			qd->open = 1;
3153 		bzero(qd->devname, SPECNAMELEN + 1);
3154 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3155 		mtx_unlock(&sc->mfi_io_lock);
3156 		break;
3157 	}
3158 	case MFI_CMD:
3159 #ifdef COMPAT_FREEBSD32
3160 	case MFI_CMD32:
3161 #endif
3162 		{
3163 		devclass_t devclass;
3164 		ioc = (struct mfi_ioc_packet *)arg;
3165 		int adapter;
3166 
3167 		adapter = ioc->mfi_adapter_no;
3168 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3169 			devclass = devclass_find("mfi");
3170 			sc = devclass_get_softc(devclass, adapter);
3171 		}
3172 		mtx_lock(&sc->mfi_io_lock);
3173 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3174 			mtx_unlock(&sc->mfi_io_lock);
3175 			return (EBUSY);
3176 		}
3177 		mtx_unlock(&sc->mfi_io_lock);
3178 		locked = 0;
3179 
3180 		/*
3181 		 * save off original context since copying from user
3182 		 * will clobber some data
3183 		 */
3184 		context = cm->cm_frame->header.context;
3185 		cm->cm_frame->header.context = cm->cm_index;
3186 
3187 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3188 		    2 * MEGAMFI_FRAME_SIZE);
3189 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3190 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3191 		cm->cm_frame->header.scsi_status = 0;
3192 		cm->cm_frame->header.pad0 = 0;
3193 		if (ioc->mfi_sge_count) {
3194 			cm->cm_sg =
3195 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3196 		}
3197 		sgl = cm->cm_sg;
3198 		cm->cm_flags = 0;
3199 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3200 			cm->cm_flags |= MFI_CMD_DATAIN;
3201 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3202 			cm->cm_flags |= MFI_CMD_DATAOUT;
3203 		/* Legacy app shim */
3204 		if (cm->cm_flags == 0)
3205 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3206 		cm->cm_len = cm->cm_frame->header.data_len;
3207 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3208 #ifdef COMPAT_FREEBSD32
3209 			if (cmd == MFI_CMD) {
3210 #endif
3211 				/* Native */
3212 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3213 #ifdef COMPAT_FREEBSD32
3214 			} else {
3215 				/* 32bit on 64bit */
3216 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3217 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3218 			}
3219 #endif
3220 			cm->cm_len += cm->cm_stp_len;
3221 		}
3222 		if (cm->cm_len &&
3223 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3224 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3225 			    M_WAITOK | M_ZERO);
3226 			if (cm->cm_data == NULL) {
3227 				device_printf(sc->mfi_dev, "Malloc failed\n");
3228 				goto out;
3229 			}
3230 		} else {
3231 			cm->cm_data = 0;
3232 		}
3233 
3234 		/* restore header context */
3235 		cm->cm_frame->header.context = context;
3236 
3237 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3238 			res = mfi_stp_cmd(sc, cm, arg);
3239 			if (res != 0)
3240 				goto out;
3241 		} else {
3242 			temp = data;
3243 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3244 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3245 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3246 #ifdef COMPAT_FREEBSD32
3247 					if (cmd == MFI_CMD) {
3248 #endif
3249 						/* Native */
3250 						addr = ioc->mfi_sgl[i].iov_base;
3251 						len = ioc->mfi_sgl[i].iov_len;
3252 #ifdef COMPAT_FREEBSD32
3253 					} else {
3254 						/* 32bit on 64bit */
3255 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3256 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3257 						len = ioc32->mfi_sgl[i].iov_len;
3258 					}
3259 #endif
3260 					error = copyin(addr, temp, len);
3261 					if (error != 0) {
3262 						device_printf(sc->mfi_dev,
3263 						    "Copy in failed\n");
3264 						goto out;
3265 					}
3266 					temp = &temp[len];
3267 				}
3268 			}
3269 		}
3270 
3271 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3272 			locked = mfi_config_lock(sc,
3273 			     cm->cm_frame->dcmd.opcode);
3274 
3275 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3276 			cm->cm_frame->pass.sense_addr_lo =
3277 			    (uint32_t)cm->cm_sense_busaddr;
3278 			cm->cm_frame->pass.sense_addr_hi =
3279 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3280 		}
3281 		mtx_lock(&sc->mfi_io_lock);
3282 		skip_pre_post = mfi_check_for_sscd (sc, cm);
3283 		if (!skip_pre_post) {
3284 			error = mfi_check_command_pre(sc, cm);
3285 			if (error) {
3286 				mtx_unlock(&sc->mfi_io_lock);
3287 				goto out;
3288 			}
3289 		}
3290 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3291 			device_printf(sc->mfi_dev,
3292 			    "Controller polled failed\n");
3293 			mtx_unlock(&sc->mfi_io_lock);
3294 			goto out;
3295 		}
3296 		if (!skip_pre_post) {
3297 			mfi_check_command_post(sc, cm);
3298 		}
3299 		mtx_unlock(&sc->mfi_io_lock);
3300 
3301 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3302 			temp = data;
3303 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3304 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3305 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3306 #ifdef COMPAT_FREEBSD32
3307 					if (cmd == MFI_CMD) {
3308 #endif
3309 						/* Native */
3310 						addr = ioc->mfi_sgl[i].iov_base;
3311 						len = ioc->mfi_sgl[i].iov_len;
3312 #ifdef COMPAT_FREEBSD32
3313 					} else {
3314 						/* 32bit on 64bit */
3315 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3316 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3317 						len = ioc32->mfi_sgl[i].iov_len;
3318 					}
3319 #endif
3320 					error = copyout(temp, addr, len);
3321 					if (error != 0) {
3322 						device_printf(sc->mfi_dev,
3323 						    "Copy out failed\n");
3324 						goto out;
3325 					}
3326 					temp = &temp[len];
3327 				}
3328 			}
3329 		}
3330 
3331 		if (ioc->mfi_sense_len) {
3332 			/* get user-space sense ptr then copy out sense */
3333 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3334 			    &sense_ptr.sense_ptr_data[0],
3335 			    sizeof(sense_ptr.sense_ptr_data));
3336 #ifdef COMPAT_FREEBSD32
3337 			if (cmd != MFI_CMD) {
3338 				/*
3339 				 * not 64bit native so zero out any address
3340 				 * over 32bit */
3341 				sense_ptr.addr.high = 0;
3342 			}
3343 #endif
3344 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3345 			    ioc->mfi_sense_len);
3346 			if (error != 0) {
3347 				device_printf(sc->mfi_dev,
3348 				    "Copy out failed\n");
3349 				goto out;
3350 			}
3351 		}
3352 
3353 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3354 out:
3355 		mfi_config_unlock(sc, locked);
3356 		if (data)
3357 			free(data, M_MFIBUF);
3358 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3359 			for (i = 0; i < 2; i++) {
3360 				if (sc->kbuff_arr[i]) {
3361 					if (sc->mfi_kbuff_arr_busaddr != 0)
3362 						bus_dmamap_unload(
3363 						    sc->mfi_kbuff_arr_dmat[i],
3364 						    sc->mfi_kbuff_arr_dmamap[i]
3365 						    );
3366 					if (sc->kbuff_arr[i] != NULL)
3367 						bus_dmamem_free(
3368 						    sc->mfi_kbuff_arr_dmat[i],
3369 						    sc->kbuff_arr[i],
3370 						    sc->mfi_kbuff_arr_dmamap[i]
3371 						    );
3372 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3373 						bus_dma_tag_destroy(
3374 						    sc->mfi_kbuff_arr_dmat[i]);
3375 				}
3376 			}
3377 		}
3378 		if (cm) {
3379 			mtx_lock(&sc->mfi_io_lock);
3380 			mfi_release_command(cm);
3381 			mtx_unlock(&sc->mfi_io_lock);
3382 		}
3383 
3384 		break;
3385 		}
3386 	case MFI_SET_AEN:
3387 		aen = (struct mfi_ioc_aen *)arg;
3388 		mtx_lock(&sc->mfi_io_lock);
3389 		error = mfi_aen_register(sc, aen->aen_seq_num,
3390 		    aen->aen_class_locale);
3391 		mtx_unlock(&sc->mfi_io_lock);
3392 
3393 		break;
3394 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3395 		{
3396 			devclass_t devclass;
3397 			struct mfi_linux_ioc_packet l_ioc;
3398 			int adapter;
3399 
3400 			devclass = devclass_find("mfi");
3401 			if (devclass == NULL)
3402 				return (ENOENT);
3403 
3404 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3405 			if (error)
3406 				return (error);
3407 			adapter = l_ioc.lioc_adapter_no;
3408 			sc = devclass_get_softc(devclass, adapter);
3409 			if (sc == NULL)
3410 				return (ENOENT);
3411 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3412 			    cmd, arg, flag, td));
3413 			break;
3414 		}
3415 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3416 		{
3417 			devclass_t devclass;
3418 			struct mfi_linux_ioc_aen l_aen;
3419 			int adapter;
3420 
3421 			devclass = devclass_find("mfi");
3422 			if (devclass == NULL)
3423 				return (ENOENT);
3424 
3425 			error = copyin(arg, &l_aen, sizeof(l_aen));
3426 			if (error)
3427 				return (error);
3428 			adapter = l_aen.laen_adapter_no;
3429 			sc = devclass_get_softc(devclass, adapter);
3430 			if (sc == NULL)
3431 				return (ENOENT);
3432 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3433 			    cmd, arg, flag, td));
3434 			break;
3435 		}
3436 #ifdef COMPAT_FREEBSD32
3437 	case MFIIO_PASSTHRU32:
3438 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3439 			error = ENOTTY;
3440 			break;
3441 		}
3442 		iop_swab.ioc_frame	= iop32->ioc_frame;
3443 		iop_swab.buf_size	= iop32->buf_size;
3444 		iop_swab.buf		= PTRIN(iop32->buf);
3445 		iop			= &iop_swab;
3446 		/* FALLTHROUGH */
3447 #endif
3448 	case MFIIO_PASSTHRU:
3449 		error = mfi_user_command(sc, iop);
3450 #ifdef COMPAT_FREEBSD32
3451 		if (cmd == MFIIO_PASSTHRU32)
3452 			iop32->ioc_frame = iop_swab.ioc_frame;
3453 #endif
3454 		break;
3455 	default:
3456 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3457 		error = ENOTTY;
3458 		break;
3459 	}
3460 
3461 	return (error);
3462 }
3463 
3464 static int
3465 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3466 {
3467 	struct mfi_softc *sc;
3468 	struct mfi_linux_ioc_packet l_ioc;
3469 	struct mfi_linux_ioc_aen l_aen;
3470 	struct mfi_command *cm = NULL;
3471 	struct mfi_aen *mfi_aen_entry;
3472 	union mfi_sense_ptr sense_ptr;
3473 	uint32_t context = 0;
3474 	uint8_t *data = NULL, *temp;
3475 	int i;
3476 	int error, locked;
3477 
3478 	sc = dev->si_drv1;
3479 	error = 0;
3480 	switch (cmd) {
3481 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3482 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3483 		if (error != 0)
3484 			return (error);
3485 
3486 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3487 			return (EINVAL);
3488 		}
3489 
3490 		mtx_lock(&sc->mfi_io_lock);
3491 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3492 			mtx_unlock(&sc->mfi_io_lock);
3493 			return (EBUSY);
3494 		}
3495 		mtx_unlock(&sc->mfi_io_lock);
3496 		locked = 0;
3497 
3498 		/*
3499 		 * save off original context since copying from user
3500 		 * will clobber some data
3501 		 */
3502 		context = cm->cm_frame->header.context;
3503 
3504 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3505 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3506 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3507 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3508 		cm->cm_frame->header.scsi_status = 0;
3509 		cm->cm_frame->header.pad0 = 0;
3510 		if (l_ioc.lioc_sge_count)
3511 			cm->cm_sg =
3512 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3513 		cm->cm_flags = 0;
3514 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3515 			cm->cm_flags |= MFI_CMD_DATAIN;
3516 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3517 			cm->cm_flags |= MFI_CMD_DATAOUT;
3518 		cm->cm_len = cm->cm_frame->header.data_len;
3519 		if (cm->cm_len &&
3520 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3521 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3522 			    M_WAITOK | M_ZERO);
3523 			if (cm->cm_data == NULL) {
3524 				device_printf(sc->mfi_dev, "Malloc failed\n");
3525 				goto out;
3526 			}
3527 		} else {
3528 			cm->cm_data = 0;
3529 		}
3530 
3531 		/* restore header context */
3532 		cm->cm_frame->header.context = context;
3533 
3534 		temp = data;
3535 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3536 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3537 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3538 				       temp,
3539 				       l_ioc.lioc_sgl[i].iov_len);
3540 				if (error != 0) {
3541 					device_printf(sc->mfi_dev,
3542 					    "Copy in failed\n");
3543 					goto out;
3544 				}
3545 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3546 			}
3547 		}
3548 
3549 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3550 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3551 
3552 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3553 			cm->cm_frame->pass.sense_addr_lo =
3554 			    (uint32_t)cm->cm_sense_busaddr;
3555 			cm->cm_frame->pass.sense_addr_hi =
3556 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3557 		}
3558 
3559 		mtx_lock(&sc->mfi_io_lock);
3560 		error = mfi_check_command_pre(sc, cm);
3561 		if (error) {
3562 			mtx_unlock(&sc->mfi_io_lock);
3563 			goto out;
3564 		}
3565 
3566 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3567 			device_printf(sc->mfi_dev,
3568 			    "Controller polled failed\n");
3569 			mtx_unlock(&sc->mfi_io_lock);
3570 			goto out;
3571 		}
3572 
3573 		mfi_check_command_post(sc, cm);
3574 		mtx_unlock(&sc->mfi_io_lock);
3575 
3576 		temp = data;
3577 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3578 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3579 				error = copyout(temp,
3580 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3581 					l_ioc.lioc_sgl[i].iov_len);
3582 				if (error != 0) {
3583 					device_printf(sc->mfi_dev,
3584 					    "Copy out failed\n");
3585 					goto out;
3586 				}
3587 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3588 			}
3589 		}
3590 
3591 		if (l_ioc.lioc_sense_len) {
3592 			/* get user-space sense ptr then copy out sense */
3593 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3594                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3595 			    &sense_ptr.sense_ptr_data[0],
3596 			    sizeof(sense_ptr.sense_ptr_data));
3597 #ifdef __amd64__
3598 			/*
3599 			 * only 32bit Linux support so zero out any
3600 			 * address over 32bit
3601 			 */
3602 			sense_ptr.addr.high = 0;
3603 #endif
3604 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3605 			    l_ioc.lioc_sense_len);
3606 			if (error != 0) {
3607 				device_printf(sc->mfi_dev,
3608 				    "Copy out failed\n");
3609 				goto out;
3610 			}
3611 		}
3612 
3613 		error = copyout(&cm->cm_frame->header.cmd_status,
3614 			&((struct mfi_linux_ioc_packet*)arg)
3615 			->lioc_frame.hdr.cmd_status,
3616 			1);
3617 		if (error != 0) {
3618 			device_printf(sc->mfi_dev,
3619 				      "Copy out failed\n");
3620 			goto out;
3621 		}
3622 
3623 out:
3624 		mfi_config_unlock(sc, locked);
3625 		if (data)
3626 			free(data, M_MFIBUF);
3627 		if (cm) {
3628 			mtx_lock(&sc->mfi_io_lock);
3629 			mfi_release_command(cm);
3630 			mtx_unlock(&sc->mfi_io_lock);
3631 		}
3632 
3633 		return (error);
3634 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3635 		error = copyin(arg, &l_aen, sizeof(l_aen));
3636 		if (error != 0)
3637 			return (error);
3638 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3639 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3640 		    M_WAITOK);
3641 		mtx_lock(&sc->mfi_io_lock);
3642 		if (mfi_aen_entry != NULL) {
3643 			mfi_aen_entry->p = curproc;
3644 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3645 			    aen_link);
3646 		}
3647 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3648 		    l_aen.laen_class_locale);
3649 
3650 		if (error != 0) {
3651 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3652 			    aen_link);
3653 			free(mfi_aen_entry, M_MFIBUF);
3654 		}
3655 		mtx_unlock(&sc->mfi_io_lock);
3656 
3657 		return (error);
3658 	default:
3659 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3660 		error = ENOENT;
3661 		break;
3662 	}
3663 
3664 	return (error);
3665 }
3666 
3667 static int
3668 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3669 {
3670 	struct mfi_softc *sc;
3671 	int revents = 0;
3672 
3673 	sc = dev->si_drv1;
3674 
3675 	if (poll_events & (POLLIN | POLLRDNORM)) {
3676 		if (sc->mfi_aen_triggered != 0) {
3677 			revents |= poll_events & (POLLIN | POLLRDNORM);
3678 			sc->mfi_aen_triggered = 0;
3679 		}
3680 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3681 			revents |= POLLERR;
3682 		}
3683 	}
3684 
3685 	if (revents == 0) {
3686 		if (poll_events & (POLLIN | POLLRDNORM)) {
3687 			sc->mfi_poll_waiting = 1;
3688 			selrecord(td, &sc->mfi_select);
3689 		}
3690 	}
3691 
3692 	return revents;
3693 }
3694 
3695 static void
3696 mfi_dump_all(void)
3697 {
3698 	struct mfi_softc *sc;
3699 	struct mfi_command *cm;
3700 	devclass_t dc;
3701 	time_t deadline;
3702 	int timedout;
3703 	int i;
3704 
3705 	dc = devclass_find("mfi");
3706 	if (dc == NULL) {
3707 		printf("No mfi dev class\n");
3708 		return;
3709 	}
3710 
3711 	for (i = 0; ; i++) {
3712 		sc = devclass_get_softc(dc, i);
3713 		if (sc == NULL)
3714 			break;
3715 		device_printf(sc->mfi_dev, "Dumping\n\n");
3716 		timedout = 0;
3717 		deadline = time_uptime - MFI_CMD_TIMEOUT;
3718 		mtx_lock(&sc->mfi_io_lock);
3719 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3720 			if (cm->cm_timestamp <= deadline) {
3721 				device_printf(sc->mfi_dev,
3722 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3723 				    cm, (int)(time_uptime - cm->cm_timestamp));
3724 				MFI_PRINT_CMD(cm);
3725 				timedout++;
3726 			}
3727 		}
3728 
3729 #if 0
3730 		if (timedout)
3731 			MFI_DUMP_CMDS(sc);
3732 #endif
3733 
3734 		mtx_unlock(&sc->mfi_io_lock);
3735 	}
3736 
3737 	return;
3738 }
3739 
3740 static void
3741 mfi_timeout(void *data)
3742 {
3743 	struct mfi_softc *sc = (struct mfi_softc *)data;
3744 	struct mfi_command *cm, *tmp;
3745 	time_t deadline;
3746 	int timedout = 0;
3747 
3748 	deadline = time_uptime - MFI_CMD_TIMEOUT;
3749 	if (sc->adpreset == 0) {
3750 		if (!mfi_tbolt_reset(sc)) {
3751 			callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3752 			return;
3753 		}
3754 	}
3755 	mtx_lock(&sc->mfi_io_lock);
3756 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3757 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3758 			continue;
3759 		if (cm->cm_timestamp <= deadline) {
3760 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3761 				cm->cm_timestamp = time_uptime;
3762 			} else {
3763 				device_printf(sc->mfi_dev,
3764 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3765 				     cm, (int)(time_uptime - cm->cm_timestamp)
3766 				     );
3767 				MFI_PRINT_CMD(cm);
3768 				MFI_VALIDATE_CMD(sc, cm);
3769 				/*
3770 				 * Fail the command instead of leaving it on
3771 				 * the queue where it could remain stuck forever
3772 				 */
3773 				mfi_remove_busy(cm);
3774 				cm->cm_error = ETIMEDOUT;
3775 				mfi_complete(sc, cm);
3776 				timedout++;
3777 			}
3778 		}
3779 	}
3780 
3781 #if 0
3782 	if (timedout)
3783 		MFI_DUMP_CMDS(sc);
3784 #endif
3785 
3786 	mtx_unlock(&sc->mfi_io_lock);
3787 
3788 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3789 	    mfi_timeout, sc);
3790 
3791 	if (0)
3792 		mfi_dump_all();
3793 	return;
3794 }
3795