/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	   0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	   &mfi_polled_cmd_timeout, 0,
	   "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

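/*
 * Check whether the pending interrupt belongs to this controller: return 1
 * if it does not, otherwise acknowledge (clear) it and return 0.
 */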
static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return 1;
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

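/*
 * Step the firmware through its initialization states until it reports
 * READY, issuing the doorbell writes each state requires and polling the
 * status register (every 100ms, for up to max_wait seconds per state)
 * for progress.
 */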
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FW_INIT_2:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

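/*
 * bus_dmamap_load() callback for the single-segment, static allocations
 * below; it simply records the bus address of the mapping.
 */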
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MFI_MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
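	/*
	 * A buffer of MFI_MAXPHYS bytes spans at most
	 * (MFI_MAXPHYS / PAGE_SIZE) + 1 pages when it is not page-aligned,
	 * so there is no benefit to more S/G entries than that.
	 */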
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a DMA mapping for the MPI2 IOC Init descriptor.
		 * It is kept separate from the request and reply descriptor
		 * allocations above to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to the operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			/* Releasing the command places it on the free queue */
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues, e.g. the busy queue, depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure that it's
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

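/*
 * Tell the firmware where the comms queues live and move it to the
 * operational state.
 */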
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

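/*
 * Config intrhook: enable interrupts and probe for logical volumes (and,
 * on skinny controllers, system PDs), then release the hook so boot can
 * continue.
 */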
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);

	config_intrhook_disestablish(&sc->mfi_ich);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PDs */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		/* Skip entries that describe an enclosure itself */
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPDs whose state has changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			device_printf(sc->mfi_dev, "syspd %d removed\n",
			    syspd->pd_id);
			mtx_unlock(&sc->mfi_io_lock);
			bus_topo_lock();
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			bus_topo_unlock();
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is instead the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPDs and delete
				 * invalid SYSPDs.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events logged since
		 * the last shutdown.  Avoid acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				bus_topo_lock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				bus_topo_unlock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						bus_topo_lock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						bus_topo_unlock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPDs */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

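/*
 * Taskqueue handler: pull all queued events off the list under the I/O
 * lock, then decode them without it held.
 */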
1648 static void
1649 mfi_handle_evt(void *context, int pending)
1650 {
1651 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1652 	struct mfi_softc *sc;
1653 	struct mfi_evt_queue_elm *elm;
1654 
1655 	sc = context;
1656 	TAILQ_INIT(&queue);
1657 	mtx_lock(&sc->mfi_io_lock);
1658 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1659 	mtx_unlock(&sc->mfi_io_lock);
1660 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1661 		TAILQ_REMOVE(&queue, elm, link);
1662 		mfi_decode_evt(sc, &elm->detail);
1663 		free(elm, M_MFIBUF);
1664 	}
1665 }
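
/*
 * Editor's note: mfi_queue_evt() and mfi_handle_evt() above follow the
 * usual deferred-work idiom: producers append to sc->mfi_evt_queue under
 * mfi_io_lock, and the swi taskqueue later splices the whole list onto a
 * local head so each event can be decoded without holding the lock.  A
 * generic sketch of the idiom (hypothetical types, illustrative only):
 */
#if 0
struct work { TAILQ_ENTRY(work) link; };
TAILQ_HEAD(workq, work);

static void
drain_work(struct workq *shared, struct mtx *lk)
{
	struct workq local;
	struct work *w;

	TAILQ_INIT(&local);
	mtx_lock(lk);
	TAILQ_CONCAT(&local, shared, link);	/* O(1) splice; empties shared */
	mtx_unlock(lk);
	while ((w = TAILQ_FIRST(&local)) != NULL) {
		TAILQ_REMOVE(&local, w, link);
		/* process and free w with the lock dropped */
	}
}
#endif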
1666 
1667 static int
1668 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1669 {
1670 	struct mfi_command *cm;
1671 	struct mfi_dcmd_frame *dcmd;
1672 	union mfi_evt current_aen, prior_aen;
1673 	struct mfi_evt_detail *ed = NULL;
1674 	int error = 0;
1675 
1676 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1677 
1678 	current_aen.word = locale;
1679 	if (sc->mfi_aen_cm != NULL) {
1680 		prior_aen.word =
1681 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1682 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1683 		    !((prior_aen.members.locale & current_aen.members.locale)
1684 		    ^current_aen.members.locale)) {
1685 			return (0);
1686 		} else {
1687 			prior_aen.members.locale |= current_aen.members.locale;
1688 			if (prior_aen.members.evt_class
1689 			    < current_aen.members.evt_class)
1690 				current_aen.members.evt_class =
1691 				    prior_aen.members.evt_class;
1692 			mfi_abort(sc, &sc->mfi_aen_cm);
1693 		}
1694 	}
1695 
1696 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1697 	    (void **)&ed, sizeof(*ed));
1698 	if (error)
1699 		goto out;
1700 
1701 	dcmd = &cm->cm_frame->dcmd;
1702 	((uint32_t *)&dcmd->mbox)[0] = seq;
1703 	((uint32_t *)&dcmd->mbox)[1] = locale;
1704 	cm->cm_flags = MFI_CMD_DATAIN;
1705 	cm->cm_complete = mfi_aen_complete;
1706 
1707 	sc->last_seq_num = seq;
1708 	sc->mfi_aen_cm = cm;
1709 
1710 	mfi_enqueue_ready(cm);
1711 	mfi_startio(sc);
1712 
1713 out:
1714 	return (error);
1715 }
1716 
1717 static void
1718 mfi_aen_complete(struct mfi_command *cm)
1719 {
1720 	struct mfi_frame_header *hdr;
1721 	struct mfi_softc *sc;
1722 	struct mfi_evt_detail *detail;
1723 	struct mfi_aen *mfi_aen_entry, *tmp;
1724 	int seq = 0, aborted = 0;
1725 
1726 	sc = cm->cm_sc;
1727 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1728 
1729 	if (sc->mfi_aen_cm == NULL)
1730 		return;
1731 
1732 	hdr = &cm->cm_frame->header;
1733 
1734 	if (sc->cm_aen_abort ||
1735 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1736 		sc->cm_aen_abort = 0;
1737 		aborted = 1;
1738 	} else {
1739 		sc->mfi_aen_triggered = 1;
1740 		if (sc->mfi_poll_waiting) {
1741 			sc->mfi_poll_waiting = 0;
1742 			selwakeup(&sc->mfi_select);
1743 		}
1744 		detail = cm->cm_data;
1745 		mfi_queue_evt(sc, detail);
1746 		seq = detail->seq + 1;
1747 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1748 		    tmp) {
1749 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1750 			    aen_link);
1751 			PROC_LOCK(mfi_aen_entry->p);
1752 			kern_psignal(mfi_aen_entry->p, SIGIO);
1753 			PROC_UNLOCK(mfi_aen_entry->p);
1754 			free(mfi_aen_entry, M_MFIBUF);
1755 		}
1756 	}
1757 
1758 	free(cm->cm_data, M_MFIBUF);
1759 	wakeup(&sc->mfi_aen_cm);
1760 	sc->mfi_aen_cm = NULL;
1761 	mfi_release_command(cm);
1762 
1763 	/* set it up again so the driver can catch more events */
1764 	if (!aborted)
1765 		mfi_aen_setup(sc, seq);
1766 }
1767 
1768 #define MAX_EVENTS 15
1769 
1770 static int
1771 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1772 {
1773 	struct mfi_command *cm;
1774 	struct mfi_dcmd_frame *dcmd;
1775 	struct mfi_evt_list *el;
1776 	union mfi_evt class_locale;
1777 	int error, i, seq, size;
1778 
1779 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1780 
1781 	class_locale.members.reserved = 0;
1782 	class_locale.members.locale = mfi_event_locale;
1783 	class_locale.members.evt_class  = mfi_event_class;
1784 
1785 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1786 		* (MAX_EVENTS - 1);
1787 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1788 	if (el == NULL)
1789 		return (ENOMEM);
1790 
1791 	for (seq = start_seq;;) {
1792 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1793 			free(el, M_MFIBUF);
1794 			return (EBUSY);
1795 		}
1796 
1797 		dcmd = &cm->cm_frame->dcmd;
1798 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1799 		dcmd->header.cmd = MFI_CMD_DCMD;
1800 		dcmd->header.timeout = 0;
1801 		dcmd->header.data_len = size;
1802 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1803 		((uint32_t *)&dcmd->mbox)[0] = seq;
1804 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1805 		cm->cm_sg = &dcmd->sgl;
1806 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1807 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1808 		cm->cm_data = el;
1809 		cm->cm_len = size;
1810 
1811 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1812 			device_printf(sc->mfi_dev,
1813 			    "Failed to get controller entries\n");
1814 			mfi_release_command(cm);
1815 			break;
1816 		}
1817 
1818 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1819 		    BUS_DMASYNC_POSTREAD);
1820 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1821 
1822 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1823 			mfi_release_command(cm);
1824 			break;
1825 		}
1826 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1827 			device_printf(sc->mfi_dev,
1828 			    "Error %d fetching controller entries\n",
1829 			    dcmd->header.cmd_status);
1830 			mfi_release_command(cm);
1831 			error = EIO;
1832 			break;
1833 		}
1834 		mfi_release_command(cm);
1835 
1836 		for (i = 0; i < el->count; i++) {
1837 			/*
1838 			 * If this event is newer than 'stop_seq' then
1839 			 * break out of the loop.  Note that the log
1840 			 * is a circular buffer so we have to handle
1841 			 * the case that our stop point is earlier in
1842 			 * the buffer than our start point.
1843 			 */
1844 			if (el->event[i].seq >= stop_seq) {
1845 				if (start_seq <= stop_seq)
1846 					break;
1847 				else if (el->event[i].seq < start_seq)
1848 					break;
1849 			}
1850 			mfi_queue_evt(sc, &el->event[i]);
1851 		}
1852 		seq = el->event[el->count - 1].seq + 1;
1853 	}
1854 
1855 	free(el, M_MFIBUF);
1856 	return (error);
1857 }
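
/*
 * Editor's example of the wrap handling above: the log is circular, so
 * start_seq = 900 with stop_seq = 100 means "start at 900, wrap past the
 * highest sequence number, stop at 100".  An event with seq 950 passes
 * (>= stop_seq, but not yet below start_seq, i.e. not wrapped), while an
 * event with seq 150 terminates the scan (>= stop_seq and < start_seq).
 * Without wrap (start_seq <= stop_seq), any seq >= stop_seq terminates.
 */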
1858 
1859 static int
1860 mfi_add_ld(struct mfi_softc *sc, int id)
1861 {
1862 	struct mfi_command *cm;
1863 	struct mfi_dcmd_frame *dcmd = NULL;
1864 	struct mfi_ld_info *ld_info = NULL;
1865 	struct mfi_disk_pending *ld_pend;
1866 	int error;
1867 
1868 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1869 
1870 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1871 	if (ld_pend != NULL) {
1872 		ld_pend->ld_id = id;
1873 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1874 	}
1875 
1876 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1877 	    (void **)&ld_info, sizeof(*ld_info));
1878 	if (error) {
1879 		device_printf(sc->mfi_dev,
1880 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1881 		if (ld_info)
1882 			free(ld_info, M_MFIBUF);
1883 		return (error);
1884 	}
1885 	cm->cm_flags = MFI_CMD_DATAIN;
1886 	dcmd = &cm->cm_frame->dcmd;
1887 	dcmd->mbox[0] = id;
1888 	if (mfi_wait_command(sc, cm) != 0) {
1889 		device_printf(sc->mfi_dev,
1890 		    "Failed to get logical drive: %d\n", id);
1891 		free(ld_info, M_MFIBUF);
1892 		return (0);
1893 	}
1894 	if (ld_info->ld_config.params.isSSCD != 1)
1895 		mfi_add_ld_complete(cm);
1896 	else {
1897 		mfi_release_command(cm);
1898 		if (ld_info)		/* SSCD: ld_info is freed here */
1899 			free(ld_info, M_MFIBUF);
1900 	}
1901 	return (0);
1902 }
1903 
1904 static void
1905 mfi_add_ld_complete(struct mfi_command *cm)
1906 {
1907 	struct mfi_frame_header *hdr;
1908 	struct mfi_ld_info *ld_info;
1909 	struct mfi_softc *sc;
1910 	device_t child;
1911 
1912 	sc = cm->cm_sc;
1913 	hdr = &cm->cm_frame->header;
1914 	ld_info = cm->cm_private;
1915 
1916 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1917 		free(ld_info, M_MFIBUF);
1918 		wakeup(&sc->mfi_map_sync_cm);
1919 		mfi_release_command(cm);
1920 		return;
1921 	}
1922 	wakeup(&sc->mfi_map_sync_cm);
1923 	mfi_release_command(cm);
1924 
1925 	mtx_unlock(&sc->mfi_io_lock);
1926 	bus_topo_lock();
1927 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1928 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1929 		free(ld_info, M_MFIBUF);
1930 		bus_topo_unlock();
1931 		mtx_lock(&sc->mfi_io_lock);
1932 		return;
1933 	}
1934 
1935 	device_set_ivars(child, ld_info);
1936 	device_set_desc(child, "MFI Logical Disk");
1937 	bus_generic_attach(sc->mfi_dev);
1938 	bus_topo_unlock();
1939 	mtx_lock(&sc->mfi_io_lock);
1940 }
1941 
1942 static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1943 {
1944 	struct mfi_command *cm;
1945 	struct mfi_dcmd_frame *dcmd = NULL;
1946 	struct mfi_pd_info *pd_info = NULL;
1947 	struct mfi_system_pending *syspd_pend;
1948 	int error;
1949 
1950 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1951 
1952 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1953 	if (syspd_pend != NULL) {
1954 		syspd_pend->pd_id = id;
1955 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1956 	}
1957 
1958 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1959 		(void **)&pd_info, sizeof(*pd_info));
1960 	if (error) {
1961 		device_printf(sc->mfi_dev,
1962 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1963 		    error);
1964 		if (pd_info)
1965 			free(pd_info, M_MFIBUF);
1966 		return (error);
1967 	}
1968 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1969 	dcmd = &cm->cm_frame->dcmd;
1970 	dcmd->mbox[0] = id;
1971 	dcmd->header.scsi_status = 0;
1972 	dcmd->header.pad0 = 0;
1973 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1974 		device_printf(sc->mfi_dev,
1975 		    "Failed to get physical drive info %d\n", id);
1976 		free(pd_info, M_MFIBUF);
1977 		mfi_release_command(cm);
1978 		return (error);
1979 	}
1980 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1981 	    BUS_DMASYNC_POSTREAD);
1982 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1983 	mfi_add_sys_pd_complete(cm);
1984 	return (0);
1985 }
1986 
1987 static void
1988 mfi_add_sys_pd_complete(struct mfi_command *cm)
1989 {
1990 	struct mfi_frame_header *hdr;
1991 	struct mfi_pd_info *pd_info;
1992 	struct mfi_softc *sc;
1993 	device_t child;
1994 
1995 	sc = cm->cm_sc;
1996 	hdr = &cm->cm_frame->header;
1997 	pd_info = cm->cm_private;
1998 
1999 	if (hdr->cmd_status != MFI_STAT_OK) {
2000 		free(pd_info, M_MFIBUF);
2001 		mfi_release_command(cm);
2002 		return;
2003 	}
2004 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2005 		device_printf(sc->mfi_dev, "PD=%x is not a SYSTEM PD\n",
2006 		    pd_info->ref.v.device_id);
2007 		free(pd_info, M_MFIBUF);
2008 		mfi_release_command(cm);
2009 		return;
2010 	}
2011 	mfi_release_command(cm);
2012 
2013 	mtx_unlock(&sc->mfi_io_lock);
2014 	bus_topo_lock();
2015 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2016 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 		free(pd_info, M_MFIBUF);
2018 		bus_topo_unlock();
2019 		mtx_lock(&sc->mfi_io_lock);
2020 		return;
2021 	}
2022 
2023 	device_set_ivars(child, pd_info);
2024 	device_set_desc(child, "MFI System PD");
2025 	bus_generic_attach(sc->mfi_dev);
2026 	bus_topo_unlock();
2027 	mtx_lock(&sc->mfi_io_lock);
2028 }
2029 
2030 static struct mfi_command *
2031 mfi_bio_command(struct mfi_softc *sc)
2032 {
2033 	struct bio *bio;
2034 	struct mfi_command *cm = NULL;
2035 
2036 	/* Reserve two commands to avoid IOCTL starvation. */
2037 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2038 		return (NULL);
2039 	}
2040 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2041 		return (NULL);
2042 	}
2043 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 		cm = mfi_build_ldio(sc, bio);
2045 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 		cm = mfi_build_syspdio(sc, bio);
2047 	}
2048 	if (cm == NULL)
2049 		mfi_enqueue_bio(sc, bio);
2050 	return (cm);
2051 }
2052 
2053 /*
2054  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2055  */
2056 
2057 int
2058 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2059 {
2060 	int cdb_len;
2061 
2062 	if (((lba & 0x1fffff) == lba) &&
2063 	    ((block_count & 0xff) == block_count) &&
2064 	    (byte2 == 0)) {
2065 		/* We can fit in a 6-byte CDB */
2066 		struct scsi_rw_6 *scsi_cmd;
2067 
2068 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 		scsi_ulto3b(lba, scsi_cmd->addr);
2071 		scsi_cmd->length = block_count & 0xff;
2072 		scsi_cmd->control = 0;
2073 		cdb_len = sizeof(*scsi_cmd);
2074 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2075 		/* Need a 10-byte CDB */
2076 		struct scsi_rw_10 *scsi_cmd;
2077 
2078 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 		scsi_cmd->byte2 = byte2;
2081 		scsi_ulto4b(lba, scsi_cmd->addr);
2082 		scsi_cmd->reserved = 0;
2083 		scsi_ulto2b(block_count, scsi_cmd->length);
2084 		scsi_cmd->control = 0;
2085 		cdb_len = sizeof(*scsi_cmd);
2086 	} else if (((block_count & 0xffffffff) == block_count) &&
2087 	    ((lba & 0xffffffff) == lba)) {
2088 		/* Block count is too big for a 10-byte CDB; use a 12-byte CDB */
2089 		struct scsi_rw_12 *scsi_cmd;
2090 
2091 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 		scsi_cmd->byte2 = byte2;
2094 		scsi_ulto4b(lba, scsi_cmd->addr);
2095 		scsi_cmd->reserved = 0;
2096 		scsi_ulto4b(block_count, scsi_cmd->length);
2097 		scsi_cmd->control = 0;
2098 		cdb_len = sizeof(*scsi_cmd);
2099 	} else {
2100 		/*
2101 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2102 		 * than 2^32
2103 		 */
2104 		struct scsi_rw_16 *scsi_cmd;
2105 
2106 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 		scsi_cmd->byte2 = byte2;
2109 		scsi_u64to8b(lba, scsi_cmd->addr);
2110 		scsi_cmd->reserved = 0;
2111 		scsi_ulto4b(block_count, scsi_cmd->length);
2112 		scsi_cmd->control = 0;
2113 		cdb_len = sizeof(*scsi_cmd);
2114 	}
2115 
2116 	return (cdb_len);
2117 }
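
/*
 * Editor's note: a minimal standalone sketch (not driver code) of the
 * CDB-size selection above, with the same thresholds: a 6-byte CDB holds
 * a 21-bit LBA and an 8-bit count (when byte2 is 0), a 10-byte CDB a
 * 32-bit LBA and a 16-bit count, a 12-byte CDB a 32-bit LBA and a 32-bit
 * count, and a 16-byte CDB anything larger.
 */
#if 0
static int
cdb_len_for(uint64_t lba, uint32_t count, uint8_t byte2)
{
	if (lba <= 0x1fffff && count <= 0xff && byte2 == 0)
		return (6);	/* READ_6/WRITE_6 */
	if (lba <= 0xffffffff && count <= 0xffff)
		return (10);	/* READ_10/WRITE_10 */
	if (lba <= 0xffffffff)
		return (12);	/* READ_12/WRITE_12 */
	return (16);		/* READ_16/WRITE_16 */
}
#endif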
2118 
2119 extern char *unmapped_buf;
2120 
2121 static struct mfi_command *
2122 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2123 {
2124 	struct mfi_command *cm;
2125 	struct mfi_pass_frame *pass;
2126 	uint32_t context = 0;
2127 	int flags = 0, blkcount = 0, readop;
2128 	uint8_t cdb_len;
2129 
2130 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2131 
2132 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2133 		return (NULL);
2134 
2135 	/* Zero out the MFI frame */
2136 	context = cm->cm_frame->header.context;
2137 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2138 	cm->cm_frame->header.context = context;
2139 	pass = &cm->cm_frame->pass;
2140 	bzero(pass->cdb, 16);
2141 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2142 	switch (bio->bio_cmd) {
2143 	case BIO_READ:
2144 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2145 		readop = 1;
2146 		break;
2147 	case BIO_WRITE:
2148 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2149 		readop = 0;
2150 		break;
2151 	default:
2152 		/* TODO: what about BIO_DELETE? */
2153 		biofinish(bio, NULL, EOPNOTSUPP);
2154 		mfi_enqueue_free(cm);
2155 		return (NULL);
2156 	}
2157 
2158 	/* Cheat with the sector length to avoid a non-constant division */
2159 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2160 	/* Fill the LBA and Transfer length in CDB */
2161 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2162 	    pass->cdb);
2163 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 	pass->header.lun_id = 0;
2165 	pass->header.timeout = 0;
2166 	pass->header.flags = 0;
2167 	pass->header.scsi_status = 0;
2168 	pass->header.sense_len = MFI_SENSE_LEN;
2169 	pass->header.data_len = bio->bio_bcount;
2170 	pass->header.cdb_len = cdb_len;
2171 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 	cm->cm_complete = mfi_bio_complete;
2174 	cm->cm_private = bio;
2175 	cm->cm_data = unmapped_buf;
2176 	cm->cm_len = bio->bio_bcount;
2177 	cm->cm_sg = &pass->sgl;
2178 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 	cm->cm_flags = flags;
2180 
2181 	return (cm);
2182 }
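
/*
 * Editor's note: howmany(x, y) from <sys/param.h> expands to
 * (((x) + ((y) - 1)) / (y)), i.e. division rounded up.  Assuming the
 * usual 512-byte MFI_SECTOR_LEN, a 4096-byte bio yields blkcount = 8 and
 * a 1000-byte transfer yields 2; because the divisor is a compile-time
 * constant, the compiler can strength-reduce the division to a shift,
 * which is the "cheat" the comments in the two builders refer to.
 */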
2183 
2184 static struct mfi_command *
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2186 {
2187 	struct mfi_io_frame *io;
2188 	struct mfi_command *cm;
2189 	int flags;
2190 	uint32_t blkcount;
2191 	uint32_t context = 0;
2192 
2193 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2194 
2195 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2196 		return (NULL);
2197 
2198 	/* Zero out the MFI frame */
2199 	context = cm->cm_frame->header.context;
2200 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 	cm->cm_frame->header.context = context;
2202 	io = &cm->cm_frame->io;
2203 	switch (bio->bio_cmd) {
2204 	case BIO_READ:
2205 		io->header.cmd = MFI_CMD_LD_READ;
2206 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2207 		break;
2208 	case BIO_WRITE:
2209 		io->header.cmd = MFI_CMD_LD_WRITE;
2210 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2211 		break;
2212 	default:
2213 		/* TODO: what about BIO_DELETE? */
2214 		biofinish(bio, NULL, EOPNOTSUPP);
2215 		mfi_enqueue_free(cm);
2216 		return (NULL);
2217 	}
2218 
2219 	/* Cheat with the sector length to avoid a non-constant division */
2220 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2221 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2222 	io->header.timeout = 0;
2223 	io->header.flags = 0;
2224 	io->header.scsi_status = 0;
2225 	io->header.sense_len = MFI_SENSE_LEN;
2226 	io->header.data_len = blkcount;
2227 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2228 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2229 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2230 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2231 	cm->cm_complete = mfi_bio_complete;
2232 	cm->cm_private = bio;
2233 	cm->cm_data = unmapped_buf;
2234 	cm->cm_len = bio->bio_bcount;
2235 	cm->cm_sg = &io->sgl;
2236 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2237 	cm->cm_flags = flags;
2238 
2239 	return (cm);
2240 }
2241 
2242 static void
2243 mfi_bio_complete(struct mfi_command *cm)
2244 {
2245 	struct bio *bio;
2246 	struct mfi_frame_header *hdr;
2247 	struct mfi_softc *sc;
2248 
2249 	bio = cm->cm_private;
2250 	hdr = &cm->cm_frame->header;
2251 	sc = cm->cm_sc;
2252 
2253 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2254 		bio->bio_flags |= BIO_ERROR;
2255 		bio->bio_error = EIO;
2256 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2257 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2258 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2259 	} else if (cm->cm_error != 0) {
2260 		bio->bio_flags |= BIO_ERROR;
2261 		bio->bio_error = cm->cm_error;
2262 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2263 		    cm, cm->cm_error);
2264 	}
2265 
2266 	mfi_release_command(cm);
2267 	mfi_disk_complete(bio);
2268 }
2269 
2270 void
2271 mfi_startio(struct mfi_softc *sc)
2272 {
2273 	struct mfi_command *cm;
2274 	struct ccb_hdr *ccbh;
2275 
2276 	for (;;) {
2277 		/* Don't bother if we're short on resources */
2278 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2279 			break;
2280 
2281 		/* Try a command that has already been prepared */
2282 		cm = mfi_dequeue_ready(sc);
2283 
2284 		if (cm == NULL) {
2285 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2286 				cm = sc->mfi_cam_start(ccbh);
2287 		}
2288 
2289 		/* Nope, so look for work on the bioq */
2290 		if (cm == NULL)
2291 			cm = mfi_bio_command(sc);
2292 
2293 		/* No work available, so exit */
2294 		if (cm == NULL)
2295 			break;
2296 
2297 		/* Send the command to the controller */
2298 		if (mfi_mapcmd(sc, cm) != 0) {
2299 			device_printf(sc->mfi_dev, "Failed to start I/O\n");
2300 			mfi_requeue_ready(cm);
2301 			break;
2302 		}
2303 	}
2304 }
2305 
2306 int
2307 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2308 {
2309 	int error, polled;
2310 
2311 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2312 
2313 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2314 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2315 		if (cm->cm_flags & MFI_CMD_CCB)
2316 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2317 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2318 			    polled);
2319 		else if (cm->cm_flags & MFI_CMD_BIO)
2320 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2321 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2322 			    polled);
2323 		else
2324 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2325 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2326 			    mfi_data_cb, cm, polled);
2327 		if (error == EINPROGRESS) {
2328 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2329 			return (0);
2330 		}
2331 	} else {
2332 		error = mfi_send_frame(sc, cm);
2333 	}
2334 
2335 	return (error);
2336 }
2337 
2338 static void
2339 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2340 {
2341 	struct mfi_frame_header *hdr;
2342 	struct mfi_command *cm;
2343 	union mfi_sgl *sgl;
2344 	struct mfi_softc *sc;
2345 	int i, j, first, dir;
2346 	int sge_size, locked;
2347 
2348 	cm = (struct mfi_command *)arg;
2349 	sc = cm->cm_sc;
2350 	hdr = &cm->cm_frame->header;
2351 	sgl = cm->cm_sg;
2352 
2353 	/*
2354 	 * Check whether we already hold the lock: this is an async
2355 	 * callback, so even though our caller mfi_mapcmd() asserts
2356 	 * that it holds the lock, there is no guarantee that the lock
2357 	 * wasn't dropped if bus_dmamap_load() returned before this
2358 	 * callback completed.
2359 	 */
2360 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2361 		mtx_lock(&sc->mfi_io_lock);
2362 
2363 	if (error) {
2364 		printf("error %d in callback\n", error);
2365 		cm->cm_error = error;
2366 		mfi_complete(sc, cm);
2367 		goto out;
2368 	}
2369 	/* Use the IEEE SGL only for I/O on a SKINNY controller.
2370 	 * For other commands on a SKINNY controller use either
2371 	 * sg32 or sg64, depending on sizeof(bus_addr_t).
2372 	 * Also calculate the total frame size based on the type
2373 	 * of SGL used.
2374 	 */
2375 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2376 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2377 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2378 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2379 		for (i = 0; i < nsegs; i++) {
2380 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2381 			sgl->sg_skinny[i].len = segs[i].ds_len;
2382 			sgl->sg_skinny[i].flag = 0;
2383 		}
2384 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2385 		sge_size = sizeof(struct mfi_sg_skinny);
2386 		hdr->sg_count = nsegs;
2387 	} else {
2388 		j = 0;
2389 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2390 			first = cm->cm_stp_len;
2391 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2392 				sgl->sg32[j].addr = segs[0].ds_addr;
2393 				sgl->sg32[j++].len = first;
2394 			} else {
2395 				sgl->sg64[j].addr = segs[0].ds_addr;
2396 				sgl->sg64[j++].len = first;
2397 			}
2398 		} else
2399 			first = 0;
2400 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2401 			for (i = 0; i < nsegs; i++) {
2402 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2403 				sgl->sg32[j++].len = segs[i].ds_len - first;
2404 				first = 0;
2405 			}
2406 		} else {
2407 			for (i = 0; i < nsegs; i++) {
2408 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2409 				sgl->sg64[j++].len = segs[i].ds_len - first;
2410 				first = 0;
2411 			}
2412 			hdr->flags |= MFI_FRAME_SGL64;
2413 		}
2414 		hdr->sg_count = j;
2415 		sge_size = sc->mfi_sge_size;
2416 	}
2417 
2418 	dir = 0;
2419 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2420 		dir |= BUS_DMASYNC_PREREAD;
2421 		hdr->flags |= MFI_FRAME_DIR_READ;
2422 	}
2423 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2424 		dir |= BUS_DMASYNC_PREWRITE;
2425 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2426 	}
2427 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2428 	cm->cm_flags |= MFI_CMD_MAPPED;
2429 
2430 	/*
2431 	 * Instead of calculating the total number of frames in the
2432 	 * compound frame, assume there will always be at least one
2433 	 * frame, so there is no need to compensate for the remainder
2434 	 * of the following division.
2435 	 */
2436 	cm->cm_total_frame_size += (sge_size * nsegs);
2437 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
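	/*
	 * Editor's example, assuming 64-byte MFI frames: a command whose
	 * header plus SG list totals 200 bytes gets (200 - 1) / 64 = 3
	 * extra frames, for four 64-byte frames (256 bytes) in all.
	 */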
2438 
2439 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2440 		printf("error %d in callback from mfi_send_frame\n", error);
2441 		cm->cm_error = error;
2442 		mfi_complete(sc, cm);
2443 		goto out;
2444 	}
2445 
2446 out:
2447 	/* leave the lock in the state we found it */
2448 	if (locked == 0)
2449 		mtx_unlock(&sc->mfi_io_lock);
2450 
2451 	return;
2452 }
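
/*
 * Editor's note: the mtx_owned() dance above handles both ways a busdma
 * callback can run: synchronously from bus_dmamap_load*() (the caller
 * still holds mfi_io_lock) or deferred, once mappings free up (lock not
 * held).  A generic sketch of the pattern (hypothetical softc,
 * illustrative only):
 */
#if 0
static void
dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct softc *sc = arg;
	int locked;

	if ((locked = mtx_owned(&sc->lock)) == 0)
		mtx_lock(&sc->lock);
	/* ... work that requires sc->lock ... */
	if (locked == 0)
		mtx_unlock(&sc->lock);	/* restore the caller's lock state */
}
#endif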
2453 
2454 static int
2455 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2456 {
2457 	int error;
2458 
2459 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2460 
2461 	if (sc->MFA_enabled)
2462 		error = mfi_tbolt_send_frame(sc, cm);
2463 	else
2464 		error = mfi_std_send_frame(sc, cm);
2465 
2466 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2467 		mfi_remove_busy(cm);
2468 
2469 	return (error);
2470 }
2471 
2472 static int
2473 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2474 {
2475 	struct mfi_frame_header *hdr;
2476 	int tm = mfi_polled_cmd_timeout * 1000;
2477 
2478 	hdr = &cm->cm_frame->header;
2479 
2480 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2481 		cm->cm_timestamp = time_uptime;
2482 		mfi_enqueue_busy(cm);
2483 	} else {
2484 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2485 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2486 	}
2487 
2488 	/*
2489 	 * The bus address of the command is aligned on a 64 byte boundary,
2490 	 * leaving the six least significant bits zero.  For whatever reason, the
2491 	 * hardware wants the address shifted right by three, leaving just
2492 	 * 3 zero bits.  These three bits are then used as a prefetching
2493 	 * hint for the hardware to predict how many frames need to be
2494 	 * fetched across the bus.  If a command has more than 8 frames
2495 	 * then the 3 bits are set to 0x7 and the firmware uses other
2496 	 * information in the command to determine the total amount to fetch.
2497 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2498 	 * is enough for both 32bit and 64bit systems.
2499 	 */
2500 	if (cm->cm_extra_frames > 7)
2501 		cm->cm_extra_frames = 7;
2502 
2503 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2504 
2505 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2506 		return (0);
2507 
2508 	/* This is a polled command, so busy-wait for it to complete. */
2509 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2510 		DELAY(1000);
2511 		tm -= 1;
2512 		if (tm <= 0)
2513 			break;
2514 	}
2515 
2516 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2517 		device_printf(sc->mfi_dev, "Frame %p timed out "
2518 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2519 		return (ETIMEDOUT);
2520 	}
2521 
2522 	return (0);
2523 }
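
/*
 * Editor's sketch of the encoding described in the comment above.  The
 * actual register write is done by the chip-specific sc->mfi_issue_cmd
 * hook, so this helper is hypothetical and illustrative only.
 */
#if 0
static uint32_t
mfi_frame_hint(bus_addr_t busaddr, int extra_frames)
{
	/*
	 * 64-byte alignment guarantees the low six bits are zero, so
	 * after the shift the low three bits are free for the hint.
	 */
	return ((uint32_t)(busaddr >> 3) | (extra_frames & 0x7));
}
#endif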
2524 
2525 void
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2527 {
2528 	int dir;
2529 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530 
2531 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2532 		dir = 0;
2533 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 			dir |= BUS_DMASYNC_POSTREAD;
2536 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 			dir |= BUS_DMASYNC_POSTWRITE;
2538 
2539 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2542 	}
2543 
2544 	cm->cm_flags |= MFI_CMD_COMPLETED;
2545 
2546 	if (cm->cm_complete != NULL)
2547 		cm->cm_complete(cm);
2548 	else
2549 		wakeup(cm);
2550 }
2551 
2552 static int
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2554 {
2555 	struct mfi_command *cm;
2556 	struct mfi_abort_frame *abort;
2557 	int i = 0, error;
2558 	uint32_t context = 0;
2559 
2560 	mtx_lock(&sc->mfi_io_lock);
2561 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 		mtx_unlock(&sc->mfi_io_lock);
2563 		return (EBUSY);
2564 	}
2565 
2566 	/* Zero out the MFI frame */
2567 	context = cm->cm_frame->header.context;
2568 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 	cm->cm_frame->header.context = context;
2570 
2571 	abort = &cm->cm_frame->abort;
2572 	abort->header.cmd = MFI_CMD_ABORT;
2573 	abort->header.flags = 0;
2574 	abort->header.scsi_status = 0;
2575 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 	abort->abort_mfi_addr_hi =
2578 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2579 	cm->cm_data = NULL;
2580 	cm->cm_flags = MFI_CMD_POLLED;
2581 
2582 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 		device_printf(sc->mfi_dev, "failed to abort command\n");
2584 	mfi_release_command(cm);
2585 
2586 	mtx_unlock(&sc->mfi_io_lock);
2587 	while (i < 5 && *cm_abort != NULL) {
2588 		tsleep(cm_abort, 0, "mfiabort",
2589 		    5 * hz);
2590 		i++;
2591 	}
2592 	if (*cm_abort != NULL) {
2593 		/* Force a complete if command didn't abort */
2594 		mtx_lock(&sc->mfi_io_lock);
2595 		(*cm_abort)->cm_complete(*cm_abort);
2596 		mtx_unlock(&sc->mfi_io_lock);
2597 	}
2598 
2599 	return (error);
2600 }
2601 
2602 int
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2604      int len)
2605 {
2606 	struct mfi_command *cm;
2607 	struct mfi_io_frame *io;
2608 	int error;
2609 	uint32_t context = 0;
2610 
2611 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2612 		return (EBUSY);
2613 
2614 	/* Zero out the MFI frame */
2615 	context = cm->cm_frame->header.context;
2616 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 	cm->cm_frame->header.context = context;
2618 
2619 	io = &cm->cm_frame->io;
2620 	io->header.cmd = MFI_CMD_LD_WRITE;
2621 	io->header.target_id = id;
2622 	io->header.timeout = 0;
2623 	io->header.flags = 0;
2624 	io->header.scsi_status = 0;
2625 	io->header.sense_len = MFI_SENSE_LEN;
2626 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 	io->lba_lo = lba & 0xffffffff;
2631 	cm->cm_data = virt;
2632 	cm->cm_len = len;
2633 	cm->cm_sg = &io->sgl;
2634 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636 
2637 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2639 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 	    BUS_DMASYNC_POSTWRITE);
2641 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 	mfi_release_command(cm);
2643 
2644 	return (error);
2645 }
2646 
2647 int
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2649     int len)
2650 {
2651 	struct mfi_command *cm;
2652 	struct mfi_pass_frame *pass;
2653 	int error, readop, cdb_len;
2654 	uint32_t blkcount;
2655 
2656 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2657 		return (EBUSY);
2658 
2659 	pass = &cm->cm_frame->pass;
2660 	bzero(pass->cdb, 16);
2661 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2662 
2663 	readop = 0;
2664 	blkcount = howmany(len, MFI_SECTOR_LEN);
2665 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 	pass->header.target_id = id;
2667 	pass->header.timeout = 0;
2668 	pass->header.flags = 0;
2669 	pass->header.scsi_status = 0;
2670 	pass->header.sense_len = MFI_SENSE_LEN;
2671 	pass->header.data_len = len;
2672 	pass->header.cdb_len = cdb_len;
2673 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2675 	cm->cm_data = virt;
2676 	cm->cm_len = len;
2677 	cm->cm_sg = &pass->sgl;
2678 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2680 
2681 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2683 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 	    BUS_DMASYNC_POSTWRITE);
2685 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 	mfi_release_command(cm);
2687 
2688 	return (error);
2689 }
2690 
2691 static int
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 {
2694 	struct mfi_softc *sc;
2695 	int error;
2696 
2697 	sc = dev->si_drv1;
2698 
2699 	mtx_lock(&sc->mfi_io_lock);
2700 	if (sc->mfi_detaching)
2701 		error = ENXIO;
2702 	else {
2703 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2704 		error = 0;
2705 	}
2706 	mtx_unlock(&sc->mfi_io_lock);
2707 
2708 	return (error);
2709 }
2710 
2711 static int
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 {
2714 	struct mfi_softc *sc;
2715 	struct mfi_aen *mfi_aen_entry, *tmp;
2716 
2717 	sc = dev->si_drv1;
2718 
2719 	mtx_lock(&sc->mfi_io_lock);
2720 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2721 
2722 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 		if (mfi_aen_entry->p == curproc) {
2724 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 			    aen_link);
2726 			free(mfi_aen_entry, M_MFIBUF);
2727 		}
2728 	}
2729 	mtx_unlock(&sc->mfi_io_lock);
2730 	return (0);
2731 }
2732 
2733 static int
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2735 {
2736 
2737 	switch (opcode) {
2738 	case MFI_DCMD_LD_DELETE:
2739 	case MFI_DCMD_CFG_ADD:
2740 	case MFI_DCMD_CFG_CLEAR:
2741 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 		sx_xlock(&sc->mfi_config_lock);
2743 		return (1);
2744 	default:
2745 		return (0);
2746 	}
2747 }
2748 
2749 static void
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2751 {
2752 
2753 	if (locked)
2754 		sx_xunlock(&sc->mfi_config_lock);
2755 }
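
/*
 * Editor's note: the intended pairing, as used by mfi_user_command() and
 * the MFI_CMD ioctl path below:
 *
 *	locked = mfi_config_lock(sc, opcode);
 *	... issue the command ...
 *	mfi_config_unlock(sc, locked);
 *
 * Only configuration-changing DCMDs take the sx lock; for every other
 * opcode mfi_config_lock() returns 0 and the unlock is a no-op.
 */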
2756 
2757 /*
2758  * Perform pre-issue checks on commands from userland and possibly veto
2759  * them.
2760  */
2761 static int
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2763 {
2764 	struct mfi_disk *ld, *ld2;
2765 	int error;
2766 	struct mfi_system_pd *syspd = NULL;
2767 	uint16_t syspd_id;
2768 	uint16_t *mbox;
2769 
2770 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2771 	error = 0;
2772 	switch (cm->cm_frame->dcmd.opcode) {
2773 	case MFI_DCMD_LD_DELETE:
2774 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2776 				break;
2777 		}
2778 		if (ld == NULL)
2779 			error = ENOENT;
2780 		else
2781 			error = mfi_disk_disable(ld);
2782 		break;
2783 	case MFI_DCMD_CFG_CLEAR:
2784 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 			error = mfi_disk_disable(ld);
2786 			if (error)
2787 				break;
2788 		}
2789 		if (error) {
2790 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2791 				if (ld2 == ld)
2792 					break;
2793 				mfi_disk_enable(ld2);
2794 			}
2795 		}
2796 		break;
2797 	case MFI_DCMD_PD_STATE_SET:
2798 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2799 		syspd_id = mbox[0];
2800 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 				if (syspd->pd_id == syspd_id)
2803 					break;
2804 			}
2805 		}
2806 		else
2807 			break;
2808 		if (syspd)
2809 			error = mfi_syspd_disable(syspd);
2810 		break;
2811 	default:
2812 		break;
2813 	}
2814 	return (error);
2815 }
2816 
2817 /* Perform post-issue checks on commands from userland. */
2818 static void
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2820 {
2821 	struct mfi_disk *ld, *ldn;
2822 	struct mfi_system_pd *syspd = NULL;
2823 	uint16_t syspd_id;
2824 	uint16_t *mbox;
2825 
2826 	switch (cm->cm_frame->dcmd.opcode) {
2827 	case MFI_DCMD_LD_DELETE:
2828 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2830 				break;
2831 		}
2832 		KASSERT(ld != NULL, ("volume disappeared"));
2833 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2834 			mtx_unlock(&sc->mfi_io_lock);
2835 			bus_topo_lock();
2836 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2837 			bus_topo_unlock();
2838 			mtx_lock(&sc->mfi_io_lock);
2839 		} else
2840 			mfi_disk_enable(ld);
2841 		break;
2842 	case MFI_DCMD_CFG_CLEAR:
2843 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 			mtx_unlock(&sc->mfi_io_lock);
2845 			bus_topo_lock();
2846 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2848 			}
2849 			bus_topo_unlock();
2850 			mtx_lock(&sc->mfi_io_lock);
2851 		} else {
2852 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 				mfi_disk_enable(ld);
2854 		}
2855 		break;
2856 	case MFI_DCMD_CFG_ADD:
2857 		mfi_ldprobe(sc);
2858 		break;
2859 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2860 		mfi_ldprobe(sc);
2861 		break;
2862 	case MFI_DCMD_PD_STATE_SET:
2863 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2864 		syspd_id = mbox[0];
2865 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2867 				if (syspd->pd_id == syspd_id)
2868 					break;
2869 			}
2870 		}
2871 		else
2872 			break;
2873 		/* If the transition fails then enable the syspd again */
2874 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 			mfi_syspd_enable(syspd);
2876 		break;
2877 	}
2878 }
2879 
2880 static int
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 {
2883 	struct mfi_config_data *conf_data;
2884 	struct mfi_command *ld_cm = NULL;
2885 	struct mfi_ld_info *ld_info = NULL;
2886 	struct mfi_ld_config *ld;
2887 	char *p;
2888 	int error = 0;
2889 
2890 	conf_data = (struct mfi_config_data *)cm->cm_data;
2891 
2892 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 		p = (char *)conf_data->array;
2894 		p += conf_data->array_size * conf_data->array_count;
2895 		ld = (struct mfi_ld_config *)p;
2896 		if (ld->params.isSSCD == 1)
2897 			error = 1;
2898 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 		    (void **)&ld_info, sizeof(*ld_info));
2901 		if (error) {
2902 			device_printf(sc->mfi_dev, "Failed to allocate "
2903 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2904 			if (ld_info)
2905 				free(ld_info, M_MFIBUF);
2906 			return (0);
2907 		}
2908 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2910 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 		if (mfi_wait_command(sc, ld_cm) != 0) {
2912 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 			mfi_release_command(ld_cm);
2914 			free(ld_info, M_MFIBUF);
2915 			return (0);
2916 		}
2917 
2918 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 			free(ld_info, M_MFIBUF);
2920 			mfi_release_command(ld_cm);
2921 			return (0);
2922 		}
2923 		else
2924 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925 
2926 		if (ld_info->ld_config.params.isSSCD == 1)
2927 			error = 1;
2928 
2929 		mfi_release_command(ld_cm);
2930 		free(ld_info, M_MFIBUF);
2931 	}
2932 	return (error);
2933 }
2934 
2935 static int
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2937 {
2938 	uint8_t i;
2939 	struct mfi_ioc_packet *ioc;
2940 	int sge_size, error;
2941 	struct megasas_sge *kern_sge;
2942 	ioc = (struct mfi_ioc_packet *)arg;
2943 
2944 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2945 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2946 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2947 
2948 	if (sizeof(bus_addr_t) == 8) {
2949 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 		cm->cm_extra_frames = 2;
2951 		sge_size = sizeof(struct mfi_sg64);
2952 	} else {
2953 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 		sge_size = sizeof(struct mfi_sg32);
2955 	}
2956 
2957 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2958 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2960 			1, 0,			/* algnmnt, boundary */
2961 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 			BUS_SPACE_MAXADDR,	/* highaddr */
2963 			NULL, NULL,		/* filter, filterarg */
2964 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2965 			2,			/* nsegments */
2966 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 			BUS_DMA_ALLOCNOW,	/* flags */
2968 			NULL, NULL,		/* lockfunc, lockarg */
2969 			&sc->mfi_kbuff_arr_dmat[i])) {
2970 			device_printf(sc->mfi_dev,
2971 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2972 			return (ENOMEM);
2973 		}
2974 
2975 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2978 			device_printf(sc->mfi_dev,
2979 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2980 			return (ENOMEM);
2981 		}
2982 
2983 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2987 
2988 		if (!sc->kbuff_arr[i]) {
2989 			device_printf(sc->mfi_dev,
2990 			    "Could not allocate memory for kbuff_arr info\n");
2991 			return (ENOMEM);
2992 		}
2993 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2995 
2996 		if (sizeof(bus_addr_t) == 8) {
2997 			cm->cm_frame->stp.sgl.sg64[i].addr =
2998 			    kern_sge[i].phys_addr;
2999 			cm->cm_frame->stp.sgl.sg64[i].len =
3000 			    ioc->mfi_sgl[i].iov_len;
3001 		} else {
3002 			cm->cm_frame->stp.sgl.sg32[i].addr =
3003 			    kern_sge[i].phys_addr;
3004 			cm->cm_frame->stp.sgl.sg32[i].len =
3005 			    ioc->mfi_sgl[i].iov_len;
3006 		}
3007 
3008 		error = copyin(ioc->mfi_sgl[i].iov_base,
3009 		    sc->kbuff_arr[i],
3010 		    ioc->mfi_sgl[i].iov_len);
3011 		if (error != 0) {
3012 			device_printf(sc->mfi_dev, "Copy in failed\n");
3013 			return (error);
3014 		}
3015 	}
3016 
3017 	cm->cm_flags |= MFI_CMD_MAPPED;
3018 	return (0);
3019 }
3020 
3021 static int
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3023 {
3024 	struct mfi_command *cm;
3025 	struct mfi_dcmd_frame *dcmd;
3026 	void *ioc_buf = NULL;
3027 	uint32_t context;
3028 	int error = 0, locked;
3029 
3030 	if (ioc->buf_size > 0) {
3031 		if (ioc->buf_size > 1024 * 1024)
3032 			return (ENOMEM);
3033 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3034 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3035 		if (error) {
3036 			device_printf(sc->mfi_dev, "failed to copyin\n");
3037 			free(ioc_buf, M_MFIBUF);
3038 			return (error);
3039 		}
3040 	}
3041 
3042 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3043 
3044 	mtx_lock(&sc->mfi_io_lock);
3045 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3046 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3047 
3048 	/* Save context for later */
3049 	context = cm->cm_frame->header.context;
3050 
3051 	dcmd = &cm->cm_frame->dcmd;
3052 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3053 
3054 	cm->cm_sg = &dcmd->sgl;
3055 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3056 	cm->cm_data = ioc_buf;
3057 	cm->cm_len = ioc->buf_size;
3058 
3059 	/* restore context */
3060 	cm->cm_frame->header.context = context;
3061 
3062 	/* Cheat since we don't know if we're writing or reading */
3063 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3064 
3065 	error = mfi_check_command_pre(sc, cm);
3066 	if (error)
3067 		goto out;
3068 
3069 	error = mfi_wait_command(sc, cm);
3070 	if (error) {
3071 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3072 		goto out;
3073 	}
3074 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3075 	mfi_check_command_post(sc, cm);
3076 out:
3077 	mfi_release_command(cm);
3078 	mtx_unlock(&sc->mfi_io_lock);
3079 	mfi_config_unlock(sc, locked);
3080 	if (ioc->buf_size > 0)
3081 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3082 	if (ioc_buf)
3083 		free(ioc_buf, M_MFIBUF);
3084 	return (error);
3085 }
3086 
3087 #define	PTRIN(p)		((void *)(uintptr_t)(p))
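
/*
 * Editor's note: PTRIN() widens a user pointer stored as a 32-bit
 * integer in the compat structures back into a kernel void *; the
 * intermediate uintptr_t cast avoids an int-to-pointer conversion
 * warning on 64-bit kernels.
 */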
3088 
3089 static int
3090 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3091 {
3092 	struct mfi_softc *sc;
3093 	union mfi_statrequest *ms;
3094 	struct mfi_ioc_packet *ioc;
3095 #ifdef COMPAT_FREEBSD32
3096 	struct mfi_ioc_packet32 *ioc32;
3097 #endif
3098 	struct mfi_ioc_aen *aen;
3099 	struct mfi_command *cm = NULL;
3100 	uint32_t context = 0;
3101 	union mfi_sense_ptr sense_ptr;
3102 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3103 	size_t len;
3104 	int i, res;
3105 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3106 #ifdef COMPAT_FREEBSD32
3107 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3108 	struct mfi_ioc_passthru iop_swab;
3109 #endif
3110 	int error, locked;
3111 	sc = dev->si_drv1;
3112 	error = 0;
3113 
3114 	if (sc->adpreset)
3115 		return (EBUSY);
3116 
3117 	if (sc->hw_crit_error)
3118 		return (EBUSY);
3119 
3120 	if (sc->issuepend_done == 0)
3121 		return (EBUSY);
3122 
3123 	switch (cmd) {
3124 	case MFIIO_STATS:
3125 		ms = (union mfi_statrequest *)arg;
3126 		switch (ms->ms_item) {
3127 		case MFIQ_FREE:
3128 		case MFIQ_BIO:
3129 		case MFIQ_READY:
3130 		case MFIQ_BUSY:
3131 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3132 			    sizeof(struct mfi_qstat));
3133 			break;
3134 		default:
3135 			error = ENOIOCTL;
3136 			break;
3137 		}
3138 		break;
3139 	case MFIIO_QUERY_DISK:
3140 	{
3141 		struct mfi_query_disk *qd;
3142 		struct mfi_disk *ld;
3143 
3144 		qd = (struct mfi_query_disk *)arg;
3145 		mtx_lock(&sc->mfi_io_lock);
3146 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3147 			if (ld->ld_id == qd->array_id)
3148 				break;
3149 		}
3150 		if (ld == NULL) {
3151 			qd->present = 0;
3152 			mtx_unlock(&sc->mfi_io_lock);
3153 			return (0);
3154 		}
3155 		qd->present = 1;
3156 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3157 			qd->open = 1;
3158 		bzero(qd->devname, SPECNAMELEN + 1);
3159 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3160 		mtx_unlock(&sc->mfi_io_lock);
3161 		break;
3162 	}
3163 	case MFI_CMD:
3164 #ifdef COMPAT_FREEBSD32
3165 	case MFI_CMD32:
3166 #endif
3167 		{
3168 		devclass_t devclass;
3169 		int adapter;
3170 		ioc = (struct mfi_ioc_packet *)arg;
3171 
3172 		adapter = ioc->mfi_adapter_no;
3173 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3174 			devclass = devclass_find("mfi");
3175 			sc = devclass_get_softc(devclass, adapter);
3176 		}
3177 		mtx_lock(&sc->mfi_io_lock);
3178 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3179 			mtx_unlock(&sc->mfi_io_lock);
3180 			return (EBUSY);
3181 		}
3182 		mtx_unlock(&sc->mfi_io_lock);
3183 		locked = 0;
3184 
3185 		/*
3186 		 * save off original context since copying from user
3187 		 * will clobber some data
3188 		 */
3189 		context = cm->cm_frame->header.context;
3190 		cm->cm_frame->header.context = cm->cm_index;
3191 
3192 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3193 		    2 * MEGAMFI_FRAME_SIZE);
3194 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3195 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3196 		cm->cm_frame->header.scsi_status = 0;
3197 		cm->cm_frame->header.pad0 = 0;
3198 		if (ioc->mfi_sge_count) {
3199 			cm->cm_sg =
3200 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3201 		}
3202 		cm->cm_flags = 0;
3203 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3204 			cm->cm_flags |= MFI_CMD_DATAIN;
3205 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3206 			cm->cm_flags |= MFI_CMD_DATAOUT;
3207 		/* Legacy app shim */
3208 		if (cm->cm_flags == 0)
3209 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3210 		cm->cm_len = cm->cm_frame->header.data_len;
3211 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3212 #ifdef COMPAT_FREEBSD32
3213 			if (cmd == MFI_CMD) {
3214 #endif
3215 				/* Native */
3216 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3217 #ifdef COMPAT_FREEBSD32
3218 			} else {
3219 				/* 32bit on 64bit */
3220 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3221 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3222 			}
3223 #endif
3224 			cm->cm_len += cm->cm_stp_len;
3225 		}
3226 		if (cm->cm_len &&
3227 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3228 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3229 			    M_WAITOK | M_ZERO);
3230 		} else {
3231 			cm->cm_data = NULL;
3232 		}
3233 
3234 		/* restore header context */
3235 		cm->cm_frame->header.context = context;
3236 
3237 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3238 			res = mfi_stp_cmd(sc, cm, arg);
3239 			if (res != 0)
3240 				goto out;
3241 		} else {
3242 			temp = data;
3243 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3244 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3245 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3246 #ifdef COMPAT_FREEBSD32
3247 					if (cmd == MFI_CMD) {
3248 #endif
3249 						/* Native */
3250 						addr = ioc->mfi_sgl[i].iov_base;
3251 						len = ioc->mfi_sgl[i].iov_len;
3252 #ifdef COMPAT_FREEBSD32
3253 					} else {
3254 						/* 32bit on 64bit */
3255 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3256 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3257 						len = ioc32->mfi_sgl[i].iov_len;
3258 					}
3259 #endif
3260 					error = copyin(addr, temp, len);
3261 					if (error != 0) {
3262 						device_printf(sc->mfi_dev,
3263 						    "Copy in failed\n");
3264 						goto out;
3265 					}
3266 					temp = &temp[len];
3267 				}
3268 			}
3269 		}
3270 
3271 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3272 			locked = mfi_config_lock(sc,
3273 			     cm->cm_frame->dcmd.opcode);
3274 
3275 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3276 			cm->cm_frame->pass.sense_addr_lo =
3277 			    (uint32_t)cm->cm_sense_busaddr;
3278 			cm->cm_frame->pass.sense_addr_hi =
3279 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3280 		}
3281 		mtx_lock(&sc->mfi_io_lock);
3282 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3283 		if (!skip_pre_post) {
3284 			error = mfi_check_command_pre(sc, cm);
3285 			if (error) {
3286 				mtx_unlock(&sc->mfi_io_lock);
3287 				goto out;
3288 			}
3289 		}
3290 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3291 			device_printf(sc->mfi_dev,
3292 			    "Controller polled command failed\n");
3293 			mtx_unlock(&sc->mfi_io_lock);
3294 			goto out;
3295 		}
3296 		if (!skip_pre_post) {
3297 			mfi_check_command_post(sc, cm);
3298 		}
3299 		mtx_unlock(&sc->mfi_io_lock);
3300 
3301 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3302 			temp = data;
3303 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3304 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3305 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3306 #ifdef COMPAT_FREEBSD32
3307 					if (cmd == MFI_CMD) {
3308 #endif
3309 						/* Native */
3310 						addr = ioc->mfi_sgl[i].iov_base;
3311 						len = ioc->mfi_sgl[i].iov_len;
3312 #ifdef COMPAT_FREEBSD32
3313 					} else {
3314 						/* 32bit on 64bit */
3315 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3316 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3317 						len = ioc32->mfi_sgl[i].iov_len;
3318 					}
3319 #endif
3320 					error = copyout(temp, addr, len);
3321 					if (error != 0) {
3322 						device_printf(sc->mfi_dev,
3323 						    "Copy out failed\n");
3324 						goto out;
3325 					}
3326 					temp = &temp[len];
3327 				}
3328 			}
3329 		}
3330 
3331 		if (ioc->mfi_sense_len) {
3332 			/* get user-space sense ptr then copy out sense */
3333 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3334 			    &sense_ptr.sense_ptr_data[0],
3335 			    sizeof(sense_ptr.sense_ptr_data));
3336 #ifdef COMPAT_FREEBSD32
3337 			if (cmd != MFI_CMD) {
3338 				/*
3339 				 * Not 64-bit native, so zero out any
3340 				 * address over 32 bits. */
3341 				sense_ptr.addr.high = 0;
3342 			}
3343 #endif
3344 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3345 			    ioc->mfi_sense_len);
3346 			if (error != 0) {
3347 				device_printf(sc->mfi_dev,
3348 				    "Copy out failed\n");
3349 				goto out;
3350 			}
3351 		}
3352 
3353 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3354 out:
3355 		mfi_config_unlock(sc, locked);
3356 		if (data)
3357 			free(data, M_MFIBUF);
3358 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3359 			for (i = 0; i < 2; i++) {
3360 				if (sc->kbuff_arr[i]) {
3361 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3362 						bus_dmamap_unload(
3363 						    sc->mfi_kbuff_arr_dmat[i],
3364 						    sc->mfi_kbuff_arr_dmamap[i]
3365 						    );
3366 					if (sc->kbuff_arr[i] != NULL)
3367 						bus_dmamem_free(
3368 						    sc->mfi_kbuff_arr_dmat[i],
3369 						    sc->kbuff_arr[i],
3370 						    sc->mfi_kbuff_arr_dmamap[i]
3371 						    );
3372 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3373 						bus_dma_tag_destroy(
3374 						    sc->mfi_kbuff_arr_dmat[i]);
3375 				}
3376 			}
3377 		}
3378 		if (cm) {
3379 			mtx_lock(&sc->mfi_io_lock);
3380 			mfi_release_command(cm);
3381 			mtx_unlock(&sc->mfi_io_lock);
3382 		}
3383 
3384 		break;
3385 		}
3386 	case MFI_SET_AEN:
3387 		aen = (struct mfi_ioc_aen *)arg;
3388 		mtx_lock(&sc->mfi_io_lock);
3389 		error = mfi_aen_register(sc, aen->aen_seq_num,
3390 		    aen->aen_class_locale);
3391 		mtx_unlock(&sc->mfi_io_lock);
3392 
3393 		break;
3394 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3395 		{
3396 			devclass_t devclass;
3397 			struct mfi_linux_ioc_packet l_ioc;
3398 			int adapter;
3399 
3400 			devclass = devclass_find("mfi");
3401 			if (devclass == NULL)
3402 				return (ENOENT);
3403 
3404 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3405 			if (error)
3406 				return (error);
3407 			adapter = l_ioc.lioc_adapter_no;
3408 			sc = devclass_get_softc(devclass, adapter);
3409 			if (sc == NULL)
3410 				return (ENOENT);
3411 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3412 			    cmd, arg, flag, td));
3413 			break;
3414 		}
3415 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3416 		{
3417 			devclass_t devclass;
3418 			struct mfi_linux_ioc_aen l_aen;
3419 			int adapter;
3420 
3421 			devclass = devclass_find("mfi");
3422 			if (devclass == NULL)
3423 				return (ENOENT);
3424 
3425 			error = copyin(arg, &l_aen, sizeof(l_aen));
3426 			if (error)
3427 				return (error);
3428 			adapter = l_aen.laen_adapter_no;
3429 			sc = devclass_get_softc(devclass, adapter);
3430 			if (sc == NULL)
3431 				return (ENOENT);
3432 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3433 			    cmd, arg, flag, td));
3434 			break;
3435 		}
3436 #ifdef COMPAT_FREEBSD32
3437 	case MFIIO_PASSTHRU32:
3438 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3439 			error = ENOTTY;
3440 			break;
3441 		}
3442 		iop_swab.ioc_frame	= iop32->ioc_frame;
3443 		iop_swab.buf_size	= iop32->buf_size;
3444 		iop_swab.buf		= PTRIN(iop32->buf);
3445 		iop			= &iop_swab;
3446 		/* FALLTHROUGH */
3447 #endif
3448 	case MFIIO_PASSTHRU:
3449 		error = mfi_user_command(sc, iop);
3450 #ifdef COMPAT_FREEBSD32
3451 		if (cmd == MFIIO_PASSTHRU32)
3452 			iop32->ioc_frame = iop_swab.ioc_frame;
3453 #endif
3454 		break;
3455 	default:
3456 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3457 		error = ENOTTY;
3458 		break;
3459 	}
3460 
3461 	return (error);
3462 }
3463 
3464 static int
3465 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3466 {
3467 	struct mfi_softc *sc;
3468 	struct mfi_linux_ioc_packet l_ioc;
3469 	struct mfi_linux_ioc_aen l_aen;
3470 	struct mfi_command *cm = NULL;
3471 	struct mfi_aen *mfi_aen_entry;
3472 	union mfi_sense_ptr sense_ptr;
3473 	uint32_t context = 0;
3474 	uint8_t *data = NULL, *temp;
3475 	int i;
3476 	int error, locked;
3477 
3478 	sc = dev->si_drv1;
3479 	error = 0;
3480 	switch (cmd) {
3481 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3482 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3483 		if (error != 0)
3484 			return (error);
3485 
3486 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3487 			return (EINVAL);
3488 		}
3489 
3490 		mtx_lock(&sc->mfi_io_lock);
3491 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3492 			mtx_unlock(&sc->mfi_io_lock);
3493 			return (EBUSY);
3494 		}
3495 		mtx_unlock(&sc->mfi_io_lock);
3496 		locked = 0;
3497 
3498 		/*
3499 		 * save off original context since copying from user
3500 		 * will clobber some data
3501 		 */
3502 		context = cm->cm_frame->header.context;
3503 
3504 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3505 		      2 * MFI_DCMD_FRAME_SIZE);	/* XXX: copy size isn't quite right */
3506 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3507 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3508 		cm->cm_frame->header.scsi_status = 0;
3509 		cm->cm_frame->header.pad0 = 0;
3510 		if (l_ioc.lioc_sge_count)
3511 			cm->cm_sg =
3512 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3513 		cm->cm_flags = 0;
3514 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3515 			cm->cm_flags |= MFI_CMD_DATAIN;
3516 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3517 			cm->cm_flags |= MFI_CMD_DATAOUT;
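		/*
		 * All user data is staged through a single contiguous
		 * kernel buffer; the user scatter/gather list is walked
		 * by hand in the copyin/copyout loops below.
		 */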
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = NULL;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    temp, l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

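		/*
		 * DCMDs that modify the configuration must serialize
		 * against other configuration changes; mfi_config_lock()
		 * decides from the opcode whether a lock is required.
		 */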
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

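		/* Point the frame at the command's preallocated sense buffer. */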
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

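		/* Issue the frame and sleep until the firmware completes it. */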
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
				    PTRIN(l_ioc.lioc_sgl[i].iov_base),
				    l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet *)arg)
		    ->lioc_frame.hdr.cmd_status, 1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

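		/*
		 * Common exit path: drop the config lock if it was taken,
		 * free the bounce buffer, and return the command to the
		 * free pool.
		 */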
out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN registration requested by pid %d\n",
		    curproc->p_pid);
		/* M_WAITOK allocations never fail, so no NULL check here. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}

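/*
 * poll(2) handler: the device becomes readable once an asynchronous
 * event notification (AEN) has fired.
 */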
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		/* Consume a pending AEN trigger, if any. */
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		/* With no AEN command outstanding, no event can arrive. */
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}

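/*
 * Debugging aid: walk every mfi(4) adapter in the system and report
 * busy commands that have been outstanding longer than the command
 * timeout.
 */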
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

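		/* Full per-command dumps are disabled by default. */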
#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}

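/*
 * Watchdog timer: fires every mfi_cmd_timeout seconds, warns about
 * commands that have been busy longer than the timeout, and rearms
 * itself.
 */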
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout = 0;

	deadline = time_uptime - mfi_cmd_timeout;
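	/*
	 * When no adapter reset is pending, let mfi_tbolt_reset() have
	 * first crack; if it returns zero, simply rearm the watchdog and
	 * skip the timeout scan on this pass.
	 */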
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				/* Reset in progress; refresh the stamp. */
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever we do
				 * not fail them as there is no way to tell if
				 * the controller has actually processed them
				 * or not.
				 *
				 * In addition it's very likely that force
				 * failing a command here would cause a panic,
				 * e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	/* Change to 1 to dump the state of every adapter on each pass. */
	if (0)
		mfi_dump_all();
}