xref: /freebsd/sys/dev/mfi/mfi.c (revision f0cfa1b168014f56c02b83e5f28412cc5f78d117)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
3  *
4  * Copyright (c) 2006 IronPort Systems
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2007 LSI Corp.
30  * Copyright (c) 2007 Rajesh Prabhakaran.
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include "opt_compat.h"
59 #include "opt_mfi.h"
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/sysctl.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/poll.h>
67 #include <sys/selinfo.h>
68 #include <sys/bus.h>
69 #include <sys/conf.h>
70 #include <sys/eventhandler.h>
71 #include <sys/rman.h>
72 #include <sys/bio.h>
73 #include <sys/ioccom.h>
74 #include <sys/uio.h>
75 #include <sys/proc.h>
76 #include <sys/signalvar.h>
77 #include <sys/sysent.h>
78 #include <sys/taskqueue.h>
79 
80 #include <machine/bus.h>
81 #include <machine/resource.h>
82 
83 #include <dev/mfi/mfireg.h>
84 #include <dev/mfi/mfi_ioctl.h>
85 #include <dev/mfi/mfivar.h>
86 #include <sys/interrupt.h>
87 #include <sys/priority.h>
88 
89 static int	mfi_alloc_commands(struct mfi_softc *);
90 static int	mfi_comms_init(struct mfi_softc *);
91 static int	mfi_get_controller_info(struct mfi_softc *);
92 static int	mfi_get_log_state(struct mfi_softc *,
93 		    struct mfi_evt_log_state **);
94 static int	mfi_parse_entries(struct mfi_softc *, int, int);
95 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
96 static void	mfi_startup(void *arg);
97 static void	mfi_intr(void *arg);
98 static void	mfi_ldprobe(struct mfi_softc *sc);
99 static void	mfi_syspdprobe(struct mfi_softc *sc);
100 static void	mfi_handle_evt(void *context, int pending);
101 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
102 static void	mfi_aen_complete(struct mfi_command *);
103 static int	mfi_add_ld(struct mfi_softc *sc, int);
104 static void	mfi_add_ld_complete(struct mfi_command *);
105 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
106 static void	mfi_add_sys_pd_complete(struct mfi_command *);
107 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
108 static void	mfi_bio_complete(struct mfi_command *);
109 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
110 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
111 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
113 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
114 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
115 static void	mfi_timeout(void *);
116 static int	mfi_user_command(struct mfi_softc *,
117 		    struct mfi_ioc_passthru *);
118 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
119 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
120 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
121 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
122 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
123 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
124 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
125 		    uint32_t frame_cnt);
126 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
127 		    uint32_t frame_cnt);
128 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
129 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
130 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
131 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
132 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
133 
134 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
135 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137            0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
141            0, "event message class");
142 
143 static int	mfi_max_cmds = 128;
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
145 	   0, "Max commands limit (<= 0 means controller limit)");
146 
147 static int	mfi_detect_jbod_change = 1;
148 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
149 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
150 
151 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
152 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
153 	   &mfi_polled_cmd_timeout, 0,
154 	   "Polled command timeout - used for firmware flash etc (in seconds)");
155 
156 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
157 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
158 	   0, "Command timeout (in seconds)");
159 
160 /* Management interface */
161 static d_open_t		mfi_open;
162 static d_close_t	mfi_close;
163 static d_ioctl_t	mfi_ioctl;
164 static d_poll_t		mfi_poll;
165 
166 static struct cdevsw mfi_cdevsw = {
167 	.d_version = 	D_VERSION,
168 	.d_flags =	0,
169 	.d_open = 	mfi_open,
170 	.d_close =	mfi_close,
171 	.d_ioctl =	mfi_ioctl,
172 	.d_poll =	mfi_poll,
173 	.d_name =	"mfi",
174 };
175 
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
179 struct mfi_skinny_dma_info mfi_skinny;
180 
181 static void
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 {
184 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
185 }
186 
187 static void
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 {
190 	if (sc->mfi_flags & MFI_FLAGS_1078) {
191 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 	}
194 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 	}
198 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
200 	}
201 }
202 
203 static int32_t
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 {
206 	return MFI_READ4(sc, MFI_OMSG0);
207 }
208 
209 static int32_t
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 {
212 	return MFI_READ4(sc, MFI_OSP0);
213 }
214 
215 static int
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
217 {
218 	int32_t status;
219 
220 	status = MFI_READ4(sc, MFI_OSTS);
221 	if ((status & MFI_OSTS_INTR_VALID) == 0)
222 		return 1;
223 
224 	MFI_WRITE4(sc, MFI_OSTS, status);
225 	return 0;
226 }
227 
228 static int
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
230 {
231 	int32_t status;
232 
233 	status = MFI_READ4(sc, MFI_OSTS);
234 	if (sc->mfi_flags & MFI_FLAGS_1078) {
235 		if (!(status & MFI_1078_RM)) {
236 			return 1;
237 		}
238 	}
239 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 		if (!(status & MFI_GEN2_RM)) {
241 			return 1;
242 		}
243 	}
244 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 		if (!(status & MFI_SKINNY_RM)) {
246 			return 1;
247 		}
248 	}
249 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 		MFI_WRITE4(sc, MFI_OSTS, status);
251 	else
252 		MFI_WRITE4(sc, MFI_ODCR0, status);
253 	return 0;
254 }
255 
256 static void
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 {
259 	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
260 }
261 
262 static void
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 {
265 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
267 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 	} else {
269 		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
270 	}
271 }
272 
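/*
 * Poll the firmware status register until the firmware reaches the READY
 * state, stepping it out of the handshake, operational, and boot-message
 * states along the way.  Returns 0 on success or ENXIO if the firmware
 * faults or stops making progress.
 */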
273 int
274 mfi_transition_firmware(struct mfi_softc *sc)
275 {
276 	uint32_t fw_state, cur_state;
277 	int max_wait, i;
278 	uint32_t cur_abs_reg_val = 0;
279 	uint32_t prev_abs_reg_val = 0;
280 
281 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 	while (fw_state != MFI_FWSTATE_READY) {
284 		if (bootverbose)
285 			device_printf(sc->mfi_dev, "Waiting for firmware to "
286 			"become ready\n");
287 		cur_state = fw_state;
288 		switch (fw_state) {
289 		case MFI_FWSTATE_FAULT:
290 			device_printf(sc->mfi_dev, "Firmware fault\n");
291 			return (ENXIO);
292 		case MFI_FWSTATE_WAIT_HANDSHAKE:
293 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 			else
296 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 			max_wait = MFI_RESET_WAIT_TIME;
298 			break;
299 		case MFI_FWSTATE_OPERATIONAL:
300 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
301 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 			else
303 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 			max_wait = MFI_RESET_WAIT_TIME;
305 			break;
306 		case MFI_FWSTATE_UNDEFINED:
307 		case MFI_FWSTATE_BB_INIT:
308 			max_wait = MFI_RESET_WAIT_TIME;
309 			break;
310 		case MFI_FWSTATE_FW_INIT_2:
311 			max_wait = MFI_RESET_WAIT_TIME;
312 			break;
313 		case MFI_FWSTATE_FW_INIT:
314 		case MFI_FWSTATE_FLUSH_CACHE:
315 			max_wait = MFI_RESET_WAIT_TIME;
316 			break;
317 		case MFI_FWSTATE_DEVICE_SCAN:
318 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 			prev_abs_reg_val = cur_abs_reg_val;
320 			break;
321 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 			else
325 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 			max_wait = MFI_RESET_WAIT_TIME;
327 			break;
328 		default:
329 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
330 			    fw_state);
331 			return (ENXIO);
332 		}
333 		for (i = 0; i < (max_wait * 10); i++) {
334 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 			if (fw_state == cur_state)
337 				DELAY(100000);
338 			else
339 				break;
340 		}
341 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 			/* Check the device scanning progress */
343 			if (prev_abs_reg_val != cur_abs_reg_val) {
344 				continue;
345 			}
346 		}
347 		if (fw_state == cur_state) {
348 			device_printf(sc->mfi_dev, "Firmware stuck in state "
349 			    "%#x\n", fw_state);
350 			return (ENXIO);
351 		}
352 	}
353 	return (0);
354 }
355 
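/*
 * bus_dmamap_load() callback for single-segment mappings: record the bus
 * address of the segment in the caller-supplied pointer.
 */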
356 static void
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
358 {
359 	bus_addr_t *addr;
360 
361 	addr = arg;
362 	*addr = segs[0].ds_addr;
363 }
364 
365 
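/*
 * Core attach routine: bring the firmware to the READY state, allocate the
 * DMA areas (comms queues, frames, sense buffers), set up the interrupt
 * handler, and create the management device node and sysctls.
 */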
366 int
367 mfi_attach(struct mfi_softc *sc)
368 {
369 	uint32_t status;
370 	int error, commsz, framessz, sensesz;
371 	int frames, unit, max_fw_sge, max_fw_cmds;
372 	uint32_t tb_mem_size = 0;
373 	struct cdev *dev_t;
374 
375 	if (sc == NULL)
376 		return EINVAL;
377 
378 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
379 	    MEGASAS_VERSION);
380 
381 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
382 	sx_init(&sc->mfi_config_lock, "MFI config");
383 	TAILQ_INIT(&sc->mfi_ld_tqh);
384 	TAILQ_INIT(&sc->mfi_syspd_tqh);
385 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
386 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
387 	TAILQ_INIT(&sc->mfi_evt_queue);
388 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
389 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
390 	TAILQ_INIT(&sc->mfi_aen_pids);
391 	TAILQ_INIT(&sc->mfi_cam_ccbq);
392 
393 	mfi_initq_free(sc);
394 	mfi_initq_ready(sc);
395 	mfi_initq_busy(sc);
396 	mfi_initq_bio(sc);
397 
398 	sc->adpreset = 0;
399 	sc->last_seq_num = 0;
400 	sc->disableOnlineCtrlReset = 1;
401 	sc->issuepend_done = 1;
402 	sc->hw_crit_error = 0;
403 
404 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
405 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
409 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
410 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
411 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
412 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
413 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
414 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
415 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
416 		sc->mfi_tbolt = 1;
417 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
418 	} else {
419 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
420 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
421 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
422 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
423 	}
424 
425 
426 	/* Before we get too far, see if the firmware is working */
427 	if ((error = mfi_transition_firmware(sc)) != 0) {
428 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
429 		    "error %d\n", error);
430 		return (ENXIO);
431 	}
432 
433 	/* Start: LSIP200113393 */
434 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
435 				1, 0,			/* algnmnt, boundary */
436 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
437 				BUS_SPACE_MAXADDR,	/* highaddr */
438 				NULL, NULL,		/* filter, filterarg */
439 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
440 				1,			/* nsegments */
441 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
442 				0,			/* flags */
443 				NULL, NULL,		/* lockfunc, lockarg */
444 				&sc->verbuf_h_dmat)) {
445 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
446 		return (ENOMEM);
447 	}
448 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
449 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
450 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
451 		return (ENOMEM);
452 	}
453 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
454 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
455 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
456 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
457 	/* End: LSIP200113393 */
458 
459 	/*
460 	 * Get information needed for sizing the contiguous memory for the
461 	 * frame pool.  Size down the sgl parameter since we know that
462 	 * we will never need more than what's required for MAXPHYS.
463 	 * It would be nice if these constants were available at runtime
464 	 * instead of compile time.
465 	 */
466 	status = sc->mfi_read_fw_status(sc);
467 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
468 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
469 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
470 		    max_fw_cmds, mfi_max_cmds);
471 		sc->mfi_max_fw_cmds = mfi_max_cmds;
472 	} else {
473 		sc->mfi_max_fw_cmds = max_fw_cmds;
474 	}
475 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
476 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
477 
478 	/* ThunderBolt support: get the contiguous memory it requires */
479 
480 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
481 		mfi_tbolt_init_globals(sc);
482 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
483 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
484 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
485 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
486 
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				1, 0,			/* algnmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				tb_mem_size,		/* maxsize */
493 				1,			/* nsegments */
494 				tb_mem_size,		/* maxsegsize */
495 				0,			/* flags */
496 				NULL, NULL,		/* lockfunc, lockarg */
497 				&sc->mfi_tb_dmat)) {
498 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
499 			return (ENOMEM);
500 		}
501 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
502 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
503 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
504 			return (ENOMEM);
505 		}
506 		bzero(sc->request_message_pool, tb_mem_size);
507 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
508 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
509 
510 		/* For ThunderBolt memory init */
511 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
512 				0x100, 0,		/* algnmnt, boundary */
513 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
514 				BUS_SPACE_MAXADDR,	/* highaddr */
515 				NULL, NULL,		/* filter, filterarg */
516 				MFI_FRAME_SIZE,		/* maxsize */
517 				1,			/* nsegments */
518 				MFI_FRAME_SIZE,		/* maxsegsize */
519 				0,			/* flags */
520 				NULL, NULL,		/* lockfunc, lockarg */
521 				&sc->mfi_tb_init_dmat)) {
522 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
523 			return (ENOMEM);
524 		}
525 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
526 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
527 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
528 			return (ENOMEM);
529 		}
530 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
531 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
532 		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
533 		    &sc->mfi_tb_init_busaddr, 0);
534 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
535 		    tb_mem_size)) {
536 			device_printf(sc->mfi_dev,
537 			    "Thunderbolt pool preparation error\n");
538 			return (ENOMEM);
539 		}
540 
541 	/*
542 	 * Allocate a DMA memory mapping for the MPI2 IOC Init descriptor.
543 	 * Keep it separate from what was allocated for the request and
544 	 * reply descriptors to avoid confusion later.
545 	 */
546 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
547 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
548 				1, 0,			/* algnmnt, boundary */
549 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
550 				BUS_SPACE_MAXADDR,	/* highaddr */
551 				NULL, NULL,		/* filter, filterarg */
552 				tb_mem_size,		/* maxsize */
553 				1,			/* nsegments */
554 				tb_mem_size,		/* maxsegsize */
555 				0,			/* flags */
556 				NULL, NULL,		/* lockfunc, lockarg */
557 				&sc->mfi_tb_ioc_init_dmat)) {
558 			device_printf(sc->mfi_dev,
559 			    "Cannot allocate comms DMA tag\n");
560 			return (ENOMEM);
561 		}
562 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
563 		    (void **)&sc->mfi_tb_ioc_init_desc,
564 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
565 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
566 			return (ENOMEM);
567 		}
568 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
569 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
570 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
571 		    &sc->mfi_tb_ioc_init_busaddr, 0);
572 	}
573 	/*
574 	 * Create the dma tag for data buffers.  Used both for block I/O
575 	 * and for various internal data queries.
576 	 */
577 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
578 				1, 0,			/* algnmnt, boundary */
579 				BUS_SPACE_MAXADDR,	/* lowaddr */
580 				BUS_SPACE_MAXADDR,	/* highaddr */
581 				NULL, NULL,		/* filter, filterarg */
582 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
583 				sc->mfi_max_sge,	/* nsegments */
584 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
585 				BUS_DMA_ALLOCNOW,	/* flags */
586 				busdma_lock_mutex,	/* lockfunc */
587 				&sc->mfi_io_lock,	/* lockfuncarg */
588 				&sc->mfi_buffer_dmat)) {
589 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
590 		return (ENOMEM);
591 	}
592 
593 	/*
594 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
595 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
596 	 * entry, so the calculated size here will be 1 more than
597 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
598 	 */
599 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
600 	    sizeof(struct mfi_hwcomms);
601 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
602 				1, 0,			/* algnmnt, boundary */
603 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
604 				BUS_SPACE_MAXADDR,	/* highaddr */
605 				NULL, NULL,		/* filter, filterarg */
606 				commsz,			/* maxsize */
607 				1,			/* nsegments */
608 				commsz,			/* maxsegsize */
609 				0,			/* flags */
610 				NULL, NULL,		/* lockfunc, lockarg */
611 				&sc->mfi_comms_dmat)) {
612 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
613 		return (ENOMEM);
614 	}
615 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
616 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
617 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
618 		return (ENOMEM);
619 	}
620 	bzero(sc->mfi_comms, commsz);
621 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
622 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
623 	/*
624 	 * Allocate DMA memory for the command frames.  Keep them in the
625 	 * lower 4GB for efficiency.  Calculate the size of the commands at
626 	 * the same time; each command is one 64 byte frame plus a set of
627 	 * additional frames for holding sg lists or other data.
628 	 * The assumption here is that the SG list will start at the second
629 	 * frame and not use the unused bytes in the first frame.  While this
630 	 * isn't technically correct, it simplifies the calculation and allows
631 	 * for command frames that might be larger than an mfi_io_frame.
632 	 */
633 	if (sizeof(bus_addr_t) == 8) {
634 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
635 		sc->mfi_flags |= MFI_FLAGS_SG64;
636 	} else {
637 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
638 	}
639 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
640 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
641 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
642 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
643 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
644 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
645 				64, 0,			/* algnmnt, boundary */
646 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
647 				BUS_SPACE_MAXADDR,	/* highaddr */
648 				NULL, NULL,		/* filter, filterarg */
649 				framessz,		/* maxsize */
650 				1,			/* nsegments */
651 				framessz,		/* maxsegsize */
652 				0,			/* flags */
653 				NULL, NULL,		/* lockfunc, lockarg */
654 				&sc->mfi_frames_dmat)) {
655 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
656 		return (ENOMEM);
657 	}
658 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
659 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
660 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
661 		return (ENOMEM);
662 	}
663 	bzero(sc->mfi_frames, framessz);
664 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
665 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
666 	/*
667 	 * Allocate DMA memory for the frame sense data.  Keep them in the
668 	 * lower 4GB for efficiency
669 	 */
670 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
671 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
672 				4, 0,			/* algnmnt, boundary */
673 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
674 				BUS_SPACE_MAXADDR,	/* highaddr */
675 				NULL, NULL,		/* filter, filterarg */
676 				sensesz,		/* maxsize */
677 				1,			/* nsegments */
678 				sensesz,		/* maxsegsize */
679 				0,			/* flags */
680 				NULL, NULL,		/* lockfunc, lockarg */
681 				&sc->mfi_sense_dmat)) {
682 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
683 		return (ENOMEM);
684 	}
685 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
686 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
687 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
688 		return (ENOMEM);
689 	}
690 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
691 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
692 	if ((error = mfi_alloc_commands(sc)) != 0)
693 		return (error);
694 
695 	/* Before moving the FW to operational state, check whether
696 	 * host memory is required by the FW or not.
697 	 */
698 
699 	/* ThunderBolt MFI_IOC2 INIT */
700 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
701 		sc->mfi_disable_intr(sc);
702 		mtx_lock(&sc->mfi_io_lock);
703 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
704 			device_printf(sc->mfi_dev,
705 			    "TB Init has failed with error %d\n",error);
706 			mtx_unlock(&sc->mfi_io_lock);
707 			return (error);
708 		}
709 		mtx_unlock(&sc->mfi_io_lock);
710 
711 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
712 			return (error);
713 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
714 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
715 		    &sc->mfi_intr)) {
716 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
717 			return (EINVAL);
718 		}
719 		sc->mfi_intr_ptr = mfi_intr_tbolt;
720 		sc->mfi_enable_intr(sc);
721 	} else {
722 		if ((error = mfi_comms_init(sc)) != 0)
723 			return (error);
724 
725 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
726 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
727 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
728 			return (EINVAL);
729 		}
730 		sc->mfi_intr_ptr = mfi_intr;
731 		sc->mfi_enable_intr(sc);
732 	}
733 	if ((error = mfi_get_controller_info(sc)) != 0)
734 		return (error);
735 	sc->disableOnlineCtrlReset = 0;
736 
737 	/* Register a config hook to probe the bus for arrays */
738 	sc->mfi_ich.ich_func = mfi_startup;
739 	sc->mfi_ich.ich_arg = sc;
740 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
741 		device_printf(sc->mfi_dev, "Cannot establish configuration "
742 		    "hook\n");
743 		return (EINVAL);
744 	}
745 	mtx_lock(&sc->mfi_io_lock);
746 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
747 		mtx_unlock(&sc->mfi_io_lock);
748 		return (error);
749 	}
750 	mtx_unlock(&sc->mfi_io_lock);
751 
752 	/*
753 	 * Register a shutdown handler.
754 	 */
755 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
756 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
757 		device_printf(sc->mfi_dev, "Warning: shutdown event "
758 		    "registration failed\n");
759 	}
760 
761 	/*
762 	 * Create the control device for doing management
763 	 */
764 	unit = device_get_unit(sc->mfi_dev);
765 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
766 	    0640, "mfi%d", unit);
767 	if (unit == 0)
768 		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
769 		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
770 	if (sc->mfi_cdev != NULL)
771 		sc->mfi_cdev->si_drv1 = sc;
772 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
775 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
776 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
777 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
778 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
779 	    &sc->mfi_keep_deleted_volumes, 0,
780 	    "Don't detach the mfid device for a busy volume that is deleted");
781 
782 	device_add_child(sc->mfi_dev, "mfip", -1);
783 	bus_generic_attach(sc->mfi_dev);
784 
785 	/* Start the timeout watchdog */
786 	callout_init(&sc->mfi_watchdog_callout, 1);
787 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
788 	    mfi_timeout, sc);
789 
790 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
791 		mtx_lock(&sc->mfi_io_lock);
792 		mfi_tbolt_sync_map_info(sc);
793 		mtx_unlock(&sc->mfi_io_lock);
794 	}
795 
796 	return (0);
797 }
798 
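/*
 * Allocate per-command state for every firmware command slot and point each
 * command at its slice of the frame and sense DMA areas.
 */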
799 static int
800 mfi_alloc_commands(struct mfi_softc *sc)
801 {
802 	struct mfi_command *cm;
803 	int i, j;
804 
805 	/*
806 	 * XXX Should we allocate all the commands up front, or allocate on
807 	 * demand later like 'aac' does?
808 	 */
809 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
810 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
811 
812 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
813 		cm = &sc->mfi_commands[i];
814 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
815 		    sc->mfi_cmd_size * i);
816 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
817 		    sc->mfi_cmd_size * i;
818 		cm->cm_frame->header.context = i;
819 		cm->cm_sense = &sc->mfi_sense[i];
820 		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
821 		cm->cm_sc = sc;
822 		cm->cm_index = i;
823 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
824 		    &cm->cm_dmamap) == 0) {
825 			mtx_lock(&sc->mfi_io_lock);
826 			mfi_release_command(cm);
827 			mtx_unlock(&sc->mfi_io_lock);
828 		} else {
829 			device_printf(sc->mfi_dev, "Failed to allocate %d "
830 			   "command blocks, only allocated %d\n",
831 			    sc->mfi_max_fw_cmds, i);
832 			for (j = 0; j < i; j++) {
833 				cm = &sc->mfi_commands[j];
834 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
835 				    cm->cm_dmamap);
836 			}
837 			free(sc->mfi_commands, M_MFIBUF);
838 			sc->mfi_commands = NULL;
839 
840 			return (ENOMEM);
841 		}
842 	}
843 
844 	return (0);
845 }
846 
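/*
 * Return a command to the free pool, clearing the header fields that the
 * next user must not inherit while preserving the context (slot index).
 */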
847 void
848 mfi_release_command(struct mfi_command *cm)
849 {
850 	struct mfi_frame_header *hdr;
851 	uint32_t *hdr_data;
852 
853 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
854 
855 	/*
856 	 * Zero out the important fields of the frame, but make sure the
857 	 * context field is preserved.  For efficiency, handle the fields
858 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
859 	 */
860 	hdr = &cm->cm_frame->header;
861 	if (cm->cm_data != NULL && hdr->sg_count) {
862 		cm->cm_sg->sg32[0].len = 0;
863 		cm->cm_sg->sg32[0].addr = 0;
864 	}
865 
866 	/*
867 	 * Command may be on other queues, e.g. the busy queue, depending on
868 	 * the flow of a previous call to mfi_mapcmd, so ensure it's dequeued
869 	 * properly.
870 	 */
871 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
872 		mfi_remove_busy(cm);
873 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
874 		mfi_remove_ready(cm);
875 
876 	/* We're not expecting it to be on any other queue but check */
877 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
878 		panic("Command %p is still on another queue, flags = %#x",
879 		    cm, cm->cm_flags);
880 	}
881 
882 	/* tbolt cleanup */
883 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
884 		mfi_tbolt_return_cmd(cm->cm_sc,
885 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
886 		    cm);
887 	}
888 
889 	hdr_data = (uint32_t *)cm->cm_frame;
890 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
891 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
892 	hdr_data[4] = 0;	/* flags, timeout */
893 	hdr_data[5] = 0;	/* data_len */
894 
895 	cm->cm_extra_frames = 0;
896 	cm->cm_flags = 0;
897 	cm->cm_complete = NULL;
898 	cm->cm_private = NULL;
899 	cm->cm_data = NULL;
900 	cm->cm_sg = NULL;
901 	cm->cm_total_frame_size = 0;
902 	cm->retry_for_fw_reset = 0;
903 
904 	mfi_enqueue_free(cm);
905 }
906 
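/*
 * Build a DCMD frame for the given opcode.  If bufp points to a NULL
 * pointer, a data buffer of bufsize is allocated and handed back through
 * it.  Called with mfi_io_lock held.
 */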
907 int
908 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
909     uint32_t opcode, void **bufp, size_t bufsize)
910 {
911 	struct mfi_command *cm;
912 	struct mfi_dcmd_frame *dcmd;
913 	void *buf = NULL;
914 	uint32_t context = 0;
915 
916 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
917 
918 	cm = mfi_dequeue_free(sc);
919 	if (cm == NULL)
920 		return (EBUSY);
921 
922 	/* Zero out the MFI frame */
923 	context = cm->cm_frame->header.context;
924 	bzero(cm->cm_frame, sizeof(union mfi_frame));
925 	cm->cm_frame->header.context = context;
926 
927 	if ((bufsize > 0) && (bufp != NULL)) {
928 		if (*bufp == NULL) {
929 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
930 			if (buf == NULL) {
931 				mfi_release_command(cm);
932 				return (ENOMEM);
933 			}
934 			*bufp = buf;
935 		} else {
936 			buf = *bufp;
937 		}
938 	}
939 
940 	dcmd = &cm->cm_frame->dcmd;
941 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
942 	dcmd->header.cmd = MFI_CMD_DCMD;
943 	dcmd->header.timeout = 0;
944 	dcmd->header.flags = 0;
945 	dcmd->header.data_len = bufsize;
946 	dcmd->header.scsi_status = 0;
947 	dcmd->opcode = opcode;
948 	cm->cm_sg = &dcmd->sgl;
949 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
950 	cm->cm_flags = 0;
951 	cm->cm_data = buf;
952 	cm->cm_private = buf;
953 	cm->cm_len = bufsize;
954 
955 	*cmp = cm;
956 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
957 		*bufp = buf;
958 	return (0);
959 }
960 
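/*
 * Send the polled MFI INIT frame that tells the firmware where the comms
 * area lives: the reply queue and the producer/consumer indexes.
 */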
961 static int
962 mfi_comms_init(struct mfi_softc *sc)
963 {
964 	struct mfi_command *cm;
965 	struct mfi_init_frame *init;
966 	struct mfi_init_qinfo *qinfo;
967 	int error;
968 	uint32_t context = 0;
969 
970 	mtx_lock(&sc->mfi_io_lock);
971 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
972 		mtx_unlock(&sc->mfi_io_lock);
973 		return (EBUSY);
974 	}
975 
976 	/* Zero out the MFI frame */
977 	context = cm->cm_frame->header.context;
978 	bzero(cm->cm_frame, sizeof(union mfi_frame));
979 	cm->cm_frame->header.context = context;
980 
981 	/*
982 	 * Abuse the SG list area of the frame to hold the init_qinfo
983 	 * object.
984 	 */
985 	init = &cm->cm_frame->init;
986 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
987 
988 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
989 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
990 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
991 	    offsetof(struct mfi_hwcomms, hw_reply_q);
992 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
993 	    offsetof(struct mfi_hwcomms, hw_pi);
994 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
995 	    offsetof(struct mfi_hwcomms, hw_ci);
996 
997 	init->header.cmd = MFI_CMD_INIT;
998 	init->header.data_len = sizeof(struct mfi_init_qinfo);
999 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1000 	cm->cm_data = NULL;
1001 	cm->cm_flags = MFI_CMD_POLLED;
1002 
1003 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1004 		device_printf(sc->mfi_dev, "failed to send init command\n");
1005 	mfi_release_command(cm);
1006 	mtx_unlock(&sc->mfi_io_lock);
1007 
1008 	return (error);
1009 }
1010 
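/*
 * Fetch the controller info page and derive the maximum I/O size from the
 * stripe and request limits it reports.  If the command fails, fall back
 * to an estimate based on the scatter/gather limit.
 */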
1011 static int
1012 mfi_get_controller_info(struct mfi_softc *sc)
1013 {
1014 	struct mfi_command *cm = NULL;
1015 	struct mfi_ctrl_info *ci = NULL;
1016 	uint32_t max_sectors_1, max_sectors_2;
1017 	int error;
1018 
1019 	mtx_lock(&sc->mfi_io_lock);
1020 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1021 	    (void **)&ci, sizeof(*ci));
1022 	if (error)
1023 		goto out;
1024 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1025 
1026 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1027 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1028 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1029 		    MFI_SECTOR_LEN;
1030 		error = 0;
1031 		goto out;
1032 	}
1033 
1034 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1035 	    BUS_DMASYNC_POSTREAD);
1036 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1037 
1038 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1039 	max_sectors_2 = ci->max_request_size;
1040 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1041 	sc->disableOnlineCtrlReset =
1042 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1043 
1044 out:
1045 	if (ci)
1046 		free(ci, M_MFIBUF);
1047 	if (cm)
1048 		mfi_release_command(cm);
1049 	mtx_unlock(&sc->mfi_io_lock);
1050 	return (error);
1051 }
1052 
1053 static int
1054 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1055 {
1056 	struct mfi_command *cm = NULL;
1057 	int error;
1058 
1059 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1060 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1061 	    (void **)log_state, sizeof(**log_state));
1062 	if (error)
1063 		goto out;
1064 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1065 
1066 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1067 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1068 		goto out;
1069 	}
1070 
1071 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1072 	    BUS_DMASYNC_POSTREAD);
1073 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1074 
1075 out:
1076 	if (cm)
1077 		mfi_release_command(cm);
1078 
1079 	return (error);
1080 }
1081 
1082 int
1083 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1084 {
1085 	struct mfi_evt_log_state *log_state = NULL;
1086 	union mfi_evt class_locale;
1087 	int error = 0;
1088 	uint32_t seq;
1089 
1090 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1091 
1092 	class_locale.members.reserved = 0;
1093 	class_locale.members.locale = mfi_event_locale;
1094 	class_locale.members.evt_class  = mfi_event_class;
1095 
1096 	if (seq_start == 0) {
1097 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1098 			goto out;
1099 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1100 
1101 		/*
1102 		 * Walk through any events that fired since the last
1103 		 * shutdown.
1104 		 */
1105 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1106 		    log_state->newest_seq_num)) != 0)
1107 			goto out;
1108 		seq = log_state->newest_seq_num;
1109 	} else
1110 		seq = seq_start;
1111 	error = mfi_aen_register(sc, seq, class_locale.word);
1112 out:
1113 	free(log_state, M_MFIBUF);
1114 
1115 	return (error);
1116 }
1117 
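/*
 * Queue a command and sleep until it completes.  Called with mfi_io_lock
 * held.
 */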
1118 int
1119 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1120 {
1121 
1122 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1123 	cm->cm_complete = NULL;
1124 
1125 	/*
1126 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1127 	 * and return 0 to it as the status.
1128 	 */
1129 	if (cm->cm_frame->dcmd.opcode == 0) {
1130 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1131 		cm->cm_error = 0;
1132 		return (cm->cm_error);
1133 	}
1134 	mfi_enqueue_ready(cm);
1135 	mfi_startio(sc);
1136 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1137 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1138 	return (cm->cm_error);
1139 }
1140 
1141 void
1142 mfi_free(struct mfi_softc *sc)
1143 {
1144 	struct mfi_command *cm;
1145 	int i;
1146 
1147 	callout_drain(&sc->mfi_watchdog_callout);
1148 
1149 	if (sc->mfi_cdev != NULL)
1150 		destroy_dev(sc->mfi_cdev);
1151 
1152 	if (sc->mfi_commands != NULL) {
1153 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1154 			cm = &sc->mfi_commands[i];
1155 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1156 		}
1157 		free(sc->mfi_commands, M_MFIBUF);
1158 		sc->mfi_commands = NULL;
1159 	}
1160 
1161 	if (sc->mfi_intr)
1162 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1163 	if (sc->mfi_irq != NULL)
1164 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1165 		    sc->mfi_irq);
1166 
1167 	if (sc->mfi_sense_busaddr != 0)
1168 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1169 	if (sc->mfi_sense != NULL)
1170 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1171 		    sc->mfi_sense_dmamap);
1172 	if (sc->mfi_sense_dmat != NULL)
1173 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1174 
1175 	if (sc->mfi_frames_busaddr != 0)
1176 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1177 	if (sc->mfi_frames != NULL)
1178 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1179 		    sc->mfi_frames_dmamap);
1180 	if (sc->mfi_frames_dmat != NULL)
1181 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1182 
1183 	if (sc->mfi_comms_busaddr != 0)
1184 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1185 	if (sc->mfi_comms != NULL)
1186 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1187 		    sc->mfi_comms_dmamap);
1188 	if (sc->mfi_comms_dmat != NULL)
1189 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1190 
1191 	/* ThunderBolt contiguous memory free here */
1192 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1193 		if (sc->mfi_tb_busaddr != 0)
1194 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1195 		if (sc->request_message_pool != NULL)
1196 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1197 			    sc->mfi_tb_dmamap);
1198 		if (sc->mfi_tb_dmat != NULL)
1199 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1200 
1201 		/* Version buffer memory free */
1202 		/* Start LSIP200113393 */
1203 		if (sc->verbuf_h_busaddr != 0)
1204 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1205 		if (sc->verbuf != NULL)
1206 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1207 			    sc->verbuf_h_dmamap);
1208 		if (sc->verbuf_h_dmat != NULL)
1209 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1210 
1211 		/* End LSIP200113393 */
1212 		/* ThunderBolt INIT packet memory Free */
1213 		if (sc->mfi_tb_init_busaddr != 0)
1214 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1215 			    sc->mfi_tb_init_dmamap);
1216 		if (sc->mfi_tb_init != NULL)
1217 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1218 			    sc->mfi_tb_init_dmamap);
1219 		if (sc->mfi_tb_init_dmat != NULL)
1220 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1221 
1222 		/* ThunderBolt IOC Init Desc memory free here */
1223 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1224 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1225 			    sc->mfi_tb_ioc_init_dmamap);
1226 		if (sc->mfi_tb_ioc_init_desc != NULL)
1227 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1228 			    sc->mfi_tb_ioc_init_desc,
1229 			    sc->mfi_tb_ioc_init_dmamap);
1230 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1231 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1232 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1233 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1234 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1235 					free(sc->mfi_cmd_pool_tbolt[i],
1236 					    M_MFIBUF);
1237 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1238 				}
1239 			}
1240 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1241 			sc->mfi_cmd_pool_tbolt = NULL;
1242 		}
1243 		if (sc->request_desc_pool != NULL) {
1244 			free(sc->request_desc_pool, M_MFIBUF);
1245 			sc->request_desc_pool = NULL;
1246 		}
1247 	}
1248 	if (sc->mfi_buffer_dmat != NULL)
1249 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1250 	if (sc->mfi_parent_dmat != NULL)
1251 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1252 
1253 	if (mtx_initialized(&sc->mfi_io_lock)) {
1254 		mtx_destroy(&sc->mfi_io_lock);
1255 		sx_destroy(&sc->mfi_config_lock);
1256 	}
1257 
1258 	return;
1259 }
1260 
1261 static void
1262 mfi_startup(void *arg)
1263 {
1264 	struct mfi_softc *sc;
1265 
1266 	sc = (struct mfi_softc *)arg;
1267 
1268 	sc->mfi_enable_intr(sc);
1269 	sx_xlock(&sc->mfi_config_lock);
1270 	mtx_lock(&sc->mfi_io_lock);
1271 	mfi_ldprobe(sc);
1272 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 		mfi_syspdprobe(sc);
1274 	mtx_unlock(&sc->mfi_io_lock);
1275 	sx_xunlock(&sc->mfi_config_lock);
1276 
1277 	config_intrhook_disestablish(&sc->mfi_ich);
1278 }
1279 
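/*
 * Interrupt handler: walk the reply queue consumer index up to the
 * producer index, completing each command found, then give deferred I/O
 * a chance to run.
 */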
1280 static void
1281 mfi_intr(void *arg)
1282 {
1283 	struct mfi_softc *sc;
1284 	struct mfi_command *cm;
1285 	uint32_t pi, ci, context;
1286 
1287 	sc = (struct mfi_softc *)arg;
1288 
1289 	if (sc->mfi_check_clear_intr(sc))
1290 		return;
1291 
1292 restart:
1293 	pi = sc->mfi_comms->hw_pi;
1294 	ci = sc->mfi_comms->hw_ci;
1295 	mtx_lock(&sc->mfi_io_lock);
1296 	while (ci != pi) {
1297 		context = sc->mfi_comms->hw_reply_q[ci];
1298 		if (context < sc->mfi_max_fw_cmds) {
1299 			cm = &sc->mfi_commands[context];
1300 			mfi_remove_busy(cm);
1301 			cm->cm_error = 0;
1302 			mfi_complete(sc, cm);
1303 		}
1304 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1305 			ci = 0;
1306 	}
1307 
1308 	sc->mfi_comms->hw_ci = ci;
1309 
1310 	/* Give deferred I/O a chance to run */
1311 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1312 	mfi_startio(sc);
1313 	mtx_unlock(&sc->mfi_io_lock);
1314 
1315 	/*
1316 	 * Dummy read to flush the bus; this ensures that the indexes are up
1317 	 * to date.  Restart processing if more commands have come in.
1318 	 */
1319 	(void)sc->mfi_read_fw_status(sc);
1320 	if (pi != sc->mfi_comms->hw_pi)
1321 		goto restart;
1322 
1323 	return;
1324 }
1325 
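/*
 * Shut down the controller: abort any outstanding AEN and map-sync
 * commands, then issue a polled CTRL_SHUTDOWN DCMD.
 */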
1326 int
1327 mfi_shutdown(struct mfi_softc *sc)
1328 {
1329 	struct mfi_dcmd_frame *dcmd;
1330 	struct mfi_command *cm;
1331 	int error;
1332 
1333 
1334 	if (sc->mfi_aen_cm != NULL) {
1335 		sc->cm_aen_abort = 1;
1336 		mfi_abort(sc, &sc->mfi_aen_cm);
1337 	}
1338 
1339 	if (sc->mfi_map_sync_cm != NULL) {
1340 		sc->cm_map_abort = 1;
1341 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1342 	}
1343 
1344 	mtx_lock(&sc->mfi_io_lock);
1345 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1346 	if (error) {
1347 		mtx_unlock(&sc->mfi_io_lock);
1348 		return (error);
1349 	}
1350 
1351 	dcmd = &cm->cm_frame->dcmd;
1352 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1353 	cm->cm_flags = MFI_CMD_POLLED;
1354 	cm->cm_data = NULL;
1355 
1356 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1357 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1358 
1359 	mfi_release_command(cm);
1360 	mtx_unlock(&sc->mfi_io_lock);
1361 	return (error);
1362 }
1363 
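/*
 * Probe for system (JBOD) physical disks: query the firmware for the list
 * of PDs exposed to the host, attach any new ones, and detach disks that
 * are no longer present.
 */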
1364 static void
1365 mfi_syspdprobe(struct mfi_softc *sc)
1366 {
1367 	struct mfi_frame_header *hdr;
1368 	struct mfi_command *cm = NULL;
1369 	struct mfi_pd_list *pdlist = NULL;
1370 	struct mfi_system_pd *syspd, *tmp;
1371 	struct mfi_system_pending *syspd_pend;
1372 	int error, i, found;
1373 
1374 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1375 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1376 	/* Add SYSTEM PD's */
1377 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1378 	    (void **)&pdlist, sizeof(*pdlist));
1379 	if (error) {
1380 		device_printf(sc->mfi_dev,
1381 		    "Error while forming SYSTEM PD list\n");
1382 		goto out;
1383 	}
1384 
1385 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1386 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1387 	cm->cm_frame->dcmd.mbox[1] = 0;
1388 	if (mfi_mapcmd(sc, cm) != 0) {
1389 		device_printf(sc->mfi_dev,
1390 		    "Failed to get syspd device listing\n");
1391 		goto out;
1392 	}
1393 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1394 	    BUS_DMASYNC_POSTREAD);
1395 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1396 	hdr = &cm->cm_frame->header;
1397 	if (hdr->cmd_status != MFI_STAT_OK) {
1398 		device_printf(sc->mfi_dev,
1399 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1400 		goto out;
1401 	}
1402 	/* Get each PD and add it to the system */
1403 	for (i = 0; i < pdlist->count; i++) {
1404 		if (pdlist->addr[i].device_id ==
1405 		    pdlist->addr[i].encl_device_id)
1406 			continue;
1407 		found = 0;
1408 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1409 			if (syspd->pd_id == pdlist->addr[i].device_id)
1410 				found = 1;
1411 		}
1412 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1413 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1414 				found = 1;
1415 		}
1416 		if (found == 0)
1417 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1418 	}
1419 	/* Delete SYSPD's whose state has been changed */
1420 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1421 		found = 0;
1422 		for (i = 0; i < pdlist->count; i++) {
1423 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1424 				found = 1;
1425 				break;
1426 			}
1427 		}
1428 		if (found == 0) {
1429 			device_printf(sc->mfi_dev, "deleting syspd %d\n", syspd->pd_id);
1430 			mtx_unlock(&sc->mfi_io_lock);
1431 			mtx_lock(&Giant);
1432 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1433 			mtx_unlock(&Giant);
1434 			mtx_lock(&sc->mfi_io_lock);
1435 		}
1436 	}
1437 out:
1438 	if (pdlist)
1439 		free(pdlist, M_MFIBUF);
1440 	if (cm)
1441 		mfi_release_command(cm);
1442 
1443 	return;
1444 }
1445 
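/*
 * Probe for logical disks: fetch the LD list from the firmware and attach
 * a child device for each volume not already known.
 */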
1446 static void
1447 mfi_ldprobe(struct mfi_softc *sc)
1448 {
1449 	struct mfi_frame_header *hdr;
1450 	struct mfi_command *cm = NULL;
1451 	struct mfi_ld_list *list = NULL;
1452 	struct mfi_disk *ld;
1453 	struct mfi_disk_pending *ld_pend;
1454 	int error, i;
1455 
1456 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1457 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1458 
1459 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1460 	    (void **)&list, sizeof(*list));
1461 	if (error)
1462 		goto out;
1463 
1464 	cm->cm_flags = MFI_CMD_DATAIN;
1465 	if (mfi_wait_command(sc, cm) != 0) {
1466 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1467 		goto out;
1468 	}
1469 
1470 	hdr = &cm->cm_frame->header;
1471 	if (hdr->cmd_status != MFI_STAT_OK) {
1472 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1473 		    hdr->cmd_status);
1474 		goto out;
1475 	}
1476 
1477 	for (i = 0; i < list->ld_count; i++) {
1478 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1479 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1480 				goto skip_add;
1481 		}
1482 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1483 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1484 				goto skip_add;
1485 		}
1486 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1487 	skip_add:;
1488 	}
1489 out:
1490 	if (list)
1491 		free(list, M_MFIBUF);
1492 	if (cm)
1493 		mfi_release_command(cm);
1494 
1495 	return;
1496 }
1497 
1498 /*
1499  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1500  * bits 24-31 are all set, then it is the number of seconds since
1501  * boot.
1502  */
1503 static const char *
1504 format_timestamp(uint32_t timestamp)
1505 {
1506 	static char buffer[32];
1507 
1508 	if ((timestamp & 0xff000000) == 0xff000000)
1509 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1510 		    0x00ffffff);
1511 	else
1512 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1513 	return (buffer);
1514 }
1515 
1516 static const char *
1517 format_class(int8_t class)
1518 {
1519 	static char buffer[6];
1520 
1521 	switch (class) {
1522 	case MFI_EVT_CLASS_DEBUG:
1523 		return ("debug");
1524 	case MFI_EVT_CLASS_PROGRESS:
1525 		return ("progress");
1526 	case MFI_EVT_CLASS_INFO:
1527 		return ("info");
1528 	case MFI_EVT_CLASS_WARNING:
1529 		return ("WARN");
1530 	case MFI_EVT_CLASS_CRITICAL:
1531 		return ("CRIT");
1532 	case MFI_EVT_CLASS_FATAL:
1533 		return ("FATAL");
1534 	case MFI_EVT_CLASS_DEAD:
1535 		return ("DEAD");
1536 	default:
1537 		snprintf(buffer, sizeof(buffer), "%d", class);
1538 		return (buffer);
1539 	}
1540 }
1541 
1542 static void
1543 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1544 {
1545 	struct mfi_system_pd *syspd = NULL;
1546 
1547 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1548 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1549 	    format_class(detail->evt_class.members.evt_class),
1550 	    detail->description);
1551 
1552 	/* Don't act on old AENs or while shutting down */
1553 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1554 		return;
1555 
1556 	switch (detail->arg_type) {
1557 	case MR_EVT_ARGS_NONE:
1558 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1559 			device_printf(sc->mfi_dev, "HostBus scan raised\n");
1560 			if (mfi_detect_jbod_change) {
1561 				/*
1562 				 * Probe for new SYSPD's and Delete
1563 				 * invalid SYSPD's
1564 				 */
1565 				sx_xlock(&sc->mfi_config_lock);
1566 				mtx_lock(&sc->mfi_io_lock);
1567 				mfi_syspdprobe(sc);
1568 				mtx_unlock(&sc->mfi_io_lock);
1569 				sx_xunlock(&sc->mfi_config_lock);
1570 			}
1571 		}
1572 		break;
1573 	case MR_EVT_ARGS_LD_STATE:
1574 		/* At load time the driver reads all events starting from
1575 		 * the one logged after shutdown, so avoid acting on these
1576 		 * old events.
1577 		 */
1578 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1579 			/* Remove the LD */
1580 			struct mfi_disk *ld;
1581 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1582 				if (ld->ld_id ==
1583 				    detail->args.ld_state.ld.target_id)
1584 					break;
1585 			}
1586 			/*
1587 			 * Fix for kernel panics when an SSCD is removed:
1588 			 * KASSERT(ld != NULL, ("volume disappeared"));
1589 			 */
1590 			if (ld != NULL) {
1591 				mtx_lock(&Giant);
1592 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1593 				mtx_unlock(&Giant);
1594 			}
1595 		}
1596 		break;
1597 	case MR_EVT_ARGS_PD:
1598 		if (detail->code == MR_EVT_PD_REMOVED) {
1599 			if (mfi_detect_jbod_change) {
1600 				/*
1601 				 * If the removed device is a SYSPD then
1602 				 * delete it
1603 				 */
1604 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1605 				    pd_link) {
1606 					if (syspd->pd_id ==
1607 					    detail->args.pd.device_id) {
1608 						mtx_lock(&Giant);
1609 						device_delete_child(
1610 						    sc->mfi_dev,
1611 						    syspd->pd_dev);
1612 						mtx_unlock(&Giant);
1613 						break;
1614 					}
1615 				}
1616 			}
1617 		}
1618 		if (detail->code == MR_EVT_PD_INSERTED) {
1619 			if (mfi_detect_jbod_change) {
1620 				/* Probe for new SYSPD's */
1621 				sx_xlock(&sc->mfi_config_lock);
1622 				mtx_lock(&sc->mfi_io_lock);
1623 				mfi_syspdprobe(sc);
1624 				mtx_unlock(&sc->mfi_io_lock);
1625 				sx_xunlock(&sc->mfi_config_lock);
1626 			}
1627 		}
1628 		if (sc->mfi_cam_rescan_cb != NULL &&
1629 		    (detail->code == MR_EVT_PD_INSERTED ||
1630 		    detail->code == MR_EVT_PD_REMOVED)) {
1631 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1632 		}
1633 		break;
1634 	}
1635 }
1636 
1637 static void
1638 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1639 {
1640 	struct mfi_evt_queue_elm *elm;
1641 
1642 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1643 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1644 	if (elm == NULL)
1645 		return;
1646 	memcpy(&elm->detail, detail, sizeof(*detail));
1647 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1648 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1649 }
1650 
1651 static void
1652 mfi_handle_evt(void *context, int pending)
1653 {
1654 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1655 	struct mfi_softc *sc;
1656 	struct mfi_evt_queue_elm *elm;
1657 
1658 	sc = context;
1659 	TAILQ_INIT(&queue);
1660 	mtx_lock(&sc->mfi_io_lock);
1661 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1662 	mtx_unlock(&sc->mfi_io_lock);
1663 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1664 		TAILQ_REMOVE(&queue, elm, link);
1665 		mfi_decode_evt(sc, &elm->detail);
1666 		free(elm, M_MFIBUF);
1667 	}
1668 }
1669 
1670 static int
1671 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1672 {
1673 	struct mfi_command *cm;
1674 	struct mfi_dcmd_frame *dcmd;
1675 	union mfi_evt current_aen, prior_aen;
1676 	struct mfi_evt_detail *ed = NULL;
1677 	int error = 0;
1678 
1679 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1680 
1681 	current_aen.word = locale;
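	/*
	 * If an AEN command is already outstanding, widen it instead of
	 * stacking a second one: when the existing registration already
	 * covers the requested class and locales, keep it as-is;
	 * otherwise merge the locale masks, keep the more inclusive
	 * (numerically lower) event class, and abort the old command so
	 * that a combined one can be issued below.
	 */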
1682 	if (sc->mfi_aen_cm != NULL) {
1683 		prior_aen.word =
1684 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1685 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1686 		    !((prior_aen.members.locale & current_aen.members.locale)
1687 		    ^ current_aen.members.locale)) {
1688 			return (0);
1689 		} else {
1690 			prior_aen.members.locale |= current_aen.members.locale;
1691 			if (prior_aen.members.evt_class
1692 			    < current_aen.members.evt_class)
1693 				current_aen.members.evt_class =
1694 				    prior_aen.members.evt_class;
1695 			mfi_abort(sc, &sc->mfi_aen_cm);
1696 		}
1697 	}
1698 
1699 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1700 	    (void **)&ed, sizeof(*ed));
1701 	if (error)
1702 		goto out;
1703 
1704 	dcmd = &cm->cm_frame->dcmd;
1705 	((uint32_t *)&dcmd->mbox)[0] = seq;
1706 	((uint32_t *)&dcmd->mbox)[1] = locale;
1707 	cm->cm_flags = MFI_CMD_DATAIN;
1708 	cm->cm_complete = mfi_aen_complete;
1709 
1710 	sc->last_seq_num = seq;
1711 	sc->mfi_aen_cm = cm;
1712 
1713 	mfi_enqueue_ready(cm);
1714 	mfi_startio(sc);
1715 
1716 out:
1717 	return (error);
1718 }
1719 
1720 static void
1721 mfi_aen_complete(struct mfi_command *cm)
1722 {
1723 	struct mfi_frame_header *hdr;
1724 	struct mfi_softc *sc;
1725 	struct mfi_evt_detail *detail;
1726 	struct mfi_aen *mfi_aen_entry, *tmp;
1727 	int seq = 0, aborted = 0;
1728 
1729 	sc = cm->cm_sc;
1730 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1731 
1732 	if (sc->mfi_aen_cm == NULL)
1733 		return;
1734 
1735 	hdr = &cm->cm_frame->header;
1736 
1737 	if (sc->cm_aen_abort ||
1738 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1739 		sc->cm_aen_abort = 0;
1740 		aborted = 1;
1741 	} else {
1742 		sc->mfi_aen_triggered = 1;
1743 		if (sc->mfi_poll_waiting) {
1744 			sc->mfi_poll_waiting = 0;
1745 			selwakeup(&sc->mfi_select);
1746 		}
1747 		detail = cm->cm_data;
1748 		mfi_queue_evt(sc, detail);
1749 		seq = detail->seq + 1;
1750 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1751 		    tmp) {
1752 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1753 			    aen_link);
1754 			PROC_LOCK(mfi_aen_entry->p);
1755 			kern_psignal(mfi_aen_entry->p, SIGIO);
1756 			PROC_UNLOCK(mfi_aen_entry->p);
1757 			free(mfi_aen_entry, M_MFIBUF);
1758 		}
1759 	}
1760 
1761 	free(cm->cm_data, M_MFIBUF);
1762 	wakeup(&sc->mfi_aen_cm);
1763 	sc->mfi_aen_cm = NULL;
1764 	mfi_release_command(cm);
1765 
1766 	/* set it up again so the driver can catch more events */
1767 	if (!aborted)
1768 		mfi_aen_setup(sc, seq);
1769 }
1770 
1771 #define MAX_EVENTS 15
1772 
1773 static int
1774 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1775 {
1776 	struct mfi_command *cm;
1777 	struct mfi_dcmd_frame *dcmd;
1778 	struct mfi_evt_list *el;
1779 	union mfi_evt class_locale;
1780 	int error, i, seq, size;
1781 
1782 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1783 
1784 	class_locale.members.reserved = 0;
1785 	class_locale.members.locale = mfi_event_locale;
1786 	class_locale.members.evt_class  = mfi_event_class;
1787 
1788 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1789 		* (MAX_EVENTS - 1);
1790 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1791 	if (el == NULL)
1792 		return (ENOMEM);
1793 
1794 	for (seq = start_seq;;) {
1795 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1796 			free(el, M_MFIBUF);
1797 			return (EBUSY);
1798 		}
1799 
1800 		dcmd = &cm->cm_frame->dcmd;
1801 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1802 		dcmd->header.cmd = MFI_CMD_DCMD;
1803 		dcmd->header.timeout = 0;
1804 		dcmd->header.data_len = size;
1805 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1806 		((uint32_t *)&dcmd->mbox)[0] = seq;
1807 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1808 		cm->cm_sg = &dcmd->sgl;
1809 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1810 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1811 		cm->cm_data = el;
1812 		cm->cm_len = size;
1813 
1814 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1815 			device_printf(sc->mfi_dev,
1816 			    "Failed to get controller entries\n");
1817 			mfi_release_command(cm);
1818 			break;
1819 		}
1820 
1821 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1822 		    BUS_DMASYNC_POSTREAD);
1823 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1824 
1825 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1826 			mfi_release_command(cm);
1827 			break;
1828 		}
1829 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1830 			device_printf(sc->mfi_dev,
1831 			    "Error %d fetching controller entries\n",
1832 			    dcmd->header.cmd_status);
1833 			mfi_release_command(cm);
1834 			error = EIO;
1835 			break;
1836 		}
1837 		mfi_release_command(cm);
1838 
1839 		for (i = 0; i < el->count; i++) {
1840 			/*
1841 			 * If this event is newer than 'stop_seq' then
1842 			 * break out of the loop.  Note that the log
1843 			 * is a circular buffer so we have to handle
1844 			 * the case that our stop point is earlier in
1845 			 * the buffer than our start point.
1846 			 */
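			/*
			 * For example (illustrative values): with
			 * start_seq = 900 and stop_seq = 100 the log has
			 * wrapped, so an event with seq 950 (>= stop_seq
			 * but also >= start_seq) is still wanted, while
			 * seq 150 lies past the stop point and ends the
			 * scan.
			 */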
1847 			if (el->event[i].seq >= stop_seq) {
1848 				if (start_seq <= stop_seq)
1849 					break;
1850 				else if (el->event[i].seq < start_seq)
1851 					break;
1852 			}
1853 			mfi_queue_evt(sc, &el->event[i]);
1854 		}
1855 		seq = el->event[el->count - 1].seq + 1;
1856 	}
1857 
1858 	free(el, M_MFIBUF);
1859 	return (error);
1860 }
1861 
1862 static int
1863 mfi_add_ld(struct mfi_softc *sc, int id)
1864 {
1865 	struct mfi_command *cm;
1866 	struct mfi_dcmd_frame *dcmd = NULL;
1867 	struct mfi_ld_info *ld_info = NULL;
1868 	struct mfi_disk_pending *ld_pend;
1869 	int error;
1870 
1871 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1872 
1873 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1874 	if (ld_pend != NULL) {
1875 		ld_pend->ld_id = id;
1876 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1877 	}
1878 
1879 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1880 	    (void **)&ld_info, sizeof(*ld_info));
1881 	if (error) {
1882 		device_printf(sc->mfi_dev,
1883 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1884 		if (ld_info)
1885 			free(ld_info, M_MFIBUF);
1886 		return (error);
1887 	}
1888 	cm->cm_flags = MFI_CMD_DATAIN;
1889 	dcmd = &cm->cm_frame->dcmd;
1890 	dcmd->mbox[0] = id;
1891 	if (mfi_wait_command(sc, cm) != 0) {
1892 		device_printf(sc->mfi_dev,
1893 		    "Failed to get logical drive: %d\n", id);
1894 		free(ld_info, M_MFIBUF);
1895 		return (0);
1896 	}
1897 	if (ld_info->ld_config.params.isSSCD != 1)
1898 		mfi_add_ld_complete(cm);
1899 	else {
1900 		mfi_release_command(cm);
1901 		if (ld_info)	/* For SSCD drives, ld_info is freed here */
1902 			free(ld_info, M_MFIBUF);
1903 	}
1904 	return (0);
1905 }
1906 
1907 static void
1908 mfi_add_ld_complete(struct mfi_command *cm)
1909 {
1910 	struct mfi_frame_header *hdr;
1911 	struct mfi_ld_info *ld_info;
1912 	struct mfi_softc *sc;
1913 	device_t child;
1914 
1915 	sc = cm->cm_sc;
1916 	hdr = &cm->cm_frame->header;
1917 	ld_info = cm->cm_private;
1918 
1919 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1920 		free(ld_info, M_MFIBUF);
1921 		wakeup(&sc->mfi_map_sync_cm);
1922 		mfi_release_command(cm);
1923 		return;
1924 	}
1925 	wakeup(&sc->mfi_map_sync_cm);
1926 	mfi_release_command(cm);
1927 
1928 	mtx_unlock(&sc->mfi_io_lock);
1929 	mtx_lock(&Giant);
1930 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1931 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1932 		free(ld_info, M_MFIBUF);
1933 		mtx_unlock(&Giant);
1934 		mtx_lock(&sc->mfi_io_lock);
1935 		return;
1936 	}
1937 
1938 	device_set_ivars(child, ld_info);
1939 	device_set_desc(child, "MFI Logical Disk");
1940 	bus_generic_attach(sc->mfi_dev);
1941 	mtx_unlock(&Giant);
1942 	mtx_lock(&sc->mfi_io_lock);
1943 }
1944 
1945 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1946 {
1947 	struct mfi_command *cm;
1948 	struct mfi_dcmd_frame *dcmd = NULL;
1949 	struct mfi_pd_info *pd_info = NULL;
1950 	struct mfi_system_pending *syspd_pend;
1951 	int error;
1952 
1953 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1954 
1955 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1956 	if (syspd_pend != NULL) {
1957 		syspd_pend->pd_id = id;
1958 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1959 	}
1960 
1961 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1962 		(void **)&pd_info, sizeof(*pd_info));
1963 	if (error) {
1964 		device_printf(sc->mfi_dev,
1965 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1966 		    error);
1967 		if (pd_info)
1968 			free(pd_info, M_MFIBUF);
1969 		return (error);
1970 	}
1971 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1972 	dcmd = &cm->cm_frame->dcmd;
1973 	dcmd->mbox[0] = id;
1974 	dcmd->header.scsi_status = 0;
1975 	dcmd->header.pad0 = 0;
1976 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1977 		device_printf(sc->mfi_dev,
1978 		    "Failed to get physical drive info %d\n", id);
1979 		free(pd_info, M_MFIBUF);
1980 		mfi_release_command(cm);
1981 		return (error);
1982 	}
1983 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1984 	    BUS_DMASYNC_POSTREAD);
1985 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1986 	mfi_add_sys_pd_complete(cm);
1987 	return (0);
1988 }
1989 
1990 static void
1991 mfi_add_sys_pd_complete(struct mfi_command *cm)
1992 {
1993 	struct mfi_frame_header *hdr;
1994 	struct mfi_pd_info *pd_info;
1995 	struct mfi_softc *sc;
1996 	device_t child;
1997 
1998 	sc = cm->cm_sc;
1999 	hdr = &cm->cm_frame->header;
2000 	pd_info = cm->cm_private;
2001 
2002 	if (hdr->cmd_status != MFI_STAT_OK) {
2003 		free(pd_info, M_MFIBUF);
2004 		mfi_release_command(cm);
2005 		return;
2006 	}
2007 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2008 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2009 		    pd_info->ref.v.device_id);
2010 		free(pd_info, M_MFIBUF);
2011 		mfi_release_command(cm);
2012 		return;
2013 	}
2014 	mfi_release_command(cm);
2015 
2016 	mtx_unlock(&sc->mfi_io_lock);
2017 	mtx_lock(&Giant);
2018 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2019 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2020 		free(pd_info, M_MFIBUF);
2021 		mtx_unlock(&Giant);
2022 		mtx_lock(&sc->mfi_io_lock);
2023 		return;
2024 	}
2025 
2026 	device_set_ivars(child, pd_info);
2027 	device_set_desc(child, "MFI System PD");
2028 	bus_generic_attach(sc->mfi_dev);
2029 	mtx_unlock(&Giant);
2030 	mtx_lock(&sc->mfi_io_lock);
2031 }
2032 
2033 static struct mfi_command *
2034 mfi_bio_command(struct mfi_softc *sc)
2035 {
2036 	struct bio *bio;
2037 	struct mfi_command *cm = NULL;
2038 
2039 	/* Reserve two commands to avoid ioctl starvation */
2040 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2041 		return (NULL);
2042 	}
2043 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2044 		return (NULL);
2045 	}
2046 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2047 		cm = mfi_build_ldio(sc, bio);
2048 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2049 		cm = mfi_build_syspdio(sc, bio);
2050 	}
2051 	if (cm == NULL)
2052 		mfi_enqueue_bio(sc, bio);
2053 	return (cm);
2054 }
2055 
2056 /*
2057  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2058  */
2059 
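/*
 * Summary of the size selection below: an LBA that fits in 21 bits with a
 * block count that fits in 8 bits (and byte2 == 0) uses READ_6/WRITE_6; a
 * 32-bit LBA with a 16-bit count uses the 10-byte form; a 32-bit count
 * needs the 12-byte form; an LBA above 2^32 falls through to
 * READ_16/WRITE_16.
 */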
2060 int
2061 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2062 {
2063 	int cdb_len;
2064 
2065 	if (((lba & 0x1fffff) == lba) &&
2066 	    ((block_count & 0xff) == block_count) &&
2067 	    (byte2 == 0)) {
2068 		/* We can fit in a 6 byte cdb */
2069 		struct scsi_rw_6 *scsi_cmd;
2070 
2071 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2072 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2073 		scsi_ulto3b(lba, scsi_cmd->addr);
2074 		scsi_cmd->length = block_count & 0xff;
2075 		scsi_cmd->control = 0;
2076 		cdb_len = sizeof(*scsi_cmd);
2077 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2078 		/* Need a 10 byte CDB */
2079 		struct scsi_rw_10 *scsi_cmd;
2080 
2081 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2082 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2083 		scsi_cmd->byte2 = byte2;
2084 		scsi_ulto4b(lba, scsi_cmd->addr);
2085 		scsi_cmd->reserved = 0;
2086 		scsi_ulto2b(block_count, scsi_cmd->length);
2087 		scsi_cmd->control = 0;
2088 		cdb_len = sizeof(*scsi_cmd);
2089 	} else if (((block_count & 0xffffffff) == block_count) &&
2090 	    ((lba & 0xffffffff) == lba)) {
2091 		/* Block count is too big for a 10 byte CDB; use a 12 byte CDB */
2092 		struct scsi_rw_12 *scsi_cmd;
2093 
2094 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2095 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2096 		scsi_cmd->byte2 = byte2;
2097 		scsi_ulto4b(lba, scsi_cmd->addr);
2098 		scsi_cmd->reserved = 0;
2099 		scsi_ulto4b(block_count, scsi_cmd->length);
2100 		scsi_cmd->control = 0;
2101 		cdb_len = sizeof(*scsi_cmd);
2102 	} else {
2103 		/*
2104 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2105 		 * than 2^32
2106 		 */
2107 		struct scsi_rw_16 *scsi_cmd;
2108 
2109 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2110 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2111 		scsi_cmd->byte2 = byte2;
2112 		scsi_u64to8b(lba, scsi_cmd->addr);
2113 		scsi_cmd->reserved = 0;
2114 		scsi_ulto4b(block_count, scsi_cmd->length);
2115 		scsi_cmd->control = 0;
2116 		cdb_len = sizeof(*scsi_cmd);
2117 	}
2118 
2119 	return (cdb_len);
2120 }
2121 
2122 extern char *unmapped_buf;
2123 
2124 static struct mfi_command *
2125 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2126 {
2127 	struct mfi_command *cm;
2128 	struct mfi_pass_frame *pass;
2129 	uint32_t context = 0;
2130 	int flags = 0, blkcount = 0, readop;
2131 	uint8_t cdb_len;
2132 
2133 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2134 
2135 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2136 	    return (NULL);
2137 
2138 	/* Zero out the MFI frame */
2139 	context = cm->cm_frame->header.context;
2140 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2141 	cm->cm_frame->header.context = context;
2142 	pass = &cm->cm_frame->pass;
2143 	bzero(pass->cdb, 16);
2144 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2145 	switch (bio->bio_cmd) {
2146 	case BIO_READ:
2147 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2148 		readop = 1;
2149 		break;
2150 	case BIO_WRITE:
2151 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2152 		readop = 0;
2153 		break;
2154 	default:
2155 		/* TODO: what about BIO_DELETE??? */
2156 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2157 	}
2158 
2159 	/* Cheat with the sector length to avoid a non-constant division */
2160 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
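	/*
	 * howmany() rounds up, so a partial trailing sector still consumes
	 * a whole block: e.g. howmany(4096, 512) == 8 while
	 * howmany(4097, 512) == 9 (assuming MFI_SECTOR_LEN is 512).
	 */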
2161 	/* Fill the LBA and Transfer length in CDB */
2162 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2163 	    pass->cdb);
2164 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2165 	pass->header.lun_id = 0;
2166 	pass->header.timeout = 0;
2167 	pass->header.flags = 0;
2168 	pass->header.scsi_status = 0;
2169 	pass->header.sense_len = MFI_SENSE_LEN;
2170 	pass->header.data_len = bio->bio_bcount;
2171 	pass->header.cdb_len = cdb_len;
2172 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2173 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2174 	cm->cm_complete = mfi_bio_complete;
2175 	cm->cm_private = bio;
2176 	cm->cm_data = unmapped_buf;
2177 	cm->cm_len = bio->bio_bcount;
2178 	cm->cm_sg = &pass->sgl;
2179 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2180 	cm->cm_flags = flags;
2181 
2182 	return (cm);
2183 }
2184 
2185 static struct mfi_command *
2186 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2187 {
2188 	struct mfi_io_frame *io;
2189 	struct mfi_command *cm;
2190 	int flags;
2191 	uint32_t blkcount;
2192 	uint32_t context = 0;
2193 
2194 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2195 
2196 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2197 	    return (NULL);
2198 
2199 	/* Zero out the MFI frame */
2200 	context = cm->cm_frame->header.context;
2201 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2202 	cm->cm_frame->header.context = context;
2203 	io = &cm->cm_frame->io;
2204 	switch (bio->bio_cmd) {
2205 	case BIO_READ:
2206 		io->header.cmd = MFI_CMD_LD_READ;
2207 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2208 		break;
2209 	case BIO_WRITE:
2210 		io->header.cmd = MFI_CMD_LD_WRITE;
2211 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2212 		break;
2213 	default:
2214 		/* TODO: what about BIO_DELETE??? */
2215 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2216 	}
2217 
2218 	/* Cheat with the sector length to avoid a non-constant division */
2219 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2220 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2221 	io->header.timeout = 0;
2222 	io->header.flags = 0;
2223 	io->header.scsi_status = 0;
2224 	io->header.sense_len = MFI_SENSE_LEN;
2225 	io->header.data_len = blkcount;
2226 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2227 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2228 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2229 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2230 	cm->cm_complete = mfi_bio_complete;
2231 	cm->cm_private = bio;
2232 	cm->cm_data = unmapped_buf;
2233 	cm->cm_len = bio->bio_bcount;
2234 	cm->cm_sg = &io->sgl;
2235 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2236 	cm->cm_flags = flags;
2237 
2238 	return (cm);
2239 }
2240 
2241 static void
2242 mfi_bio_complete(struct mfi_command *cm)
2243 {
2244 	struct bio *bio;
2245 	struct mfi_frame_header *hdr;
2246 	struct mfi_softc *sc;
2247 
2248 	bio = cm->cm_private;
2249 	hdr = &cm->cm_frame->header;
2250 	sc = cm->cm_sc;
2251 
2252 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2253 		bio->bio_flags |= BIO_ERROR;
2254 		bio->bio_error = EIO;
2255 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2256 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2257 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2258 	} else if (cm->cm_error != 0) {
2259 		bio->bio_flags |= BIO_ERROR;
2260 		bio->bio_error = cm->cm_error;
2261 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2262 		    cm, cm->cm_error);
2263 	}
2264 
2265 	mfi_release_command(cm);
2266 	mfi_disk_complete(bio);
2267 }
2268 
2269 void
2270 mfi_startio(struct mfi_softc *sc)
2271 {
2272 	struct mfi_command *cm;
2273 	struct ccb_hdr *ccbh;
2274 
2275 	for (;;) {
2276 		/* Don't bother if we're short on resources */
2277 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2278 			break;
2279 
2280 		/* Try a command that has already been prepared */
2281 		cm = mfi_dequeue_ready(sc);
2282 
2283 		if (cm == NULL) {
2284 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2285 				cm = sc->mfi_cam_start(ccbh);
2286 		}
2287 
2288 		/* Nope, so look for work on the bioq */
2289 		if (cm == NULL)
2290 			cm = mfi_bio_command(sc);
2291 
2292 		/* No work available, so exit */
2293 		if (cm == NULL)
2294 			break;
2295 
2296 		/* Send the command to the controller */
2297 		if (mfi_mapcmd(sc, cm) != 0) {
2298 			device_printf(sc->mfi_dev, "Failed to startio\n");
2299 			mfi_requeue_ready(cm);
2300 			break;
2301 		}
2302 	}
2303 }
2304 
2305 int
2306 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2307 {
2308 	int error, polled;
2309 
2310 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2311 
2312 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2313 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2314 		if (cm->cm_flags & MFI_CMD_CCB)
2315 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2316 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2317 			    polled);
2318 		else if (cm->cm_flags & MFI_CMD_BIO)
2319 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2320 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2321 			    polled);
2322 		else
2323 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2324 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2325 			    mfi_data_cb, cm, polled);
2326 		if (error == EINPROGRESS) {
2327 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2328 			return (0);
2329 		}
2330 	} else {
2331 		error = mfi_send_frame(sc, cm);
2332 	}
2333 
2334 	return (error);
2335 }
2336 
2337 static void
2338 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2339 {
2340 	struct mfi_frame_header *hdr;
2341 	struct mfi_command *cm;
2342 	union mfi_sgl *sgl;
2343 	struct mfi_softc *sc;
2344 	int i, j, first, dir;
2345 	int sge_size, locked;
2346 
2347 	cm = (struct mfi_command *)arg;
2348 	sc = cm->cm_sc;
2349 	hdr = &cm->cm_frame->header;
2350 	sgl = cm->cm_sg;
2351 
2352 	/*
2353 	 * Check whether we already hold the lock: this is an async
2354 	 * callback, so even though our caller mfi_mapcmd asserts
2355 	 * that it holds the lock, the lock may have been dropped
2356 	 * if bus_dmamap_load returned before this callback was
2357 	 * invoked.
2358 	 */
2359 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2360 		mtx_lock(&sc->mfi_io_lock);
2361 
2362 	if (error) {
2363 		printf("error %d in callback\n", error);
2364 		cm->cm_error = error;
2365 		mfi_complete(sc, cm);
2366 		goto out;
2367 	}
2368 	/* Use the IEEE SGL only for I/Os on a SKINNY controller.
2369 	 * For other commands on a SKINNY controller use either
2370 	 * sg32 or sg64, based on sizeof(bus_addr_t).
2371 	 * Also calculate the total frame size based on the type
2372 	 * of SGL used.
2373 	 */
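	/*
	 * Sizing note (assuming the mfireg.h layouts): a skinny SGE holds
	 * a 64-bit address, a 32-bit length and a 32-bit flag word, i.e.
	 * 16 bytes versus 8 bytes for an sg32 entry, which is why the
	 * matching sge_size is recorded for the frame accounting below.
	 */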
2374 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2375 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2376 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2377 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2378 		for (i = 0; i < nsegs; i++) {
2379 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2380 			sgl->sg_skinny[i].len = segs[i].ds_len;
2381 			sgl->sg_skinny[i].flag = 0;
2382 		}
2383 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2384 		sge_size = sizeof(struct mfi_sg_skinny);
2385 		hdr->sg_count = nsegs;
2386 	} else {
2387 		j = 0;
2388 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2389 			first = cm->cm_stp_len;
2390 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2391 				sgl->sg32[j].addr = segs[0].ds_addr;
2392 				sgl->sg32[j++].len = first;
2393 			} else {
2394 				sgl->sg64[j].addr = segs[0].ds_addr;
2395 				sgl->sg64[j++].len = first;
2396 			}
2397 		} else
2398 			first = 0;
2399 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2400 			for (i = 0; i < nsegs; i++) {
2401 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2402 				sgl->sg32[j++].len = segs[i].ds_len - first;
2403 				first = 0;
2404 			}
2405 		} else {
2406 			for (i = 0; i < nsegs; i++) {
2407 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2408 				sgl->sg64[j++].len = segs[i].ds_len - first;
2409 				first = 0;
2410 			}
2411 			hdr->flags |= MFI_FRAME_SGL64;
2412 		}
2413 		hdr->sg_count = j;
2414 		sge_size = sc->mfi_sge_size;
2415 	}
2416 
2417 	dir = 0;
2418 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2419 		dir |= BUS_DMASYNC_PREREAD;
2420 		hdr->flags |= MFI_FRAME_DIR_READ;
2421 	}
2422 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2423 		dir |= BUS_DMASYNC_PREWRITE;
2424 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2425 	}
2426 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2427 	cm->cm_flags |= MFI_CMD_MAPPED;
2428 
2429 	/*
2430 	 * Instead of calculating the total number of frames in the
2431 	 * compound frame, it's already assumed that there will be at
2432 	 * least 1 frame, so don't compensate for the modulo of the
2433 	 * following division.
2434 	 */
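	/*
	 * For example (assuming MFI_FRAME_SIZE is 64): a command whose
	 * header plus SGL totals 160 bytes yields (160 - 1) / 64 == 2
	 * extra frames after the first.
	 */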
2435 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2436 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2437 
2438 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2439 		printf("error %d in callback from mfi_send_frame\n", error);
2440 		cm->cm_error = error;
2441 		mfi_complete(sc, cm);
2442 		goto out;
2443 	}
2444 
2445 out:
2446 	/* leave the lock in the state we found it */
2447 	if (locked == 0)
2448 		mtx_unlock(&sc->mfi_io_lock);
2449 
2450 	return;
2451 }
2452 
2453 static int
2454 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2455 {
2456 	int error;
2457 
2458 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2459 
2460 	if (sc->MFA_enabled)
2461 		error = mfi_tbolt_send_frame(sc, cm);
2462 	else
2463 		error = mfi_std_send_frame(sc, cm);
2464 
2465 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2466 		mfi_remove_busy(cm);
2467 
2468 	return (error);
2469 }
2470 
2471 static int
2472 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2473 {
2474 	struct mfi_frame_header *hdr;
2475 	int tm = mfi_polled_cmd_timeout * 1000;
2476 
2477 	hdr = &cm->cm_frame->header;
2478 
2479 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2480 		cm->cm_timestamp = time_uptime;
2481 		mfi_enqueue_busy(cm);
2482 	} else {
2483 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2484 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2485 	}
2486 
2487 	/*
2488 	 * The bus address of the command is aligned on a 64 byte boundary,
2489 	 * leaving the least 6 bits as zero.  For whatever reason, the
2490 	 * hardware wants the address shifted right by three, leaving just
2491 	 * 3 zero bits.  These three bits are then used as a prefetching
2492 	 * hint for the hardware to predict how many frames need to be
2493 	 * fetched across the bus.  If a command has more than 8 frames
2494 	 * then the 3 bits are set to 0x7 and the firmware uses other
2495 	 * information in the command to determine the total amount to fetch.
2496 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2497 	 * is enough for both 32bit and 64bit systems.
2498 	 * is enough for both 32bit and 64bit systems.
	 */
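	/*
	 * Illustrative encoding: with this scheme a frame at bus address
	 * 0x1000 with 3 extra frames would be posted to the inbound queue
	 * port as (0x1000 >> 3) | 3 == 0x203.
	 */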
2499 	if (cm->cm_extra_frames > 7)
2500 		cm->cm_extra_frames = 7;
2501 
2502 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2503 
2504 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2505 		return (0);
2506 
2507 	/* This is a polled command, so busy-wait for it to complete. */
2508 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2509 		DELAY(1000);
2510 		tm -= 1;
2511 		if (tm <= 0)
2512 			break;
2513 	}
2514 
2515 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2516 		device_printf(sc->mfi_dev, "Frame %p timed out "
2517 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2518 		return (ETIMEDOUT);
2519 	}
2520 
2521 	return (0);
2522 }
2523 
2524 
2525 void
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2527 {
2528 	int dir;
2529 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530 
2531 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2532 		dir = 0;
2533 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 			dir |= BUS_DMASYNC_POSTREAD;
2536 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 			dir |= BUS_DMASYNC_POSTWRITE;
2538 
2539 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2542 	}
2543 
2544 	cm->cm_flags |= MFI_CMD_COMPLETED;
2545 
2546 	if (cm->cm_complete != NULL)
2547 		cm->cm_complete(cm);
2548 	else
2549 		wakeup(cm);
2550 }
2551 
2552 static int
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2554 {
2555 	struct mfi_command *cm;
2556 	struct mfi_abort_frame *abort;
2557 	int i = 0, error;
2558 	uint32_t context = 0;
2559 
2560 	mtx_lock(&sc->mfi_io_lock);
2561 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 		mtx_unlock(&sc->mfi_io_lock);
2563 		return (EBUSY);
2564 	}
2565 
2566 	/* Zero out the MFI frame */
2567 	context = cm->cm_frame->header.context;
2568 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 	cm->cm_frame->header.context = context;
2570 
2571 	abort = &cm->cm_frame->abort;
2572 	abort->header.cmd = MFI_CMD_ABORT;
2573 	abort->header.flags = 0;
2574 	abort->header.scsi_status = 0;
2575 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 	abort->abort_mfi_addr_hi =
2578 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2579 	cm->cm_data = NULL;
2580 	cm->cm_flags = MFI_CMD_POLLED;
2581 
2582 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 		device_printf(sc->mfi_dev, "failed to abort command\n");
2584 	mfi_release_command(cm);
2585 
2586 	mtx_unlock(&sc->mfi_io_lock);
2587 	while (i < 5 && *cm_abort != NULL) {
2588 		tsleep(cm_abort, 0, "mfiabort",
2589 		    5 * hz);
2590 		i++;
2591 	}
2592 	if (*cm_abort != NULL) {
2593 		/* Force a completion if the command didn't abort */
2594 		mtx_lock(&sc->mfi_io_lock);
2595 		(*cm_abort)->cm_complete(*cm_abort);
2596 		mtx_unlock(&sc->mfi_io_lock);
2597 	}
2598 
2599 	return (error);
2600 }
2601 
2602 int
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2604      int len)
2605 {
2606 	struct mfi_command *cm;
2607 	struct mfi_io_frame *io;
2608 	int error;
2609 	uint32_t context = 0;
2610 
2611 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2612 		return (EBUSY);
2613 
2614 	/* Zero out the MFI frame */
2615 	context = cm->cm_frame->header.context;
2616 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 	cm->cm_frame->header.context = context;
2618 
2619 	io = &cm->cm_frame->io;
2620 	io->header.cmd = MFI_CMD_LD_WRITE;
2621 	io->header.target_id = id;
2622 	io->header.timeout = 0;
2623 	io->header.flags = 0;
2624 	io->header.scsi_status = 0;
2625 	io->header.sense_len = MFI_SENSE_LEN;
2626 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 	io->lba_lo = lba & 0xffffffff;
2631 	cm->cm_data = virt;
2632 	cm->cm_len = len;
2633 	cm->cm_sg = &io->sgl;
2634 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636 
2637 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2639 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 	    BUS_DMASYNC_POSTWRITE);
2641 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 	mfi_release_command(cm);
2643 
2644 	return (error);
2645 }
2646 
2647 int
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2649     int len)
2650 {
2651 	struct mfi_command *cm;
2652 	struct mfi_pass_frame *pass;
2653 	int error, readop, cdb_len;
2654 	uint32_t blkcount;
2655 
2656 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2657 		return (EBUSY);
2658 
2659 	pass = &cm->cm_frame->pass;
2660 	bzero(pass->cdb, 16);
2661 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2662 
2663 	readop = 0;
2664 	blkcount = howmany(len, MFI_SECTOR_LEN);
2665 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 	pass->header.target_id = id;
2667 	pass->header.timeout = 0;
2668 	pass->header.flags = 0;
2669 	pass->header.scsi_status = 0;
2670 	pass->header.sense_len = MFI_SENSE_LEN;
2671 	pass->header.data_len = len;
2672 	pass->header.cdb_len = cdb_len;
2673 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2675 	cm->cm_data = virt;
2676 	cm->cm_len = len;
2677 	cm->cm_sg = &pass->sgl;
2678 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2680 
2681 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2683 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 	    BUS_DMASYNC_POSTWRITE);
2685 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 	mfi_release_command(cm);
2687 
2688 	return (error);
2689 }
2690 
2691 static int
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 {
2694 	struct mfi_softc *sc;
2695 	int error;
2696 
2697 	sc = dev->si_drv1;
2698 
2699 	mtx_lock(&sc->mfi_io_lock);
2700 	if (sc->mfi_detaching)
2701 		error = ENXIO;
2702 	else {
2703 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2704 		error = 0;
2705 	}
2706 	mtx_unlock(&sc->mfi_io_lock);
2707 
2708 	return (error);
2709 }
2710 
2711 static int
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 {
2714 	struct mfi_softc *sc;
2715 	struct mfi_aen *mfi_aen_entry, *tmp;
2716 
2717 	sc = dev->si_drv1;
2718 
2719 	mtx_lock(&sc->mfi_io_lock);
2720 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2721 
2722 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 		if (mfi_aen_entry->p == curproc) {
2724 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 			    aen_link);
2726 			free(mfi_aen_entry, M_MFIBUF);
2727 		}
2728 	}
2729 	mtx_unlock(&sc->mfi_io_lock);
2730 	return (0);
2731 }
2732 
2733 static int
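/*
 * Opcodes that add or remove volumes take the config sx lock so that
 * volume probes and deletions cannot interleave; the returned flag is
 * handed back to mfi_config_unlock() by the caller.
 */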
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2735 {
2736 
2737 	switch (opcode) {
2738 	case MFI_DCMD_LD_DELETE:
2739 	case MFI_DCMD_CFG_ADD:
2740 	case MFI_DCMD_CFG_CLEAR:
2741 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 		sx_xlock(&sc->mfi_config_lock);
2743 		return (1);
2744 	default:
2745 		return (0);
2746 	}
2747 }
2748 
2749 static void
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2751 {
2752 
2753 	if (locked)
2754 		sx_xunlock(&sc->mfi_config_lock);
2755 }
2756 
2757 /*
2758  * Perform pre-issue checks on commands from userland and possibly veto
2759  * them.
2760  */
2761 static int
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2763 {
2764 	struct mfi_disk *ld, *ld2;
2765 	int error;
2766 	struct mfi_system_pd *syspd = NULL;
2767 	uint16_t syspd_id;
2768 	uint16_t *mbox;
2769 
2770 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2771 	error = 0;
2772 	switch (cm->cm_frame->dcmd.opcode) {
2773 	case MFI_DCMD_LD_DELETE:
2774 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2776 				break;
2777 		}
2778 		if (ld == NULL)
2779 			error = ENOENT;
2780 		else
2781 			error = mfi_disk_disable(ld);
2782 		break;
2783 	case MFI_DCMD_CFG_CLEAR:
2784 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 			error = mfi_disk_disable(ld);
2786 			if (error)
2787 				break;
2788 		}
2789 		if (error) {
2790 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2791 				if (ld2 == ld)
2792 					break;
2793 				mfi_disk_enable(ld2);
2794 			}
2795 		}
2796 		break;
2797 	case MFI_DCMD_PD_STATE_SET:
2798 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2799 		syspd_id = mbox[0];
2800 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 				if (syspd->pd_id == syspd_id)
2803 					break;
2804 			}
2805 		}
2806 		else
2807 			break;
2808 		if (syspd)
2809 			error = mfi_syspd_disable(syspd);
2810 		break;
2811 	default:
2812 		break;
2813 	}
2814 	return (error);
2815 }
2816 
2817 /* Perform post-issue checks on commands from userland. */
2818 static void
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2820 {
2821 	struct mfi_disk *ld, *ldn;
2822 	struct mfi_system_pd *syspd = NULL;
2823 	uint16_t syspd_id;
2824 	uint16_t *mbox;
2825 
2826 	switch (cm->cm_frame->dcmd.opcode) {
2827 	case MFI_DCMD_LD_DELETE:
2828 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2830 				break;
2831 		}
2832 		KASSERT(ld != NULL, ("volume disappeared"));
2833 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2834 			mtx_unlock(&sc->mfi_io_lock);
2835 			mtx_lock(&Giant);
2836 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2837 			mtx_unlock(&Giant);
2838 			mtx_lock(&sc->mfi_io_lock);
2839 		} else
2840 			mfi_disk_enable(ld);
2841 		break;
2842 	case MFI_DCMD_CFG_CLEAR:
2843 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 			mtx_unlock(&sc->mfi_io_lock);
2845 			mtx_lock(&Giant);
2846 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2848 			}
2849 			mtx_unlock(&Giant);
2850 			mtx_lock(&sc->mfi_io_lock);
2851 		} else {
2852 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 				mfi_disk_enable(ld);
2854 		}
2855 		break;
2856 	case MFI_DCMD_CFG_ADD:
2857 		mfi_ldprobe(sc);
2858 		break;
2859 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2860 		mfi_ldprobe(sc);
2861 		break;
2862 	case MFI_DCMD_PD_STATE_SET:
2863 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2864 		syspd_id = mbox[0];
2865 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2867 				if (syspd->pd_id == syspd_id)
2868 					break;
2869 			}
2870 		}
2871 		else
2872 			break;
2873 		/* If the transition fails then enable the syspd again */
2874 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 			mfi_syspd_enable(syspd);
2876 		break;
2877 	}
2878 }
2879 
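/*
 * Determine whether a userland command targets an SSCD (CacheCade)
 * volume.  Creating or deleting such a volume skips the usual pre/post
 * checks, since no mfid child device is attached for it (see
 * mfi_add_ld()).
 */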
2880 static int
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 {
2883 	struct mfi_config_data *conf_data;
2884 	struct mfi_command *ld_cm = NULL;
2885 	struct mfi_ld_info *ld_info = NULL;
2886 	struct mfi_ld_config *ld;
2887 	char *p;
2888 	int error = 0;
2889 
2890 	conf_data = (struct mfi_config_data *)cm->cm_data;
2891 
2892 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 		p = (char *)conf_data->array;
2894 		p += conf_data->array_size * conf_data->array_count;
2895 		ld = (struct mfi_ld_config *)p;
2896 		if (ld->params.isSSCD == 1)
2897 			error = 1;
2898 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 		    (void **)&ld_info, sizeof(*ld_info));
2901 		if (error) {
2902 			device_printf(sc->mfi_dev, "Failed to allocate "
2903 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2904 			if (ld_info)
2905 				free(ld_info, M_MFIBUF);
2906 			return 0;
2907 		}
2908 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2910 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 		if (mfi_wait_command(sc, ld_cm) != 0) {
2912 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 			mfi_release_command(ld_cm);
2914 			free(ld_info, M_MFIBUF);
2915 			return 0;
2916 		}
2917 
2918 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 			free(ld_info, M_MFIBUF);
2920 			mfi_release_command(ld_cm);
2921 			return 0;
2922 		}
2923 		else
2924 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925 
2926 		if (ld_info->ld_config.params.isSSCD == 1)
2927 			error = 1;
2928 
2929 		mfi_release_command(ld_cm);
2930 		free(ld_info, M_MFIBUF);
2932 	}
2933 	return error;
2934 }
2935 
2936 static int
2937 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2938 {
2939 	uint8_t i;
2940 	struct mfi_ioc_packet *ioc;
2941 	int sge_size, error;
2942 	struct megasas_sge *kern_sge;
2943 	ioc = (struct mfi_ioc_packet *)arg;
2944 
2945 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2946 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2947 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2948 
2949 	if (sizeof(bus_addr_t) == 8) {
2950 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2951 		cm->cm_extra_frames = 2;
2952 		sge_size = sizeof(struct mfi_sg64);
2953 	} else {
2954 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2955 		sge_size = sizeof(struct mfi_sg32);
2956 	}
2957 
2958 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
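	/*
	 * For each user SG element: create a DMA tag, allocate a kernel
	 * bounce buffer, load it to obtain its bus address, record that
	 * address in both the kernel SGE array and the frame's SGL, and
	 * copy the user data into the bounce buffer.
	 */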
2959 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2960 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2961 			1, 0,			/* algnmnt, boundary */
2962 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2963 			BUS_SPACE_MAXADDR,	/* highaddr */
2964 			NULL, NULL,		/* filter, filterarg */
2965 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2966 			2,			/* nsegments */
2967 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2968 			BUS_DMA_ALLOCNOW,	/* flags */
2969 			NULL, NULL,		/* lockfunc, lockarg */
2970 			&sc->mfi_kbuff_arr_dmat[i])) {
2971 			device_printf(sc->mfi_dev,
2972 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2973 			return (ENOMEM);
2974 		}
2975 
2976 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2977 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2978 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2979 			device_printf(sc->mfi_dev,
2980 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2981 			return (ENOMEM);
2982 		}
2983 
2984 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2985 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2986 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2987 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2988 
2989 		if (!sc->kbuff_arr[i]) {
2990 			device_printf(sc->mfi_dev,
2991 			    "Could not allocate memory for kbuff_arr info\n");
2992 			return -1;
2993 		}
2994 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2995 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2996 
2997 		if (sizeof(bus_addr_t) == 8) {
2998 			cm->cm_frame->stp.sgl.sg64[i].addr =
2999 			    kern_sge[i].phys_addr;
3000 			cm->cm_frame->stp.sgl.sg64[i].len =
3001 			    ioc->mfi_sgl[i].iov_len;
3002 		} else {
3003 			cm->cm_frame->stp.sgl.sg32[i].addr =
3004 			    kern_sge[i].phys_addr;
3005 			cm->cm_frame->stp.sgl.sg32[i].len =
3006 			    ioc->mfi_sgl[i].iov_len;
3007 		}
3008 
3009 		error = copyin(ioc->mfi_sgl[i].iov_base,
3010 		    sc->kbuff_arr[i],
3011 		    ioc->mfi_sgl[i].iov_len);
3012 		if (error != 0) {
3013 			device_printf(sc->mfi_dev, "Copy in failed\n");
3014 			return error;
3015 		}
3016 	}
3017 
3018 	cm->cm_flags |= MFI_CMD_MAPPED;
3019 	return (0);
3020 }
3021 
3022 static int
3023 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3024 {
3025 	struct mfi_command *cm;
3026 	struct mfi_dcmd_frame *dcmd;
3027 	void *ioc_buf = NULL;
3028 	uint32_t context;
3029 	int error = 0, locked;
3030 
3032 	if (ioc->buf_size > 0) {
3033 		if (ioc->buf_size > 1024 * 1024)
3034 			return (ENOMEM);
3035 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3036 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3037 		if (error) {
3038 			device_printf(sc->mfi_dev, "failed to copyin\n");
3039 			free(ioc_buf, M_MFIBUF);
3040 			return (error);
3041 		}
3042 	}
3043 
3044 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3045 
3046 	mtx_lock(&sc->mfi_io_lock);
3047 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3048 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3049 
3050 	/* Save context for later */
3051 	context = cm->cm_frame->header.context;
3052 
3053 	dcmd = &cm->cm_frame->dcmd;
3054 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3055 
3056 	cm->cm_sg = &dcmd->sgl;
3057 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3058 	cm->cm_data = ioc_buf;
3059 	cm->cm_len = ioc->buf_size;
3060 
3061 	/* restore context */
3062 	cm->cm_frame->header.context = context;
3063 
3064 	/* Cheat since we don't know if we're writing or reading */
3065 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3066 
3067 	error = mfi_check_command_pre(sc, cm);
3068 	if (error)
3069 		goto out;
3070 
3071 	error = mfi_wait_command(sc, cm);
3072 	if (error) {
3073 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3074 		goto out;
3075 	}
3076 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3077 	mfi_check_command_post(sc, cm);
3078 out:
3079 	mfi_release_command(cm);
3080 	mtx_unlock(&sc->mfi_io_lock);
3081 	mfi_config_unlock(sc, locked);
3082 	if (ioc->buf_size > 0)
3083 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3084 	if (ioc_buf)
3085 		free(ioc_buf, M_MFIBUF);
3086 	return (error);
3087 }
3088 
3089 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3090 
3091 static int
3092 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3093 {
3094 	struct mfi_softc *sc;
3095 	union mfi_statrequest *ms;
3096 	struct mfi_ioc_packet *ioc;
3097 #ifdef COMPAT_FREEBSD32
3098 	struct mfi_ioc_packet32 *ioc32;
3099 #endif
3100 	struct mfi_ioc_aen *aen;
3101 	struct mfi_command *cm = NULL;
3102 	uint32_t context = 0;
3103 	union mfi_sense_ptr sense_ptr;
3104 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3105 	size_t len;
3106 	int i, res;
3107 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3108 #ifdef COMPAT_FREEBSD32
3109 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3110 	struct mfi_ioc_passthru iop_swab;
3111 #endif
3112 	int error, locked;
3113 	union mfi_sgl *sgl;
3114 	sc = dev->si_drv1;
3115 	error = 0;
3116 
3117 	if (sc->adpreset)
3118 		return EBUSY;
3119 
3120 	if (sc->hw_crit_error)
3121 		return EBUSY;
3122 
3123 	if (sc->issuepend_done == 0)
3124 		return EBUSY;
3125 
3126 	switch (cmd) {
3127 	case MFIIO_STATS:
3128 		ms = (union mfi_statrequest *)arg;
3129 		switch (ms->ms_item) {
3130 		case MFIQ_FREE:
3131 		case MFIQ_BIO:
3132 		case MFIQ_READY:
3133 		case MFIQ_BUSY:
3134 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3135 			    sizeof(struct mfi_qstat));
3136 			break;
3137 		default:
3138 			error = ENOIOCTL;
3139 			break;
3140 		}
3141 		break;
3142 	case MFIIO_QUERY_DISK:
3143 	{
3144 		struct mfi_query_disk *qd;
3145 		struct mfi_disk *ld;
3146 
3147 		qd = (struct mfi_query_disk *)arg;
3148 		mtx_lock(&sc->mfi_io_lock);
3149 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3150 			if (ld->ld_id == qd->array_id)
3151 				break;
3152 		}
3153 		if (ld == NULL) {
3154 			qd->present = 0;
3155 			mtx_unlock(&sc->mfi_io_lock);
3156 			return (0);
3157 		}
3158 		qd->present = 1;
3159 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3160 			qd->open = 1;
3161 		bzero(qd->devname, SPECNAMELEN + 1);
3162 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3163 		mtx_unlock(&sc->mfi_io_lock);
3164 		break;
3165 	}
3166 	case MFI_CMD:
3167 #ifdef COMPAT_FREEBSD32
3168 	case MFI_CMD32:
3169 #endif
3170 		{
3171 		devclass_t devclass;
3172 		ioc = (struct mfi_ioc_packet *)arg;
3173 		int adapter;
3174 
3175 		adapter = ioc->mfi_adapter_no;
3176 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3177 			devclass = devclass_find("mfi");
3178 			sc = devclass_get_softc(devclass, adapter);
3179 		}
3180 		mtx_lock(&sc->mfi_io_lock);
3181 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3182 			mtx_unlock(&sc->mfi_io_lock);
3183 			return (EBUSY);
3184 		}
3185 		mtx_unlock(&sc->mfi_io_lock);
3186 		locked = 0;
3187 
3188 		/*
3189 		 * Save off the original context since copying from user
3190 		 * space will clobber some data.
3191 		 */
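		/*
		 * The frame's context field is how completions are mapped
		 * back to their mfi_command, so the value saved here is
		 * restored after the copy from user space clobbers it.
		 */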
3192 		context = cm->cm_frame->header.context;
3193 		cm->cm_frame->header.context = cm->cm_index;
3194 
3195 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3196 		    2 * MEGAMFI_FRAME_SIZE);
3197 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3198 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3199 		cm->cm_frame->header.scsi_status = 0;
3200 		cm->cm_frame->header.pad0 = 0;
3201 		if (ioc->mfi_sge_count) {
3202 			cm->cm_sg =
3203 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3204 		}
3205 		sgl = cm->cm_sg;
3206 		cm->cm_flags = 0;
3207 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3208 			cm->cm_flags |= MFI_CMD_DATAIN;
3209 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3210 			cm->cm_flags |= MFI_CMD_DATAOUT;
3211 		/* Legacy app shim */
3212 		if (cm->cm_flags == 0)
3213 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3214 		cm->cm_len = cm->cm_frame->header.data_len;
3215 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3216 #ifdef COMPAT_FREEBSD32
3217 			if (cmd == MFI_CMD) {
3218 #endif
3219 				/* Native */
3220 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3221 #ifdef COMPAT_FREEBSD32
3222 			} else {
3223 				/* 32bit on 64bit */
3224 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3225 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3226 			}
3227 #endif
3228 			cm->cm_len += cm->cm_stp_len;
3229 		}
3230 		if (cm->cm_len &&
3231 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3232 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3233 			    M_WAITOK | M_ZERO);
3234 		} else {
3235 			cm->cm_data = 0;
3236 		}
3237 
3238 		/* restore header context */
3239 		cm->cm_frame->header.context = context;
3240 
3241 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3242 			res = mfi_stp_cmd(sc, cm, arg);
3243 			if (res != 0)
3244 				goto out;
3245 		} else {
3246 			temp = data;
3247 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3248 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3249 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3250 #ifdef COMPAT_FREEBSD32
3251 					if (cmd == MFI_CMD) {
3252 #endif
3253 						/* Native */
3254 						addr = ioc->mfi_sgl[i].iov_base;
3255 						len = ioc->mfi_sgl[i].iov_len;
3256 #ifdef COMPAT_FREEBSD32
3257 					} else {
3258 						/* 32bit on 64bit */
3259 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3260 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3261 						len = ioc32->mfi_sgl[i].iov_len;
3262 					}
3263 #endif
3264 					error = copyin(addr, temp, len);
3265 					if (error != 0) {
3266 						device_printf(sc->mfi_dev,
3267 						    "Copy in failed\n");
3268 						goto out;
3269 					}
3270 					temp = &temp[len];
3271 				}
3272 			}
3273 		}
3274 
3275 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3276 			locked = mfi_config_lock(sc,
3277 			     cm->cm_frame->dcmd.opcode);
3278 
3279 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3280 			cm->cm_frame->pass.sense_addr_lo =
3281 			    (uint32_t)cm->cm_sense_busaddr;
3282 			cm->cm_frame->pass.sense_addr_hi =
3283 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3284 		}
3285 		mtx_lock(&sc->mfi_io_lock);
3286 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3287 		if (!skip_pre_post) {
3288 			error = mfi_check_command_pre(sc, cm);
3289 			if (error) {
3290 				mtx_unlock(&sc->mfi_io_lock);
3291 				goto out;
3292 			}
3293 		}
3294 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3295 			device_printf(sc->mfi_dev,
3296 			    "Controller poll failed\n");
3297 			mtx_unlock(&sc->mfi_io_lock);
3298 			goto out;
3299 		}
3300 		if (!skip_pre_post) {
3301 			mfi_check_command_post(sc, cm);
3302 		}
3303 		mtx_unlock(&sc->mfi_io_lock);
3304 
3305 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3306 			temp = data;
3307 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3308 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3309 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3310 #ifdef COMPAT_FREEBSD32
3311 					if (cmd == MFI_CMD) {
3312 #endif
3313 						/* Native */
3314 						addr = ioc->mfi_sgl[i].iov_base;
3315 						len = ioc->mfi_sgl[i].iov_len;
3316 #ifdef COMPAT_FREEBSD32
3317 					} else {
3318 						/* 32bit on 64bit */
3319 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3320 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3321 						len = ioc32->mfi_sgl[i].iov_len;
3322 					}
3323 #endif
3324 					error = copyout(temp, addr, len);
3325 					if (error != 0) {
3326 						device_printf(sc->mfi_dev,
3327 						    "Copy out failed\n");
3328 						goto out;
3329 					}
3330 					temp = &temp[len];
3331 				}
3332 			}
3333 		}
3334 
3335 		if (ioc->mfi_sense_len) {
3336 			/* get user-space sense ptr then copy out sense */
3337 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3338 			    &sense_ptr.sense_ptr_data[0],
3339 			    sizeof(sense_ptr.sense_ptr_data));
3340 #ifdef COMPAT_FREEBSD32
3341 			if (cmd != MFI_CMD) {
3342 				/*
3343 				 * not 64bit native so zero out any address
3344 				 * over 32bit */
3345 				sense_ptr.addr.high = 0;
3346 			}
3347 #endif
3348 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3349 			    ioc->mfi_sense_len);
3350 			if (error != 0) {
3351 				device_printf(sc->mfi_dev,
3352 				    "Copy out failed\n");
3353 				goto out;
3354 			}
3355 		}
3356 
3357 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3358 out:
3359 		mfi_config_unlock(sc, locked);
3360 		if (data)
3361 			free(data, M_MFIBUF);
3362 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3363 			for (i = 0; i < 2; i++) {
3364 				if (sc->kbuff_arr[i]) {
3365 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3366 						bus_dmamap_unload(
3367 						    sc->mfi_kbuff_arr_dmat[i],
3368 						    sc->mfi_kbuff_arr_dmamap[i]
3369 						    );
3370 					if (sc->kbuff_arr[i] != NULL)
3371 						bus_dmamem_free(
3372 						    sc->mfi_kbuff_arr_dmat[i],
3373 						    sc->kbuff_arr[i],
3374 						    sc->mfi_kbuff_arr_dmamap[i]
3375 						    );
3376 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3377 						bus_dma_tag_destroy(
3378 						    sc->mfi_kbuff_arr_dmat[i]);
3379 				}
3380 			}
3381 		}
3382 		if (cm) {
3383 			mtx_lock(&sc->mfi_io_lock);
3384 			mfi_release_command(cm);
3385 			mtx_unlock(&sc->mfi_io_lock);
3386 		}
3387 
3388 		break;
3389 		}
3390 	case MFI_SET_AEN:
3391 		aen = (struct mfi_ioc_aen *)arg;
3392 		mtx_lock(&sc->mfi_io_lock);
3393 		error = mfi_aen_register(sc, aen->aen_seq_num,
3394 		    aen->aen_class_locale);
3395 		mtx_unlock(&sc->mfi_io_lock);
3396 
3397 		break;
3398 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3399 		{
3400 			devclass_t devclass;
3401 			struct mfi_linux_ioc_packet l_ioc;
3402 			int adapter;
3403 
3404 			devclass = devclass_find("mfi");
3405 			if (devclass == NULL)
3406 				return (ENOENT);
3407 
3408 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3409 			if (error)
3410 				return (error);
3411 			adapter = l_ioc.lioc_adapter_no;
3412 			sc = devclass_get_softc(devclass, adapter);
3413 			if (sc == NULL)
3414 				return (ENOENT);
3415 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3416 			    cmd, arg, flag, td));
3417 			break;
3418 		}
3419 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3420 		{
3421 			devclass_t devclass;
3422 			struct mfi_linux_ioc_aen l_aen;
3423 			int adapter;
3424 
3425 			devclass = devclass_find("mfi");
3426 			if (devclass == NULL)
3427 				return (ENOENT);
3428 
3429 			error = copyin(arg, &l_aen, sizeof(l_aen));
3430 			if (error)
3431 				return (error);
3432 			adapter = l_aen.laen_adapter_no;
3433 			sc = devclass_get_softc(devclass, adapter);
3434 			if (sc == NULL)
3435 				return (ENOENT);
3436 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3437 			    cmd, arg, flag, td));
3438 			break;
3439 		}
3440 #ifdef COMPAT_FREEBSD32
3441 	case MFIIO_PASSTHRU32:
3442 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3443 			error = ENOTTY;
3444 			break;
3445 		}
3446 		iop_swab.ioc_frame	= iop32->ioc_frame;
3447 		iop_swab.buf_size	= iop32->buf_size;
3448 		iop_swab.buf		= PTRIN(iop32->buf);
3449 		iop			= &iop_swab;
3450 		/* FALLTHROUGH */
3451 #endif
3452 	case MFIIO_PASSTHRU:
3453 		error = mfi_user_command(sc, iop);
3454 #ifdef COMPAT_FREEBSD32
3455 		if (cmd == MFIIO_PASSTHRU32)
3456 			iop32->ioc_frame = iop_swab.ioc_frame;
3457 #endif
3458 		break;
3459 	default:
3460 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3461 		error = ENOTTY;
3462 		break;
3463 	}
3464 
3465 	return (error);
3466 }
3467 
3468 static int
3469 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3470 {
3471 	struct mfi_softc *sc;
3472 	struct mfi_linux_ioc_packet l_ioc;
3473 	struct mfi_linux_ioc_aen l_aen;
3474 	struct mfi_command *cm = NULL;
3475 	struct mfi_aen *mfi_aen_entry;
3476 	union mfi_sense_ptr sense_ptr;
3477 	uint32_t context = 0;
3478 	uint8_t *data = NULL, *temp;
3479 	int i;
3480 	int error, locked;
3481 
3482 	sc = dev->si_drv1;
3483 	error = 0;
3484 	switch (cmd) {
3485 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3486 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3487 		if (error != 0)
3488 			return (error);
3489 
3490 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3491 			return (EINVAL);
3492 		}
3493 
3494 		mtx_lock(&sc->mfi_io_lock);
3495 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3496 			mtx_unlock(&sc->mfi_io_lock);
3497 			return (EBUSY);
3498 		}
3499 		mtx_unlock(&sc->mfi_io_lock);
3500 		locked = 0;
3501 
3502 		/*
3503 		 * Save off the original context since copying from user
3504 		 * space will clobber some data.
3505 		 */
3506 		context = cm->cm_frame->header.context;
3507 
3508 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3509 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3510 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3511 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3512 		cm->cm_frame->header.scsi_status = 0;
3513 		cm->cm_frame->header.pad0 = 0;
3514 		if (l_ioc.lioc_sge_count)
3515 			cm->cm_sg =
3516 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3517 		cm->cm_flags = 0;
3518 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3519 			cm->cm_flags |= MFI_CMD_DATAIN;
3520 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3521 			cm->cm_flags |= MFI_CMD_DATAOUT;
3522 		cm->cm_len = cm->cm_frame->header.data_len;
3523 		if (cm->cm_len &&
3524 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3525 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3526 			    M_WAITOK | M_ZERO);
3527 		} else {
3528 			cm->cm_data = 0;
3529 		}
3530 
3531 		/* restore header context */
3532 		cm->cm_frame->header.context = context;
3533 
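		/*
		 * DATAOUT: gather the user's scatter/gather segments into
		 * the single contiguous kernel bounce buffer.
		 */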
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

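		/*
		 * SCSI passthru commands use the command's preallocated
		 * sense buffer; split the 64-bit bus address into the
		 * frame's lo/hi words.
		 */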
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		mtx_lock(&sc->mfi_io_lock);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "mfi_wait_command failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		mtx_unlock(&sc->mfi_io_lock);

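		/*
		 * DATAIN: scatter the kernel bounce buffer back out to the
		 * user's scatter/gather segments.
		 */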
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __amd64__
			/*
			 * Only 32-bit Linux binaries are supported, so zero
			 * out any address above 32 bits.
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

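		/*
		 * Reflect the firmware's final command status byte back
		 * into the user's copy of the frame header.
		 */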
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		device_printf(sc->mfi_dev, "Registering AEN for pid %d\n",
		    curproc->p_pid);
		/* An M_WAITOK allocation cannot fail, so no NULL check. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOTTY;
		break;
	}

	return (error);
}

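/*
 * poll(2) handler for the control device: readable once an AEN has
 * triggered; POLLERR when nothing has triggered and no AEN command is
 * outstanding at all.
 */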
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}

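/*
 * Debugging aid: walk every mfi(4) instance and print any busy command
 * that has been outstanding longer than mfi_cmd_timeout seconds.
 */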
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - mfi_cmd_timeout;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp <= deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}

#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}

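/*
 * Watchdog callout: runs every mfi_cmd_timeout seconds and reports busy
 * commands that have exceeded the timeout.  Commands are only reported,
 * never force-failed; see the comment in the loop below.
 */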
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm, *tmp;
	time_t deadline;
	int timedout = 0;

	deadline = time_uptime - mfi_cmd_timeout;
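	/*
	 * With no adapter reset pending, give the Thunderbolt reset
	 * handler first crack; if it returns zero, simply re-arm the
	 * watchdog and skip this scan.
	 */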
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    mfi_cmd_timeout * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp <= deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever, we
				 * do not fail them, as there is no way to
				 * tell whether the controller has actually
				 * processed them or not.
				 *
				 * In addition, it is very likely that
				 * force-failing a command here would cause
				 * a panic, e.g. in UFS.
				 */
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

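	/* Compile-time debugging hook: change to if (1) to dump all adapters. */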
	if (0)
		mfi_dump_all();
}