xref: /freebsd/sys/dev/mfi/mfi.c (revision a7623790fb345e6dc986dfd31df0ace115e6f2e4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	   0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	   &mfi_polled_cmd_timeout, 0,
	   "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");
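
/*
 * All of the hw.mfi.* knobs above are also loader tunables (CTLFLAG_RWTUN/
 * CTLFLAG_RDTUN).  For example, a hypothetical /boot/loader.conf entry such
 * as hw.mfi.max_cmds="64" would cap the driver's command count below the
 * controller's advertised limit at boot.
 */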

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

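/*
 * Walk the firmware through its boot-time state machine until it reports
 * READY.  Each pass reads the status register, issues whatever doorbell
 * write the current state calls for (handshake clear, hotplug ack, etc.),
 * and then polls in 100ms steps for up to max_wait seconds for the state
 * to change.  A FAULT state, or a state that never changes, is fatal.
 */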
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

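/*
 * bus_dmamap_load() callback for single-segment mappings: stash the bus
 * address of the segment in the caller-supplied pointer.
 */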
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

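/*
 * Top-level attach: bring the firmware to READY, carve out the DMA areas
 * (comms queues, command frames, sense buffers, plus the ThunderBolt pools
 * where applicable), allocate the command array, initialize the firmware
 * queues, hook up the interrupt, and register the management cdev, sysctls,
 * shutdown handler and watchdog.
 */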
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat,
		    (void **)&sc->request_message_pool, BUS_DMA_NOWAIT,
		    &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a separate DMA mapping for the MPI2 IOC Init
		 * descriptor; it is kept apart from the request and reply
		 * descriptor allocations above to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
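	/*
	 * One 64-byte frame for the command header plus however many extra
	 * frames the worst-case S/G list needs, rounded up.
	 */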
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

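/*
 * Carve the preallocated frame and sense pools into per-command slices:
 * command i gets the frame at i * mfi_cmd_size and the sense buffer at
 * i * MFI_SENSE_LEN, with the frame's context field prestaged to i so
 * completions can be mapped back to their mfi_command.
 */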
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues (e.g. the busy queue) depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure that it is
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

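/*
 * Set up (but do not issue) a DCMD frame.  If the caller passes a buffer
 * pointer whose target is NULL, a data buffer of bufsize is allocated on
 * its behalf; the caller still picks the data direction flags and maps
 * the command itself.
 */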
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

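/*
 * Tell the firmware where the reply queue and producer/consumer indices
 * live in host memory via a polled MFI_CMD_INIT frame.  Only used on the
 * non-ThunderBolt path; ThunderBolt controllers go through
 * mfi_tbolt_init_MFI_queue() instead.
 */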
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);

	config_intrhook_disestablish(&sc->mfi_ich);
}

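/*
 * Reply-queue interrupt handler.  The firmware advances hw_pi as it posts
 * completed command contexts into hw_reply_q[]; we consume entries from
 * hw_ci up to hw_pi, complete each command, and write hw_ci back.  The
 * ring holds mfi_max_fw_cmds + 1 entries, hence the wrap at that value.
 */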
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is the number of seconds since boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one that was logged after the last shutdown.
		 * Avoid acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

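/*
 * Taskqueue handler: drain the event queue under the I/O lock, then decode
 * the events unlocked, since mfi_decode_evt() may need Giant and the config
 * lock to attach or detach child devices.
 */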
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(, mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

1667 static int
1668 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1669 {
1670 	struct mfi_command *cm;
1671 	struct mfi_dcmd_frame *dcmd;
1672 	union mfi_evt current_aen, prior_aen;
1673 	struct mfi_evt_detail *ed = NULL;
1674 	int error = 0;
1675 
1676 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1677 
1678 	current_aen.word = locale;
1679 	if (sc->mfi_aen_cm != NULL) {
1680 		prior_aen.word =
1681 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1682 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1683 		    !((prior_aen.members.locale & current_aen.members.locale)
1684 		    ^ current_aen.members.locale)) {
1685 			return (0);
1686 		} else {
1687 			prior_aen.members.locale |= current_aen.members.locale;
1688 			if (prior_aen.members.evt_class
1689 			    < current_aen.members.evt_class)
1690 				current_aen.members.evt_class =
1691 				    prior_aen.members.evt_class;
1692 			mfi_abort(sc, &sc->mfi_aen_cm);
1693 		}
1694 	}
1695 
1696 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1697 	    (void **)&ed, sizeof(*ed));
1698 	if (error)
1699 		goto out;
1700 
1701 	dcmd = &cm->cm_frame->dcmd;
1702 	((uint32_t *)&dcmd->mbox)[0] = seq;
1703 	((uint32_t *)&dcmd->mbox)[1] = locale;
1704 	cm->cm_flags = MFI_CMD_DATAIN;
1705 	cm->cm_complete = mfi_aen_complete;
1706 
1707 	sc->last_seq_num = seq;
1708 	sc->mfi_aen_cm = cm;
1709 
1710 	mfi_enqueue_ready(cm);
1711 	mfi_startio(sc);
1712 
1713 out:
1714 	return (error);
1715 }
1716 
1717 static void
1718 mfi_aen_complete(struct mfi_command *cm)
1719 {
1720 	struct mfi_frame_header *hdr;
1721 	struct mfi_softc *sc;
1722 	struct mfi_evt_detail *detail;
1723 	struct mfi_aen *mfi_aen_entry, *tmp;
1724 	int seq = 0, aborted = 0;
1725 
1726 	sc = cm->cm_sc;
1727 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1728 
1729 	if (sc->mfi_aen_cm == NULL)
1730 		return;
1731 
1732 	hdr = &cm->cm_frame->header;
1733 
1734 	if (sc->cm_aen_abort ||
1735 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1736 		sc->cm_aen_abort = 0;
1737 		aborted = 1;
1738 	} else {
1739 		sc->mfi_aen_triggered = 1;
1740 		if (sc->mfi_poll_waiting) {
1741 			sc->mfi_poll_waiting = 0;
1742 			selwakeup(&sc->mfi_select);
1743 		}
1744 		detail = cm->cm_data;
1745 		mfi_queue_evt(sc, detail);
1746 		seq = detail->seq + 1;
1747 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1748 		    tmp) {
1749 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1750 			    aen_link);
1751 			PROC_LOCK(mfi_aen_entry->p);
1752 			kern_psignal(mfi_aen_entry->p, SIGIO);
1753 			PROC_UNLOCK(mfi_aen_entry->p);
1754 			free(mfi_aen_entry, M_MFIBUF);
1755 		}
1756 	}
1757 
1758 	free(cm->cm_data, M_MFIBUF);
1759 	wakeup(&sc->mfi_aen_cm);
1760 	sc->mfi_aen_cm = NULL;
1761 	mfi_release_command(cm);
1762 
1763 	/* set it up again so the driver can catch more events */
1764 	if (!aborted)
1765 		mfi_aen_setup(sc, seq);
1766 }
1767 
1768 #define MAX_EVENTS 15
1769 
1770 static int
1771 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1772 {
1773 	struct mfi_command *cm;
1774 	struct mfi_dcmd_frame *dcmd;
1775 	struct mfi_evt_list *el;
1776 	union mfi_evt class_locale;
1777 	int error, i, seq, size;
1778 
1779 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1780 
1781 	class_locale.members.reserved = 0;
1782 	class_locale.members.locale = mfi_event_locale;
1783 	class_locale.members.evt_class  = mfi_event_class;
1784 
1785 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1786 		* (MAX_EVENTS - 1);
1787 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1788 	if (el == NULL)
1789 		return (ENOMEM);
1790 
1791 	for (seq = start_seq;;) {
1792 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1793 			free(el, M_MFIBUF);
1794 			return (EBUSY);
1795 		}
1796 
1797 		dcmd = &cm->cm_frame->dcmd;
1798 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1799 		dcmd->header.cmd = MFI_CMD_DCMD;
1800 		dcmd->header.timeout = 0;
1801 		dcmd->header.data_len = size;
1802 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1803 		((uint32_t *)&dcmd->mbox)[0] = seq;
1804 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1805 		cm->cm_sg = &dcmd->sgl;
1806 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1807 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1808 		cm->cm_data = el;
1809 		cm->cm_len = size;
1810 
1811 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1812 			device_printf(sc->mfi_dev,
1813 			    "Failed to get controller entries\n");
1814 			mfi_release_command(cm);
1815 			break;
1816 		}
1817 
1818 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1819 		    BUS_DMASYNC_POSTREAD);
1820 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1821 
1822 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1823 			mfi_release_command(cm);
1824 			break;
1825 		}
1826 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1827 			device_printf(sc->mfi_dev,
1828 			    "Error %d fetching controller entries\n",
1829 			    dcmd->header.cmd_status);
1830 			mfi_release_command(cm);
1831 			error = EIO;
1832 			break;
1833 		}
1834 		mfi_release_command(cm);
1835 
1836 		for (i = 0; i < el->count; i++) {
1837 			/*
1838 			 * If this event is newer than 'stop_seq' then
1839 			 * break out of the loop.  Note that the log
1840 			 * is a circular buffer so we have to handle
1841 			 * the case that our stop point is earlier in
1842 			 * the buffer than our start point.
1843 			 */
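			/*
			 * E.g. (invented sequence numbers): with start_seq
			 * 900 and stop_seq 100 in a wrapped log, an entry
			 * with seq 950 takes neither break (950 >= 100 but
			 * 950 >= start_seq) and is queued, while an entry
			 * with seq 100 satisfies seq >= stop_seq and
			 * seq < start_seq, ending the scan.
			 */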
1844 			if (el->event[i].seq >= stop_seq) {
1845 				if (start_seq <= stop_seq)
1846 					break;
1847 				else if (el->event[i].seq < start_seq)
1848 					break;
1849 			}
1850 			mfi_queue_evt(sc, &el->event[i]);
1851 		}
1852 		seq = el->event[el->count - 1].seq + 1;
1853 	}
1854 
1855 	free(el, M_MFIBUF);
1856 	return (error);
1857 }
1858 
1859 static int
1860 mfi_add_ld(struct mfi_softc *sc, int id)
1861 {
1862 	struct mfi_command *cm;
1863 	struct mfi_dcmd_frame *dcmd = NULL;
1864 	struct mfi_ld_info *ld_info = NULL;
1865 	struct mfi_disk_pending *ld_pend;
1866 	int error;
1867 
1868 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1869 
1870 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1871 	if (ld_pend != NULL) {
1872 		ld_pend->ld_id = id;
1873 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1874 	}
1875 
1876 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1877 	    (void **)&ld_info, sizeof(*ld_info));
1878 	if (error) {
1879 		device_printf(sc->mfi_dev,
1880 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1881 		if (ld_info)
1882 			free(ld_info, M_MFIBUF);
1883 		return (error);
1884 	}
1885 	cm->cm_flags = MFI_CMD_DATAIN;
1886 	dcmd = &cm->cm_frame->dcmd;
1887 	dcmd->mbox[0] = id;
1888 	if (mfi_wait_command(sc, cm) != 0) {
1889 		device_printf(sc->mfi_dev,
1890 		    "Failed to get logical drive: %d\n", id);
1891 		free(ld_info, M_MFIBUF);
1892 		return (0);
1893 	}
1894 	if (ld_info->ld_config.params.isSSCD != 1)
1895 		mfi_add_ld_complete(cm);
1896 	else {
1897 		mfi_release_command(cm);
1898 		if (ld_info)		/* for SSCD drives, ld_info is freed here */
1899 			free(ld_info, M_MFIBUF);
1900 	}
1901 	return (0);
1902 }
1903 
1904 static void
1905 mfi_add_ld_complete(struct mfi_command *cm)
1906 {
1907 	struct mfi_frame_header *hdr;
1908 	struct mfi_ld_info *ld_info;
1909 	struct mfi_softc *sc;
1910 	device_t child;
1911 
1912 	sc = cm->cm_sc;
1913 	hdr = &cm->cm_frame->header;
1914 	ld_info = cm->cm_private;
1915 
1916 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1917 		free(ld_info, M_MFIBUF);
1918 		wakeup(&sc->mfi_map_sync_cm);
1919 		mfi_release_command(cm);
1920 		return;
1921 	}
1922 	wakeup(&sc->mfi_map_sync_cm);
1923 	mfi_release_command(cm);
1924 
1925 	mtx_unlock(&sc->mfi_io_lock);
1926 	mtx_lock(&Giant);
1927 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1928 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1929 		free(ld_info, M_MFIBUF);
1930 		mtx_unlock(&Giant);
1931 		mtx_lock(&sc->mfi_io_lock);
1932 		return;
1933 	}
1934 
1935 	device_set_ivars(child, ld_info);
1936 	device_set_desc(child, "MFI Logical Disk");
1937 	bus_generic_attach(sc->mfi_dev);
1938 	mtx_unlock(&Giant);
1939 	mtx_lock(&sc->mfi_io_lock);
1940 }
1941 
1942 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1943 {
1944 	struct mfi_command *cm;
1945 	struct mfi_dcmd_frame *dcmd = NULL;
1946 	struct mfi_pd_info *pd_info = NULL;
1947 	struct mfi_system_pending *syspd_pend;
1948 	int error;
1949 
1950 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1951 
1952 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1953 	if (syspd_pend != NULL) {
1954 		syspd_pend->pd_id = id;
1955 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1956 	}
1957 
1958 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1959 		(void **)&pd_info, sizeof(*pd_info));
1960 	if (error) {
1961 		device_printf(sc->mfi_dev,
1962 		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1963 		    error);
1964 		if (pd_info)
1965 			free(pd_info, M_MFIBUF);
1966 		return (error);
1967 	}
1968 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1969 	dcmd = &cm->cm_frame->dcmd;
1970 	dcmd->mbox[0] = id;
1971 	dcmd->header.scsi_status = 0;
1972 	dcmd->header.pad0 = 0;
1973 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1974 		device_printf(sc->mfi_dev,
1975 		    "Failed to get physical drive info %d\n", id);
1976 		free(pd_info, M_MFIBUF);
1977 		mfi_release_command(cm);
1978 		return (error);
1979 	}
1980 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1981 	    BUS_DMASYNC_POSTREAD);
1982 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1983 	mfi_add_sys_pd_complete(cm);
1984 	return (0);
1985 }
1986 
1987 static void
1988 mfi_add_sys_pd_complete(struct mfi_command *cm)
1989 {
1990 	struct mfi_frame_header *hdr;
1991 	struct mfi_pd_info *pd_info;
1992 	struct mfi_softc *sc;
1993 	device_t child;
1994 
1995 	sc = cm->cm_sc;
1996 	hdr = &cm->cm_frame->header;
1997 	pd_info = cm->cm_private;
1998 
1999 	if (hdr->cmd_status != MFI_STAT_OK) {
2000 		free(pd_info, M_MFIBUF);
2001 		mfi_release_command(cm);
2002 		return;
2003 	}
2004 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2005 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2006 		    pd_info->ref.v.device_id);
2007 		free(pd_info, M_MFIBUF);
2008 		mfi_release_command(cm);
2009 		return;
2010 	}
2011 	mfi_release_command(cm);
2012 
2013 	mtx_unlock(&sc->mfi_io_lock);
2014 	mtx_lock(&Giant);
2015 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2016 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2017 		free(pd_info, M_MFIBUF);
2018 		mtx_unlock(&Giant);
2019 		mtx_lock(&sc->mfi_io_lock);
2020 		return;
2021 	}
2022 
2023 	device_set_ivars(child, pd_info);
2024 	device_set_desc(child, "MFI System PD");
2025 	bus_generic_attach(sc->mfi_dev);
2026 	mtx_unlock(&Giant);
2027 	mtx_lock(&sc->mfi_io_lock);
2028 }
2029 
2030 static struct mfi_command *
2031 mfi_bio_command(struct mfi_softc *sc)
2032 {
2033 	struct bio *bio;
2034 	struct mfi_command *cm = NULL;
2035 
2036 	/* Reserve two commands to avoid starving ioctl requests */
2037 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2038 		return (NULL);
2039 	}
2040 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2041 		return (NULL);
2042 	}
2043 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2044 		cm = mfi_build_ldio(sc, bio);
2045 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2046 		cm = mfi_build_syspdio(sc, bio);
2047 	}
2048 	if (cm == NULL)
2049 		mfi_enqueue_bio(sc, bio);
2050 	return (cm);
2051 }
2052 
2053 /*
2054  * Mostly copied from cam/scsi/scsi_all.c:scsi_read_write().
2055  */
2056 
2057 int
2058 mfi_build_cdb(int readop, uint8_t byte2, uint64_t lba, uint32_t block_count,
    uint8_t *cdb)
2059 {
2060 	int cdb_len;
2061 
2062 	if (((lba & 0x1fffff) == lba) &&
2063 	    ((block_count & 0xff) == block_count) && (byte2 == 0)) {
2065 		/* We can fit in a 6 byte cdb */
2066 		struct scsi_rw_6 *scsi_cmd;
2067 
2068 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2069 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2070 		scsi_ulto3b(lba, scsi_cmd->addr);
2071 		scsi_cmd->length = block_count & 0xff;
2072 		scsi_cmd->control = 0;
2073 		cdb_len = sizeof(*scsi_cmd);
2074 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2075 		/* Need a 10 byte CDB */
2076 		struct scsi_rw_10 *scsi_cmd;
2077 
2078 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2079 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2080 		scsi_cmd->byte2 = byte2;
2081 		scsi_ulto4b(lba, scsi_cmd->addr);
2082 		scsi_cmd->reserved = 0;
2083 		scsi_ulto2b(block_count, scsi_cmd->length);
2084 		scsi_cmd->control = 0;
2085 		cdb_len = sizeof(*scsi_cmd);
2086 	} else if (((block_count & 0xffffffff) == block_count) &&
2087 	    ((lba & 0xffffffff) == lba)) {
2088 		/* Block count is too big for a 10 byte CDB; use a 12 byte CDB */
2089 		struct scsi_rw_12 *scsi_cmd;
2090 
2091 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2092 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2093 		scsi_cmd->byte2 = byte2;
2094 		scsi_ulto4b(lba, scsi_cmd->addr);
2095 		scsi_cmd->reserved = 0;
2096 		scsi_ulto4b(block_count, scsi_cmd->length);
2097 		scsi_cmd->control = 0;
2098 		cdb_len = sizeof(*scsi_cmd);
2099 	} else {
2100 		/*
2101 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2102 		 * than 2^32
2103 		 */
2104 		struct scsi_rw_16 *scsi_cmd;
2105 
2106 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2107 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2108 		scsi_cmd->byte2 = byte2;
2109 		scsi_u64to8b(lba, scsi_cmd->addr);
2110 		scsi_cmd->reserved = 0;
2111 		scsi_ulto4b(block_count, scsi_cmd->length);
2112 		scsi_cmd->control = 0;
2113 		cdb_len = sizeof(*scsi_cmd);
2114 	}
2115 
2116 	return (cdb_len);
2117 }
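
/*
 * Worked example for mfi_build_cdb() (editorial; values invented): lba
 * 0x100000 with block_count 300 passes the 21-bit LBA test but not the
 * 8-bit count test, so a 10-byte READ_10/WRITE_10 CDB is built; any lba
 * of 2^32 or above fails both 32-bit LBA tests and falls through to the
 * 16-byte CDB regardless of count.
 */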
2118 
2119 extern char *unmapped_buf;
2120 
2121 static struct mfi_command *
2122 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2123 {
2124 	struct mfi_command *cm;
2125 	struct mfi_pass_frame *pass;
2126 	uint32_t context = 0;
2127 	int flags = 0, blkcount = 0, readop;
2128 	uint8_t cdb_len;
2129 
2130 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2131 
2132 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2133 	    return (NULL);
2134 
2135 	/* Zero out the MFI frame */
2136 	context = cm->cm_frame->header.context;
2137 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2138 	cm->cm_frame->header.context = context;
2139 	pass = &cm->cm_frame->pass;
2140 	bzero(pass->cdb, 16);
2141 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2142 	switch (bio->bio_cmd) {
2143 	case BIO_READ:
2144 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2145 		readop = 1;
2146 		break;
2147 	case BIO_WRITE:
2148 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2149 		readop = 0;
2150 		break;
2151 	default:
2152 		/* TODO: what about BIO_DELETE? */
2153 		biofinish(bio, NULL, EOPNOTSUPP);
2154 		mfi_enqueue_free(cm);
2155 		return (NULL);
2156 	}
2157 
2158 	/* Cheat with the sector length to avoid a non-constant division */
2159 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
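	/*
	 * E.g. (assuming the usual 512-byte MFI_SECTOR_LEN): a 4096-byte
	 * bio gives blkcount 8; howmany() rounds up, so a 1-byte bio
	 * still gives blkcount 1.
	 */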
2160 	/* Fill the LBA and Transfer length in CDB */
2161 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2162 	    pass->cdb);
2163 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 	pass->header.lun_id = 0;
2165 	pass->header.timeout = 0;
2166 	pass->header.flags = 0;
2167 	pass->header.scsi_status = 0;
2168 	pass->header.sense_len = MFI_SENSE_LEN;
2169 	pass->header.data_len = bio->bio_bcount;
2170 	pass->header.cdb_len = cdb_len;
2171 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 	cm->cm_complete = mfi_bio_complete;
2174 	cm->cm_private = bio;
2175 	cm->cm_data = unmapped_buf;
2176 	cm->cm_len = bio->bio_bcount;
2177 	cm->cm_sg = &pass->sgl;
2178 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 	cm->cm_flags = flags;
2180 
2181 	return (cm);
2182 }
2183 
2184 static struct mfi_command *
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2186 {
2187 	struct mfi_io_frame *io;
2188 	struct mfi_command *cm;
2189 	int flags;
2190 	uint32_t blkcount;
2191 	uint32_t context = 0;
2192 
2193 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2194 
2195 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2196 	    return (NULL);
2197 
2198 	/* Zero out the MFI frame */
2199 	context = cm->cm_frame->header.context;
2200 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 	cm->cm_frame->header.context = context;
2202 	io = &cm->cm_frame->io;
2203 	switch (bio->bio_cmd) {
2204 	case BIO_READ:
2205 		io->header.cmd = MFI_CMD_LD_READ;
2206 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2207 		break;
2208 	case BIO_WRITE:
2209 		io->header.cmd = MFI_CMD_LD_WRITE;
2210 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2211 		break;
2212 	default:
2213 		/* TODO: what about BIO_DELETE? */
2214 		biofinish(bio, NULL, EOPNOTSUPP);
2215 		mfi_enqueue_free(cm);
2216 		return (NULL);
2217 	}
2218 
2219 	/* Cheat with the sector length to avoid a non-constant division */
2220 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2221 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2222 	io->header.timeout = 0;
2223 	io->header.flags = 0;
2224 	io->header.scsi_status = 0;
2225 	io->header.sense_len = MFI_SENSE_LEN;
2226 	io->header.data_len = blkcount;
2227 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2228 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2229 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2230 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
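	/*
	 * E.g. (invented block number): bio_pblkno 0x123456789 splits into
	 * lba_hi 0x1 and lba_lo 0x23456789.
	 */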
2231 	cm->cm_complete = mfi_bio_complete;
2232 	cm->cm_private = bio;
2233 	cm->cm_data = unmapped_buf;
2234 	cm->cm_len = bio->bio_bcount;
2235 	cm->cm_sg = &io->sgl;
2236 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2237 	cm->cm_flags = flags;
2238 
2239 	return (cm);
2240 }
2241 
2242 static void
2243 mfi_bio_complete(struct mfi_command *cm)
2244 {
2245 	struct bio *bio;
2246 	struct mfi_frame_header *hdr;
2247 	struct mfi_softc *sc;
2248 
2249 	bio = cm->cm_private;
2250 	hdr = &cm->cm_frame->header;
2251 	sc = cm->cm_sc;
2252 
2253 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2254 		bio->bio_flags |= BIO_ERROR;
2255 		bio->bio_error = EIO;
2256 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2257 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2258 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2259 	} else if (cm->cm_error != 0) {
2260 		bio->bio_flags |= BIO_ERROR;
2261 		bio->bio_error = cm->cm_error;
2262 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2263 		    cm, cm->cm_error);
2264 	}
2265 
2266 	mfi_release_command(cm);
2267 	mfi_disk_complete(bio);
2268 }
2269 
2270 void
2271 mfi_startio(struct mfi_softc *sc)
2272 {
2273 	struct mfi_command *cm;
2274 	struct ccb_hdr *ccbh;
2275 
2276 	for (;;) {
2277 		/* Don't bother if we're short on resources */
2278 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2279 			break;
2280 
2281 		/* Try a command that has already been prepared */
2282 		cm = mfi_dequeue_ready(sc);
2283 
2284 		if (cm == NULL) {
2285 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2286 				cm = sc->mfi_cam_start(ccbh);
2287 		}
2288 
2289 		/* Nope, so look for work on the bioq */
2290 		if (cm == NULL)
2291 			cm = mfi_bio_command(sc);
2292 
2293 		/* No work available, so exit */
2294 		if (cm == NULL)
2295 			break;
2296 
2297 		/* Send the command to the controller */
2298 		if (mfi_mapcmd(sc, cm) != 0) {
2299 			device_printf(sc->mfi_dev, "Failed to startio\n");
2300 			mfi_requeue_ready(cm);
2301 			break;
2302 		}
2303 	}
2304 }
2305 
2306 int
2307 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2308 {
2309 	int error, polled;
2310 
2311 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2312 
2313 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2314 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2315 		if (cm->cm_flags & MFI_CMD_CCB)
2316 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2317 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2318 			    polled);
2319 		else if (cm->cm_flags & MFI_CMD_BIO)
2320 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2321 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2322 			    polled);
2323 		else
2324 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2325 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2326 			    mfi_data_cb, cm, polled);
2327 		if (error == EINPROGRESS) {
2328 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2329 			return (0);
2330 		}
2331 	} else {
2332 		error = mfi_send_frame(sc, cm);
2333 	}
2334 
2335 	return (error);
2336 }
2337 
2338 static void
2339 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2340 {
2341 	struct mfi_frame_header *hdr;
2342 	struct mfi_command *cm;
2343 	union mfi_sgl *sgl;
2344 	struct mfi_softc *sc;
2345 	int i, j, first, dir;
2346 	int sge_size, locked;
2347 
2348 	cm = (struct mfi_command *)arg;
2349 	sc = cm->cm_sc;
2350 	hdr = &cm->cm_frame->header;
2351 	sgl = cm->cm_sg;
2352 
2353 	/*
2354 	 * We need to check whether we already hold the lock: this is an
2355 	 * async callback, so even though our caller mfi_mapcmd asserts
2356 	 * that it holds the lock, there is no guarantee the lock wasn't
2357 	 * dropped if bus_dmamap_load returned before this callback ran.
2358 	 */
2360 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2361 		mtx_lock(&sc->mfi_io_lock);
2362 
2363 	if (error) {
2364 		printf("error %d in callback\n", error);
2365 		cm->cm_error = error;
2366 		mfi_complete(sc, cm);
2367 		goto out;
2368 	}
2369 	/* Use the IEEE SGL only for I/Os on a SKINNY controller.
2370 	 * For other commands on a SKINNY controller use either
2371 	 * sg32 or sg64, depending on sizeof(bus_addr_t).
2372 	 * Also calculate the total frame size based on the type
2373 	 * of SGL used.
2374 	 */
2375 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2376 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2377 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2378 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2379 		for (i = 0; i < nsegs; i++) {
2380 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2381 			sgl->sg_skinny[i].len = segs[i].ds_len;
2382 			sgl->sg_skinny[i].flag = 0;
2383 		}
2384 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2385 		sge_size = sizeof(struct mfi_sg_skinny);
2386 		hdr->sg_count = nsegs;
2387 	} else {
2388 		j = 0;
2389 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2390 			first = cm->cm_stp_len;
2391 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2392 				sgl->sg32[j].addr = segs[0].ds_addr;
2393 				sgl->sg32[j++].len = first;
2394 			} else {
2395 				sgl->sg64[j].addr = segs[0].ds_addr;
2396 				sgl->sg64[j++].len = first;
2397 			}
2398 		} else
2399 			first = 0;
2400 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2401 			for (i = 0; i < nsegs; i++) {
2402 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2403 				sgl->sg32[j++].len = segs[i].ds_len - first;
2404 				first = 0;
2405 			}
2406 		} else {
2407 			for (i = 0; i < nsegs; i++) {
2408 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2409 				sgl->sg64[j++].len = segs[i].ds_len - first;
2410 				first = 0;
2411 			}
2412 			hdr->flags |= MFI_FRAME_SGL64;
2413 		}
2414 		hdr->sg_count = j;
2415 		sge_size = sc->mfi_sge_size;
2416 	}
2417 
2418 	dir = 0;
2419 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2420 		dir |= BUS_DMASYNC_PREREAD;
2421 		hdr->flags |= MFI_FRAME_DIR_READ;
2422 	}
2423 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2424 		dir |= BUS_DMASYNC_PREWRITE;
2425 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2426 	}
2427 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2428 	cm->cm_flags |= MFI_CMD_MAPPED;
2429 
2430 	/*
2431 	 * Instead of calculating the total number of frames in the
2432 	 * compound frame, it's already assumed that there will be at
2433 	 * least 1 frame, so don't compensate for the modulo of the
2434 	 * following division.
2435 	 */
2436 	cm->cm_total_frame_size += (sge_size * nsegs);
2437 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
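	/*
	 * E.g. (assuming the usual 64-byte MFI_FRAME_SIZE): a total frame
	 * size of 224 bytes gives (224 - 1) / 64 = 3 extra frames beyond
	 * the first.
	 */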
2438 
2439 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2440 		printf("error %d in callback from mfi_send_frame\n", error);
2441 		cm->cm_error = error;
2442 		mfi_complete(sc, cm);
2443 		goto out;
2444 	}
2445 
2446 out:
2447 	/* leave the lock in the state we found it */
2448 	if (locked == 0)
2449 		mtx_unlock(&sc->mfi_io_lock);
2450 
2451 	return;
2452 }
2453 
2454 static int
2455 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2456 {
2457 	int error;
2458 
2459 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2460 
2461 	if (sc->MFA_enabled)
2462 		error = mfi_tbolt_send_frame(sc, cm);
2463 	else
2464 		error = mfi_std_send_frame(sc, cm);
2465 
2466 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2467 		mfi_remove_busy(cm);
2468 
2469 	return (error);
2470 }
2471 
2472 static int
2473 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2474 {
2475 	struct mfi_frame_header *hdr;
2476 	int tm = mfi_polled_cmd_timeout * 1000;
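	/* tm counts milliseconds; each DELAY(1000) below burns 1 ms of it. */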
2477 
2478 	hdr = &cm->cm_frame->header;
2479 
2480 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2481 		cm->cm_timestamp = time_uptime;
2482 		mfi_enqueue_busy(cm);
2483 	} else {
2484 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2485 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2486 	}
2487 
2488 	/*
2489 	 * The bus address of the command is aligned on a 64 byte boundary,
2490 	 * leaving the least significant 6 bits as zero.  For whatever reason, the
2491 	 * hardware wants the address shifted right by three, leaving just
2492 	 * 3 zero bits.  These three bits are then used as a prefetching
2493 	 * hint for the hardware to predict how many frames need to be
2494 	 * fetched across the bus.  If a command has more than 8 frames
2495 	 * then the 3 bits are set to 0x7 and the firmware uses other
2496 	 * information in the command to determine the total amount to fetch.
2497 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2498 	 * is enough for both 32bit and 64bit systems.
2499 	 */
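	/*
	 * E.g. (invented address): a frame at bus address 0x12345640
	 * (64-byte aligned) is presented as 0x12345640 >> 3 = 0x2468ac8,
	 * with cm_extra_frames (capped at 7 below) filling the low 3 bits
	 * as the prefetch hint.
	 */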
2500 	if (cm->cm_extra_frames > 7)
2501 		cm->cm_extra_frames = 7;
2502 
2503 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2504 
2505 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2506 		return (0);
2507 
2508 	/* This is a polled command, so busy-wait for it to complete. */
2509 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2510 		DELAY(1000);
2511 		tm -= 1;
2512 		if (tm <= 0)
2513 			break;
2514 	}
2515 
2516 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2517 		device_printf(sc->mfi_dev, "Frame %p timed out "
2518 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2519 		return (ETIMEDOUT);
2520 	}
2521 
2522 	return (0);
2523 }
2524 
2525 void
2526 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2527 {
2528 	int dir;
2529 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2530 
2531 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2532 		dir = 0;
2533 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2534 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2535 			dir |= BUS_DMASYNC_POSTREAD;
2536 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2537 			dir |= BUS_DMASYNC_POSTWRITE;
2538 
2539 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2540 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2541 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2542 	}
2543 
2544 	cm->cm_flags |= MFI_CMD_COMPLETED;
2545 
2546 	if (cm->cm_complete != NULL)
2547 		cm->cm_complete(cm);
2548 	else
2549 		wakeup(cm);
2550 }
2551 
2552 static int
2553 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2554 {
2555 	struct mfi_command *cm;
2556 	struct mfi_abort_frame *abort;
2557 	int i = 0, error;
2558 	uint32_t context = 0;
2559 
2560 	mtx_lock(&sc->mfi_io_lock);
2561 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2562 		mtx_unlock(&sc->mfi_io_lock);
2563 		return (EBUSY);
2564 	}
2565 
2566 	/* Zero out the MFI frame */
2567 	context = cm->cm_frame->header.context;
2568 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2569 	cm->cm_frame->header.context = context;
2570 
2571 	abort = &cm->cm_frame->abort;
2572 	abort->header.cmd = MFI_CMD_ABORT;
2573 	abort->header.flags = 0;
2574 	abort->header.scsi_status = 0;
2575 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2576 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2577 	abort->abort_mfi_addr_hi =
2578 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2579 	cm->cm_data = NULL;
2580 	cm->cm_flags = MFI_CMD_POLLED;
2581 
2582 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2583 		device_printf(sc->mfi_dev, "failed to abort command\n");
2584 	mfi_release_command(cm);
2585 
2586 	mtx_unlock(&sc->mfi_io_lock);
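	/* Wait up to five 5-second intervals (25 s) for the abort to finish. */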
2587 	while (i < 5 && *cm_abort != NULL) {
2588 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2590 		i++;
2591 	}
2592 	if (*cm_abort != NULL) {
2593 		/* Force a complete if command didn't abort */
2594 		mtx_lock(&sc->mfi_io_lock);
2595 		(*cm_abort)->cm_complete(*cm_abort);
2596 		mtx_unlock(&sc->mfi_io_lock);
2597 	}
2598 
2599 	return (error);
2600 }
2601 
2602 int
2603 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2604     int len)
2605 {
2606 	struct mfi_command *cm;
2607 	struct mfi_io_frame *io;
2608 	int error;
2609 	uint32_t context = 0;
2610 
2611 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2612 		return (EBUSY);
2613 
2614 	/* Zero out the MFI frame */
2615 	context = cm->cm_frame->header.context;
2616 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2617 	cm->cm_frame->header.context = context;
2618 
2619 	io = &cm->cm_frame->io;
2620 	io->header.cmd = MFI_CMD_LD_WRITE;
2621 	io->header.target_id = id;
2622 	io->header.timeout = 0;
2623 	io->header.flags = 0;
2624 	io->header.scsi_status = 0;
2625 	io->header.sense_len = MFI_SENSE_LEN;
2626 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2627 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2628 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2629 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2630 	io->lba_lo = lba & 0xffffffff;
2631 	cm->cm_data = virt;
2632 	cm->cm_len = len;
2633 	cm->cm_sg = &io->sgl;
2634 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2635 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2636 
2637 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2638 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2639 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2640 	    BUS_DMASYNC_POSTWRITE);
2641 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2642 	mfi_release_command(cm);
2643 
2644 	return (error);
2645 }
2646 
2647 int
2648 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2649     int len)
2650 {
2651 	struct mfi_command *cm;
2652 	struct mfi_pass_frame *pass;
2653 	int error, readop, cdb_len;
2654 	uint32_t blkcount;
2655 
2656 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2657 		return (EBUSY);
2658 
2659 	pass = &cm->cm_frame->pass;
2660 	bzero(pass->cdb, 16);
2661 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2662 
2663 	readop = 0;
2664 	blkcount = howmany(len, MFI_SECTOR_LEN);
2665 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2666 	pass->header.target_id = id;
2667 	pass->header.timeout = 0;
2668 	pass->header.flags = 0;
2669 	pass->header.scsi_status = 0;
2670 	pass->header.sense_len = MFI_SENSE_LEN;
2671 	pass->header.data_len = len;
2672 	pass->header.cdb_len = cdb_len;
2673 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2674 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2675 	cm->cm_data = virt;
2676 	cm->cm_len = len;
2677 	cm->cm_sg = &pass->sgl;
2678 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2679 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2680 
2681 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2682 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2683 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2684 	    BUS_DMASYNC_POSTWRITE);
2685 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2686 	mfi_release_command(cm);
2687 
2688 	return (error);
2689 }
2690 
2691 static int
2692 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2693 {
2694 	struct mfi_softc *sc;
2695 	int error;
2696 
2697 	sc = dev->si_drv1;
2698 
2699 	mtx_lock(&sc->mfi_io_lock);
2700 	if (sc->mfi_detaching)
2701 		error = ENXIO;
2702 	else {
2703 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2704 		error = 0;
2705 	}
2706 	mtx_unlock(&sc->mfi_io_lock);
2707 
2708 	return (error);
2709 }
2710 
2711 static int
2712 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2713 {
2714 	struct mfi_softc *sc;
2715 	struct mfi_aen *mfi_aen_entry, *tmp;
2716 
2717 	sc = dev->si_drv1;
2718 
2719 	mtx_lock(&sc->mfi_io_lock);
2720 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2721 
2722 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2723 		if (mfi_aen_entry->p == curproc) {
2724 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2725 			    aen_link);
2726 			free(mfi_aen_entry, M_MFIBUF);
2727 		}
2728 	}
2729 	mtx_unlock(&sc->mfi_io_lock);
2730 	return (0);
2731 }
2732 
2733 static int
2734 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2735 {
2736 
2737 	switch (opcode) {
2738 	case MFI_DCMD_LD_DELETE:
2739 	case MFI_DCMD_CFG_ADD:
2740 	case MFI_DCMD_CFG_CLEAR:
2741 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2742 		sx_xlock(&sc->mfi_config_lock);
2743 		return (1);
2744 	default:
2745 		return (0);
2746 	}
2747 }
2748 
2749 static void
2750 mfi_config_unlock(struct mfi_softc *sc, int locked)
2751 {
2752 
2753 	if (locked)
2754 		sx_xunlock(&sc->mfi_config_lock);
2755 }
2756 
2757 /*
2758  * Perform pre-issue checks on commands from userland and possibly veto
2759  * them.
2760  */
2761 static int
2762 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2763 {
2764 	struct mfi_disk *ld, *ld2;
2765 	int error;
2766 	struct mfi_system_pd *syspd = NULL;
2767 	uint16_t syspd_id;
2768 	uint16_t *mbox;
2769 
2770 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2771 	error = 0;
2772 	switch (cm->cm_frame->dcmd.opcode) {
2773 	case MFI_DCMD_LD_DELETE:
2774 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2775 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2776 				break;
2777 		}
2778 		if (ld == NULL)
2779 			error = ENOENT;
2780 		else
2781 			error = mfi_disk_disable(ld);
2782 		break;
2783 	case MFI_DCMD_CFG_CLEAR:
2784 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2785 			error = mfi_disk_disable(ld);
2786 			if (error)
2787 				break;
2788 		}
2789 		if (error) {
2790 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2791 				if (ld2 == ld)
2792 					break;
2793 				mfi_disk_enable(ld2);
2794 			}
2795 		}
2796 		break;
2797 	case MFI_DCMD_PD_STATE_SET:
2798 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2799 		syspd_id = mbox[0];
2800 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2801 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2802 				if (syspd->pd_id == syspd_id)
2803 					break;
2804 			}
2805 		} else
2806 			break;
2808 		if (syspd)
2809 			error = mfi_syspd_disable(syspd);
2810 		break;
2811 	default:
2812 		break;
2813 	}
2814 	return (error);
2815 }
2816 
2817 /* Perform post-issue checks on commands from userland. */
2818 static void
2819 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2820 {
2821 	struct mfi_disk *ld, *ldn;
2822 	struct mfi_system_pd *syspd = NULL;
2823 	uint16_t syspd_id;
2824 	uint16_t *mbox;
2825 
2826 	switch (cm->cm_frame->dcmd.opcode) {
2827 	case MFI_DCMD_LD_DELETE:
2828 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2829 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2830 				break;
2831 		}
2832 		KASSERT(ld != NULL, ("volume disappeared"));
2833 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2834 			mtx_unlock(&sc->mfi_io_lock);
2835 			mtx_lock(&Giant);
2836 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2837 			mtx_unlock(&Giant);
2838 			mtx_lock(&sc->mfi_io_lock);
2839 		} else
2840 			mfi_disk_enable(ld);
2841 		break;
2842 	case MFI_DCMD_CFG_CLEAR:
2843 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2844 			mtx_unlock(&sc->mfi_io_lock);
2845 			mtx_lock(&Giant);
2846 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2847 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2848 			}
2849 			mtx_unlock(&Giant);
2850 			mtx_lock(&sc->mfi_io_lock);
2851 		} else {
2852 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2853 				mfi_disk_enable(ld);
2854 		}
2855 		break;
2856 	case MFI_DCMD_CFG_ADD:
2857 		mfi_ldprobe(sc);
2858 		break;
2859 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2860 		mfi_ldprobe(sc);
2861 		break;
2862 	case MFI_DCMD_PD_STATE_SET:
2863 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2864 		syspd_id = mbox[0];
2865 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2866 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2867 				if (syspd->pd_id == syspd_id)
2868 					break;
2869 			}
2870 		} else
2871 			break;
2873 		/* If the transition fails then enable the syspd again */
2874 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2875 			mfi_syspd_enable(syspd);
2876 		break;
2877 	}
2878 }
2879 
2880 static int
2881 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2882 {
2883 	struct mfi_config_data *conf_data;
2884 	struct mfi_command *ld_cm = NULL;
2885 	struct mfi_ld_info *ld_info = NULL;
2886 	struct mfi_ld_config *ld;
2887 	char *p;
2888 	int error = 0;
2889 
2890 	conf_data = (struct mfi_config_data *)cm->cm_data;
2891 
2892 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2893 		p = (char *)conf_data->array;
2894 		p += conf_data->array_size * conf_data->array_count;
2895 		ld = (struct mfi_ld_config *)p;
2896 		if (ld->params.isSSCD == 1)
2897 			error = 1;
2898 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2899 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2900 		    (void **)&ld_info, sizeof(*ld_info));
2901 		if (error) {
2902 			device_printf(sc->mfi_dev, "Failed to allocate"
2903 			    "MFI_DCMD_LD_GET_INFO %d", error);
2904 			if (ld_info)
2905 				free(ld_info, M_MFIBUF);
2906 			return (0);
2907 		}
2908 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2909 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2910 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2911 		if (mfi_wait_command(sc, ld_cm) != 0) {
2912 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2913 			mfi_release_command(ld_cm);
2914 			free(ld_info, M_MFIBUF);
2915 			return (0);
2916 		}
2917 
2918 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2919 			free(ld_info, M_MFIBUF);
2920 			mfi_release_command(ld_cm);
2921 			return (0);
2922 		}
2923 		ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2925 
2926 		if (ld_info->ld_config.params.isSSCD == 1)
2927 			error = 1;
2928 
2929 		mfi_release_command(ld_cm);
2930 		free(ld_info, M_MFIBUF);
2931 	}
2932 	return (error);
2933 }
2934 
2935 static int
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2937 {
2938 	uint8_t i;
2939 	struct mfi_ioc_packet *ioc;
2940 	int sge_size, error;
2941 	struct megasas_sge *kern_sge;
2942 
2943 	ioc = (struct mfi_ioc_packet *)arg;
2944 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2945 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2946 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2947 
2948 	if (sizeof(bus_addr_t) == 8) {
2949 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 		cm->cm_extra_frames = 2;
2951 		sge_size = sizeof(struct mfi_sg64);
2952 	} else {
2953 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 		sge_size = sizeof(struct mfi_sg32);
2955 	}
2956 
2957 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2958 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2960 			1, 0,			/* algnmnt, boundary */
2961 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 			BUS_SPACE_MAXADDR,	/* highaddr */
2963 			NULL, NULL,		/* filter, filterarg */
2964 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2965 			2,			/* nsegments */
2966 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 			BUS_DMA_ALLOCNOW,	/* flags */
2968 			NULL, NULL,		/* lockfunc, lockarg */
2969 			&sc->mfi_kbuff_arr_dmat[i])) {
2970 			device_printf(sc->mfi_dev,
2971 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2972 			return (ENOMEM);
2973 		}
2974 
2975 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2978 			device_printf(sc->mfi_dev,
2979 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2980 			return (ENOMEM);
2981 		}
2982 
2983 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2987 
2988 		if (!sc->kbuff_arr[i]) {
2989 			device_printf(sc->mfi_dev,
2990 			    "Could not allocate memory for kbuff_arr info\n");
2991 			return (ENOMEM);
2992 		}
2993 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2995 
2996 		if (sizeof(bus_addr_t) == 8) {
2997 			cm->cm_frame->stp.sgl.sg64[i].addr =
2998 			    kern_sge[i].phys_addr;
2999 			cm->cm_frame->stp.sgl.sg64[i].len =
3000 			    ioc->mfi_sgl[i].iov_len;
3001 		} else {
3002 			cm->cm_frame->stp.sgl.sg32[i].addr =
3003 			    kern_sge[i].phys_addr;
3004 			cm->cm_frame->stp.sgl.sg32[i].len =
3005 			    ioc->mfi_sgl[i].iov_len;
3006 		}
3007 
3008 		error = copyin(ioc->mfi_sgl[i].iov_base,
3009 		    sc->kbuff_arr[i],
3010 		    ioc->mfi_sgl[i].iov_len);
3011 		if (error != 0) {
3012 			device_printf(sc->mfi_dev, "Copy in failed\n");
3013 			return (error);
3014 		}
3015 	}
3016 
3017 	cm->cm_flags |= MFI_CMD_MAPPED;
3018 	return (0);
3019 }
3020 
3021 static int
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3023 {
3024 	struct mfi_command *cm;
3025 	struct mfi_dcmd_frame *dcmd;
3026 	void *ioc_buf = NULL;
3027 	uint32_t context;
3028 	int error = 0, locked;
3029 
3030 	if (ioc->buf_size > 0) {
3031 		if (ioc->buf_size > 1024 * 1024)
3032 			return (ENOMEM);
3033 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3034 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3035 		if (error) {
3036 			device_printf(sc->mfi_dev, "failed to copyin\n");
3037 			free(ioc_buf, M_MFIBUF);
3038 			return (error);
3039 		}
3040 	}
3041 
3042 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3043 
3044 	mtx_lock(&sc->mfi_io_lock);
3045 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3046 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3047 
3048 	/* Save context for later */
3049 	context = cm->cm_frame->header.context;
3050 
3051 	dcmd = &cm->cm_frame->dcmd;
3052 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3053 
3054 	cm->cm_sg = &dcmd->sgl;
3055 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3056 	cm->cm_data = ioc_buf;
3057 	cm->cm_len = ioc->buf_size;
3058 
3059 	/* restore context */
3060 	cm->cm_frame->header.context = context;
3061 
3062 	/* Cheat since we don't know if we're writing or reading */
3063 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3064 
3065 	error = mfi_check_command_pre(sc, cm);
3066 	if (error)
3067 		goto out;
3068 
3069 	error = mfi_wait_command(sc, cm);
3070 	if (error) {
3071 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3072 		goto out;
3073 	}
3074 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3075 	mfi_check_command_post(sc, cm);
3076 out:
3077 	mfi_release_command(cm);
3078 	mtx_unlock(&sc->mfi_io_lock);
3079 	mfi_config_unlock(sc, locked);
3080 	if (ioc->buf_size > 0)
3081 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3082 	if (ioc_buf)
3083 		free(ioc_buf, M_MFIBUF);
3084 	return (error);
3085 }
3086 
3087 #define	PTRIN(p)		((void *)(uintptr_t)(p))
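/*
 * PTRIN widens a pointer carried as a 32-bit integer by a 32-bit userland
 * (e.g. iov_base in struct mfi_ioc_packet32) into a kernel void *.
 */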
3088 
3089 static int
3090 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3091 {
3092 	struct mfi_softc *sc;
3093 	union mfi_statrequest *ms;
3094 	struct mfi_ioc_packet *ioc;
3095 #ifdef COMPAT_FREEBSD32
3096 	struct mfi_ioc_packet32 *ioc32;
3097 #endif
3098 	struct mfi_ioc_aen *aen;
3099 	struct mfi_command *cm = NULL;
3100 	uint32_t context = 0;
3101 	union mfi_sense_ptr sense_ptr;
3102 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3103 	size_t len;
3104 	int i, res;
3105 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3106 #ifdef COMPAT_FREEBSD32
3107 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3108 	struct mfi_ioc_passthru iop_swab;
3109 #endif
3110 	int error, locked;
3111 	union mfi_sgl *sgl;
3112 	sc = dev->si_drv1;
3113 	error = 0;
3114 
3115 	if (sc->adpreset)
3116 		return EBUSY;
3117 
3118 	if (sc->hw_crit_error)
3119 		return EBUSY;
3120 
3121 	if (sc->issuepend_done == 0)
3122 		return EBUSY;
3123 
3124 	switch (cmd) {
3125 	case MFIIO_STATS:
3126 		ms = (union mfi_statrequest *)arg;
3127 		switch (ms->ms_item) {
3128 		case MFIQ_FREE:
3129 		case MFIQ_BIO:
3130 		case MFIQ_READY:
3131 		case MFIQ_BUSY:
3132 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3133 			    sizeof(struct mfi_qstat));
3134 			break;
3135 		default:
3136 			error = ENOIOCTL;
3137 			break;
3138 		}
3139 		break;
3140 	case MFIIO_QUERY_DISK:
3141 	{
3142 		struct mfi_query_disk *qd;
3143 		struct mfi_disk *ld;
3144 
3145 		qd = (struct mfi_query_disk *)arg;
3146 		mtx_lock(&sc->mfi_io_lock);
3147 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3148 			if (ld->ld_id == qd->array_id)
3149 				break;
3150 		}
3151 		if (ld == NULL) {
3152 			qd->present = 0;
3153 			mtx_unlock(&sc->mfi_io_lock);
3154 			return (0);
3155 		}
3156 		qd->present = 1;
3157 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3158 			qd->open = 1;
3159 		bzero(qd->devname, SPECNAMELEN + 1);
3160 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3161 		mtx_unlock(&sc->mfi_io_lock);
3162 		break;
3163 	}
3164 	case MFI_CMD:
3165 #ifdef COMPAT_FREEBSD32
3166 	case MFI_CMD32:
3167 #endif
3168 		{
3169 		devclass_t devclass;
3170 		ioc = (struct mfi_ioc_packet *)arg;
3171 		int adapter;
3172 
3173 		adapter = ioc->mfi_adapter_no;
3174 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3175 			devclass = devclass_find("mfi");
3176 			sc = devclass_get_softc(devclass, adapter);
3177 		}
3178 		mtx_lock(&sc->mfi_io_lock);
3179 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3180 			mtx_unlock(&sc->mfi_io_lock);
3181 			return (EBUSY);
3182 		}
3183 		mtx_unlock(&sc->mfi_io_lock);
3184 		locked = 0;
3185 
3186 		/*
3187 		 * save off original context since copying from user
3188 		 * will clobber some data
3189 		 */
3190 		context = cm->cm_frame->header.context;
3191 		cm->cm_frame->header.context = cm->cm_index;
3192 
3193 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3194 		    2 * MEGAMFI_FRAME_SIZE);
3195 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3196 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3197 		cm->cm_frame->header.scsi_status = 0;
3198 		cm->cm_frame->header.pad0 = 0;
3199 		if (ioc->mfi_sge_count) {
3200 			cm->cm_sg =
3201 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3202 		}
3203 		sgl = cm->cm_sg;
3204 		cm->cm_flags = 0;
3205 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3206 			cm->cm_flags |= MFI_CMD_DATAIN;
3207 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3208 			cm->cm_flags |= MFI_CMD_DATAOUT;
3209 		/* Legacy app shim */
3210 		if (cm->cm_flags == 0)
3211 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3212 		cm->cm_len = cm->cm_frame->header.data_len;
3213 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3214 #ifdef COMPAT_FREEBSD32
3215 			if (cmd == MFI_CMD) {
3216 #endif
3217 				/* Native */
3218 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3219 #ifdef COMPAT_FREEBSD32
3220 			} else {
3221 				/* 32bit on 64bit */
3222 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3223 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3224 			}
3225 #endif
3226 			cm->cm_len += cm->cm_stp_len;
3227 		}
3228 		if (cm->cm_len &&
3229 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3230 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3231 			    M_WAITOK | M_ZERO);
3232 		} else {
3233 			cm->cm_data = NULL;
3234 		}
3235 
3236 		/* restore header context */
3237 		cm->cm_frame->header.context = context;
3238 
3239 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3240 			res = mfi_stp_cmd(sc, cm, arg);
3241 			if (res != 0)
3242 				goto out;
3243 		} else {
3244 			temp = data;
3245 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3246 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3247 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3248 #ifdef COMPAT_FREEBSD32
3249 					if (cmd == MFI_CMD) {
3250 #endif
3251 						/* Native */
3252 						addr = ioc->mfi_sgl[i].iov_base;
3253 						len = ioc->mfi_sgl[i].iov_len;
3254 #ifdef COMPAT_FREEBSD32
3255 					} else {
3256 						/* 32bit on 64bit */
3257 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3258 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3259 						len = ioc32->mfi_sgl[i].iov_len;
3260 					}
3261 #endif
3262 					error = copyin(addr, temp, len);
3263 					if (error != 0) {
3264 						device_printf(sc->mfi_dev,
3265 						    "Copy in failed\n");
3266 						goto out;
3267 					}
3268 					temp = &temp[len];
3269 				}
3270 			}
3271 		}
3272 
3273 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3274 			locked = mfi_config_lock(sc,
3275 			     cm->cm_frame->dcmd.opcode);
3276 
3277 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3278 			cm->cm_frame->pass.sense_addr_lo =
3279 			    (uint32_t)cm->cm_sense_busaddr;
3280 			cm->cm_frame->pass.sense_addr_hi =
3281 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3282 		}
3283 		mtx_lock(&sc->mfi_io_lock);
3284 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3285 		if (!skip_pre_post) {
3286 			error = mfi_check_command_pre(sc, cm);
3287 			if (error) {
3288 				mtx_unlock(&sc->mfi_io_lock);
3289 				goto out;
3290 			}
3291 		}
3292 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3293 			device_printf(sc->mfi_dev,
3294 			    "Controller polled failed\n");
3295 			mtx_unlock(&sc->mfi_io_lock);
3296 			goto out;
3297 		}
3298 		if (!skip_pre_post) {
3299 			mfi_check_command_post(sc, cm);
3300 		}
3301 		mtx_unlock(&sc->mfi_io_lock);
3302 
3303 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3304 			temp = data;
3305 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3306 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3307 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3308 #ifdef COMPAT_FREEBSD32
3309 					if (cmd == MFI_CMD) {
3310 #endif
3311 						/* Native */
3312 						addr = ioc->mfi_sgl[i].iov_base;
3313 						len = ioc->mfi_sgl[i].iov_len;
3314 #ifdef COMPAT_FREEBSD32
3315 					} else {
3316 						/* 32bit on 64bit */
3317 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3318 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3319 						len = ioc32->mfi_sgl[i].iov_len;
3320 					}
3321 #endif
3322 					error = copyout(temp, addr, len);
3323 					if (error != 0) {
3324 						device_printf(sc->mfi_dev,
3325 						    "Copy out failed\n");
3326 						goto out;
3327 					}
3328 					temp = &temp[len];
3329 				}
3330 			}
3331 		}
3332 
3333 		if (ioc->mfi_sense_len) {
3334 			/* get user-space sense ptr then copy out sense */
3335 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3336 			    &sense_ptr.sense_ptr_data[0],
3337 			    sizeof(sense_ptr.sense_ptr_data));
3338 #ifdef COMPAT_FREEBSD32
3339 			if (cmd != MFI_CMD) {
3340 				/*
3341 				 * Not 64-bit native, so zero out any
3342 				 * address over 32 bits.
				 */
3343 				sense_ptr.addr.high = 0;
3344 			}
3345 #endif
3346 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3347 			    ioc->mfi_sense_len);
3348 			if (error != 0) {
3349 				device_printf(sc->mfi_dev,
3350 				    "Copy out failed\n");
3351 				goto out;
3352 			}
3353 		}
3354 
3355 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3356 out:
3357 		mfi_config_unlock(sc, locked);
3358 		if (data)
3359 			free(data, M_MFIBUF);
3360 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3361 			for (i = 0; i < 2; i++) {
3362 				if (sc->kbuff_arr[i]) {
3363 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3364 						bus_dmamap_unload(
3365 						    sc->mfi_kbuff_arr_dmat[i],
3366 						    sc->mfi_kbuff_arr_dmamap[i]
3367 						    );
3368 					if (sc->kbuff_arr[i] != NULL)
3369 						bus_dmamem_free(
3370 						    sc->mfi_kbuff_arr_dmat[i],
3371 						    sc->kbuff_arr[i],
3372 						    sc->mfi_kbuff_arr_dmamap[i]
3373 						    );
3374 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3375 						bus_dma_tag_destroy(
3376 						    sc->mfi_kbuff_arr_dmat[i]);
3377 				}
3378 			}
3379 		}
3380 		if (cm) {
3381 			mtx_lock(&sc->mfi_io_lock);
3382 			mfi_release_command(cm);
3383 			mtx_unlock(&sc->mfi_io_lock);
3384 		}
3385 
3386 		break;
3387 		}
3388 	case MFI_SET_AEN:
3389 		aen = (struct mfi_ioc_aen *)arg;
3390 		mtx_lock(&sc->mfi_io_lock);
3391 		error = mfi_aen_register(sc, aen->aen_seq_num,
3392 		    aen->aen_class_locale);
3393 		mtx_unlock(&sc->mfi_io_lock);
3394 
3395 		break;
3396 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3397 		{
3398 			devclass_t devclass;
3399 			struct mfi_linux_ioc_packet l_ioc;
3400 			int adapter;
3401 
3402 			devclass = devclass_find("mfi");
3403 			if (devclass == NULL)
3404 				return (ENOENT);
3405 
3406 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3407 			if (error)
3408 				return (error);
3409 			adapter = l_ioc.lioc_adapter_no;
3410 			sc = devclass_get_softc(devclass, adapter);
3411 			if (sc == NULL)
3412 				return (ENOENT);
3413 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3414 			    cmd, arg, flag, td));
3415 			break;
3416 		}
3417 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3418 		{
3419 			devclass_t devclass;
3420 			struct mfi_linux_ioc_aen l_aen;
3421 			int adapter;
3422 
3423 			devclass = devclass_find("mfi");
3424 			if (devclass == NULL)
3425 				return (ENOENT);
3426 
3427 			error = copyin(arg, &l_aen, sizeof(l_aen));
3428 			if (error)
3429 				return (error);
3430 			adapter = l_aen.laen_adapter_no;
3431 			sc = devclass_get_softc(devclass, adapter);
3432 			if (sc == NULL)
3433 				return (ENOENT);
3434 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3435 			    cmd, arg, flag, td));
3436 			break;
3437 		}
3438 #ifdef COMPAT_FREEBSD32
3439 	case MFIIO_PASSTHRU32:
3440 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3441 			error = ENOTTY;
3442 			break;
3443 		}
3444 		iop_swab.ioc_frame	= iop32->ioc_frame;
3445 		iop_swab.buf_size	= iop32->buf_size;
3446 		iop_swab.buf		= PTRIN(iop32->buf);
3447 		iop			= &iop_swab;
3448 		/* FALLTHROUGH */
3449 #endif
3450 	case MFIIO_PASSTHRU:
3451 		error = mfi_user_command(sc, iop);
3452 #ifdef COMPAT_FREEBSD32
3453 		if (cmd == MFIIO_PASSTHRU32)
3454 			iop32->ioc_frame = iop_swab.ioc_frame;
3455 #endif
3456 		break;
3457 	default:
3458 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3459 		error = ENOTTY;
3460 		break;
3461 	}
3462 
3463 	return (error);
3464 }
3465 
3466 static int
3467 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3468 {
3469 	struct mfi_softc *sc;
3470 	struct mfi_linux_ioc_packet l_ioc;
3471 	struct mfi_linux_ioc_aen l_aen;
3472 	struct mfi_command *cm = NULL;
3473 	struct mfi_aen *mfi_aen_entry;
3474 	union mfi_sense_ptr sense_ptr;
3475 	uint32_t context = 0;
3476 	uint8_t *data = NULL, *temp;
3477 	int i;
3478 	int error, locked;
3479 
3480 	sc = dev->si_drv1;
3481 	error = 0;
3482 	switch (cmd) {
3483 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3484 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3485 		if (error != 0)
3486 			return (error);
3487 
3488 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3489 			return (EINVAL);
3490 		}
3491 
3492 		mtx_lock(&sc->mfi_io_lock);
3493 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3494 			mtx_unlock(&sc->mfi_io_lock);
3495 			return (EBUSY);
3496 		}
3497 		mtx_unlock(&sc->mfi_io_lock);
3498 		locked = 0;
3499 
3500 		/*
3501 		 * save off original context since copying from user
3502 		 * will clobber some data
3503 		 */
3504 		context = cm->cm_frame->header.context;
3505 
3506 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3507 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3508 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3509 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3510 		cm->cm_frame->header.scsi_status = 0;
3511 		cm->cm_frame->header.pad0 = 0;
3512 		if (l_ioc.lioc_sge_count)
3513 			cm->cm_sg =
3514 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3515 		cm->cm_flags = 0;
3516 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3517 			cm->cm_flags |= MFI_CMD_DATAIN;
3518 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3519 			cm->cm_flags |= MFI_CMD_DATAOUT;
3520 		cm->cm_len = cm->cm_frame->header.data_len;
3521 		if (cm->cm_len &&
3522 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3523 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3524 			    M_WAITOK | M_ZERO);
3525 		} else {
3526 			cm->cm_data = NULL;
3527 		}
3528 
3529 		/* restore header context */
3530 		cm->cm_frame->header.context = context;
3531 
3532 		temp = data;
3533 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3534 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3535 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3536 				       temp,
3537 				       l_ioc.lioc_sgl[i].iov_len);
3538 				if (error != 0) {
3539 					device_printf(sc->mfi_dev,
3540 					    "Copy in failed\n");
3541 					goto out;
3542 				}
3543 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3544 			}
3545 		}
3546 
3547 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3548 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3549 
3550 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3551 			cm->cm_frame->pass.sense_addr_lo =
3552 			    (uint32_t)cm->cm_sense_busaddr;
3553 			cm->cm_frame->pass.sense_addr_hi =
3554 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3555 		}
3556 
3557 		mtx_lock(&sc->mfi_io_lock);
3558 		error = mfi_check_command_pre(sc, cm);
3559 		if (error) {
3560 			mtx_unlock(&sc->mfi_io_lock);
3561 			goto out;
3562 		}
3563 
3564 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3565 			device_printf(sc->mfi_dev,
3566 			    "Controller poll failed\n");
3567 			mtx_unlock(&sc->mfi_io_lock);
3568 			goto out;
3569 		}
3570 
3571 		mfi_check_command_post(sc, cm);
3572 		mtx_unlock(&sc->mfi_io_lock);
3573 
3574 		temp = data;
3575 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3576 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3577 				error = copyout(temp,
3578 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3579 					l_ioc.lioc_sgl[i].iov_len);
3580 				if (error != 0) {
3581 					device_printf(sc->mfi_dev,
3582 					    "Copy out failed\n");
3583 					goto out;
3584 				}
3585 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3586 			}
3587 		}
3588 
3589 		if (l_ioc.lioc_sense_len) {
3590 			/* get user-space sense ptr then copy out sense */
3591 			bcopy(&((struct mfi_linux_ioc_packet *)arg)
3592 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3593 			    &sense_ptr.sense_ptr_data[0],
3594 			    sizeof(sense_ptr.sense_ptr_data));
3595 #ifdef __amd64__
3596 			/*
3597 			 * Only 32-bit Linux is supported, so zero out
3598 			 * any address bits above 32.
3599 			 */
3600 			sense_ptr.addr.high = 0;
3601 #endif
3602 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3603 			    l_ioc.lioc_sense_len);
3604 			if (error != 0) {
3605 				device_printf(sc->mfi_dev,
3606 				    "Copy out failed\n");
3607 				goto out;
3608 			}
3609 		}
3610 
3611 		error = copyout(&cm->cm_frame->header.cmd_status,
3612 			&((struct mfi_linux_ioc_packet *)arg)
3613 			->lioc_frame.hdr.cmd_status,
3614 			1);
3615 		if (error != 0) {
3616 			device_printf(sc->mfi_dev,
3617 				      "Copy out failed\n");
3618 			goto out;
3619 		}
3620 
3621 out:
3622 		mfi_config_unlock(sc, locked);
3623 		if (data)
3624 			free(data, M_MFIBUF);
3625 		if (cm) {
3626 			mtx_lock(&sc->mfi_io_lock);
3627 			mfi_release_command(cm);
3628 			mtx_unlock(&sc->mfi_io_lock);
3629 		}
3630 
3631 		return (error);
3632 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3633 		error = copyin(arg, &l_aen, sizeof(l_aen));
3634 		if (error != 0)
3635 			return (error);
3636 		device_printf(sc->mfi_dev, "AEN registration for pid %d\n", curproc->p_pid);
3637 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3638 		    M_WAITOK);
3639 		mtx_lock(&sc->mfi_io_lock);
3640 		/* M_WAITOK allocations cannot return NULL; no check needed. */
3641 		mfi_aen_entry->p = curproc;
3642 		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3643 		    aen_link);
3645 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3646 		    l_aen.laen_class_locale);
3647 
3648 		if (error != 0) {
3649 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3650 			    aen_link);
3651 			free(mfi_aen_entry, M_MFIBUF);
3652 		}
3653 		mtx_unlock(&sc->mfi_io_lock);
3654 
3655 		return (error);
3656 	default:
3657 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3658 		error = ENOENT;
3659 		break;
3660 	}
3661 
3662 	return (error);
3663 }
3664 
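/*
 * For reference, the Linux ioctl packet consumed above carries the frame,
 * its scatter/gather list, and the sense pointer in one blob; every offset
 * comes from the packet itself (a sketch of the layout, not authoritative):
 *
 *   lioc_frame.raw:  [ MFI frame | SG list @ lioc_sgl_off ]
 *   lioc_sense_off:  offset within the raw frame of a user-space pointer
 *                    to which sense data is copied out
 *   lioc_sgl[]:      user iovecs copied in (DATAOUT) or out (DATAIN)
 */
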
3665 static int
3666 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3667 {
3668 	struct mfi_softc *sc;
3669 	int revents = 0;
3670 
3671 	sc = dev->si_drv1;
3672 
3673 	if (poll_events & (POLLIN | POLLRDNORM)) {
3674 		if (sc->mfi_aen_triggered != 0) {
3675 			revents |= poll_events & (POLLIN | POLLRDNORM);
3676 			sc->mfi_aen_triggered = 0;
3677 		}
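		/* With no AEN command outstanding, a wakeup can never arrive. */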
3678 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3679 			revents |= POLLERR;
3680 		}
3681 	}
3682 
3683 	if (revents == 0) {
3684 		if (poll_events & (POLLIN | POLLRDNORM)) {
3685 			sc->mfi_poll_waiting = 1;
3686 			selrecord(td, &sc->mfi_select);
3687 		}
3688 	}
3689 
3690 	return (revents);
3691 }
3692 
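/*
 * Illustrative userland sketch, not part of the driver: blocking on the
 * selinfo wired up by mfi_poll() above to wait for an AEN.  The device
 * path is an assumption.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <err.h>

static void
mfi_wait_for_aen(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/mfi0", O_RDWR);
	if (pfd.fd == -1)
		err(1, "open");
	pfd.events = POLLIN | POLLRDNORM;
	/* POLLERR means no AEN command is outstanding in the driver. */
	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLERR) == 0) {
		/* An AEN fired; retrieve it via the AEN ioctl. */
	}
}
#endif
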
3693 static void
3694 mfi_dump_all(void)
3695 {
3696 	struct mfi_softc *sc;
3697 	struct mfi_command *cm;
3698 	devclass_t dc;
3699 	time_t deadline;
3700 	int timedout;
3701 	int i;
3702 
3703 	dc = devclass_find("mfi");
3704 	if (dc == NULL) {
3705 		printf("No mfi dev class\n");
3706 		return;
3707 	}
3708 
3709 	for (i = 0; ; i++) {
3710 		sc = devclass_get_softc(dc, i);
3711 		if (sc == NULL)
3712 			break;
3713 		device_printf(sc->mfi_dev, "Dumping\n\n");
3714 		timedout = 0;
3715 		deadline = time_uptime - mfi_cmd_timeout;
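		/*
		 * Worked example: with mfi_cmd_timeout = 30 and
		 * time_uptime = 1000, deadline is 970, and any busy
		 * command stamped at 970 or earlier is reported below.
		 */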
3716 		mtx_lock(&sc->mfi_io_lock);
3717 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3718 			if (cm->cm_timestamp <= deadline) {
3719 				device_printf(sc->mfi_dev,
3720 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3721 				    cm, (int)(time_uptime - cm->cm_timestamp));
3722 				MFI_PRINT_CMD(cm);
3723 				timedout++;
3724 			}
3725 		}
3726 
3727 #if 0
3728 		if (timedout)
3729 			MFI_DUMP_CMDS(sc);
3730 #endif
3731 
3732 		mtx_unlock(&sc->mfi_io_lock);
3733 	}
3734 
3735 	return;
3736 }
3737 
3738 static void
3739 mfi_timeout(void *data)
3740 {
3741 	struct mfi_softc *sc = (struct mfi_softc *)data;
3742 	struct mfi_command *cm, *tmp;
3743 	time_t deadline;
3744 	int timedout = 0;
3745 
3746 	deadline = time_uptime - mfi_cmd_timeout;
3747 	if (sc->adpreset == 0) {
3748 		if (!mfi_tbolt_reset(sc)) {
3749 			callout_reset(&sc->mfi_watchdog_callout,
3750 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3751 			return;
3752 		}
3753 	}
3754 	mtx_lock(&sc->mfi_io_lock);
3755 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3756 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3757 			continue;
3758 		if (cm->cm_timestamp <= deadline) {
3759 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3760 				cm->cm_timestamp = time_uptime;
3761 			} else {
3762 				device_printf(sc->mfi_dev,
3763 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3764 				    cm, (int)(time_uptime - cm->cm_timestamp));
3766 				MFI_PRINT_CMD(cm);
3767 				MFI_VALIDATE_CMD(sc, cm);
3768 				/*
3769 				 * While commands can get stuck forever, we
3770 				 * do not fail them, as there is no way to
3771 				 * tell whether the controller has actually
3772 				 * processed them or not.
3773 				 *
3774 				 * In addition, it's very likely that
3775 				 * force-failing a command here would cause
3776 				 * a panic, e.g. in UFS.
3777 				 */
3778 				timedout++;
3779 			}
3780 		}
3781 	}
3782 
3783 #if 0
3784 	if (timedout)
3785 		MFI_DUMP_CMDS(sc);
3786 #endif
3787 
3788 	mtx_unlock(&sc->mfi_io_lock);
3789 
3790 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3791 	    mfi_timeout, sc);
3792 
3793 	if (0)
3794 		mfi_dump_all();
3795 	return;
3796 }
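/*
 * mfi_timeout() above is the standard self-rearming callout pattern: the
 * handler does its scan and then re-arms itself one period out.  A minimal
 * standalone sketch, with a hypothetical handler name:
 */
#if 0
static void
wd_tick(void *arg)
{
	struct mfi_softc *sc = arg;

	/* ... scan sc->mfi_busy for stale commands ... */

	/* Re-arm so the handler runs again mfi_cmd_timeout seconds from now. */
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    wd_tick, sc);
}
#endif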
3797