/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	   0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	   &mfi_polled_cmd_timeout, 0,
	   "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");
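
/*
 * The RWTUN/RDTUN knobs above can also be seeded from loader.conf at
 * boot.  A minimal sketch (the values are illustrative only, not
 * recommendations):
 *
 *	hw.mfi.max_cmds=64		# cap outstanding firmware commands
 *	hw.mfi.event_locale=0xffff	# MFI_EVT_LOCALE_ALL
 *	hw.mfi.cmd_timeout=60		# command timeout, in seconds
 */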

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

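/*
 * Controller generations differ in how the doorbell, status and interrupt
 * mask registers are accessed: 1064R parts use the xscale variants below,
 * while 1078, GEN2 and Skinny parts use the ppc variants.  mfi_attach()
 * picks one set and hangs the methods off the softc.
 */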
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OMSG0));
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OSP0));
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return (1);

	MFI_WRITE4(sc, MFI_OSTS, status);
	return (0);
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return (1);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return (0);
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

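/*
 * Walk the firmware bring-up state machine until the controller reports
 * READY.  Each pass reads the current state from the status register,
 * nudges the firmware with a doorbell write where the state calls for it
 * (handshake, operational, boot-message-pending), then polls for up to
 * max_wait seconds for the state to change.  DEVICE_SCAN is special-cased:
 * as long as the raw status value keeps changing, the scan is progressing
 * and the wait is restarted.
 */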
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

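/*
 * Trivial busdma callback: the static allocations in this driver are
 * created with nsegments == 1, so only the single segment's bus address
 * needs to be copied out through the opaque argument.
 */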
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

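/*
 * Bus-independent attach.  In order: bring the firmware to the READY
 * state, size and allocate the DMA-able comms area, frame pool and sense
 * pool, run the MFI (or ThunderBolt MPI2) init handshake, hook up the
 * interrupt handler, and finally register the management cdev, sysctls,
 * watchdog and the config hook that probes for logical drives.
 */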
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate the DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from the request and
		 * reply descriptor allocations above to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
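	/*
	 * Illustrative sizing (the entry size and count here are example
	 * values, not tied to any particular controller): with 12-byte SG
	 * entries and mfi_max_sge == 33 the SG list occupies 396 bytes, so
	 * frames = (396 - 1) / 64 + 2 == 8, i.e. one header frame plus
	 * seven 64-byte extension frames, giving a per-command mfi_cmd_size
	 * of 8 * MFI_FRAME_SIZE == 512 bytes.
	 */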
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management.
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues (e.g. the busy queue) depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure it's
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

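/*
 * Build a DCMD command frame for the given opcode.  The caller must hold
 * the io lock.  If bufsize > 0 and *bufp is NULL, a zeroed data buffer is
 * allocated here and handed back through *bufp; the caller owns it and
 * must eventually free(buf, M_MFIBUF).  The command itself is returned
 * through *cmp with cm_flags cleared; the caller sets the data direction
 * flags before issuing it.
 */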
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case, do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);

	config_intrhook_disestablish(&sc->mfi_ich);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

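	/*
	 * hw_pi and hw_ci are the producer and consumer indexes of the
	 * reply queue that was handed to the firmware in mfi_comms_init().
	 * The firmware advances hw_pi as commands complete; the driver
	 * drains completion contexts up to it and then publishes the new
	 * hw_ci.  The queue has mfi_max_fw_cmds + 1 slots, hence the wrap
	 * test against that value below.
	 */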
restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is the number of seconds since boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events logged since
		 * the last shutdown; skip acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

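/*
 * Decode queued events from taskqueue context rather than in the
 * completion path: mfi_decode_evt() may create or destroy child devices
 * and take Giant and the config lock, which can't safely be done from
 * the completion path with the io lock held.
 */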
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

1669 static int
1670 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1671 {
1672 	struct mfi_command *cm;
1673 	struct mfi_dcmd_frame *dcmd;
1674 	union mfi_evt current_aen, prior_aen;
1675 	struct mfi_evt_detail *ed = NULL;
1676 	int error = 0;
1677 
1678 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1679 
1680 	current_aen.word = locale;
1681 	if (sc->mfi_aen_cm != NULL) {
1682 		prior_aen.word =
1683 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1684 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1685 		    !((prior_aen.members.locale & current_aen.members.locale)
1686 		    ^ current_aen.members.locale)) {
1687 			return (0);
1688 		} else {
1689 			prior_aen.members.locale |= current_aen.members.locale;
1690 			if (prior_aen.members.evt_class
1691 			    < current_aen.members.evt_class)
1692 				current_aen.members.evt_class =
1693 				    prior_aen.members.evt_class;
1694 			mfi_abort(sc, &sc->mfi_aen_cm);
1695 		}
1696 	}
1697 
1698 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1699 	    (void **)&ed, sizeof(*ed));
1700 	if (error)
1701 		goto out;
1702 
1703 	dcmd = &cm->cm_frame->dcmd;
1704 	((uint32_t *)&dcmd->mbox)[0] = seq;
1705 	((uint32_t *)&dcmd->mbox)[1] = locale;
1706 	cm->cm_flags = MFI_CMD_DATAIN;
1707 	cm->cm_complete = mfi_aen_complete;
1708 
1709 	sc->last_seq_num = seq;
1710 	sc->mfi_aen_cm = cm;
1711 
1712 	mfi_enqueue_ready(cm);
1713 	mfi_startio(sc);
1714 
1715 out:
1716 	return (error);
1717 }
1718 
1719 static void
1720 mfi_aen_complete(struct mfi_command *cm)
1721 {
1722 	struct mfi_frame_header *hdr;
1723 	struct mfi_softc *sc;
1724 	struct mfi_evt_detail *detail;
1725 	struct mfi_aen *mfi_aen_entry, *tmp;
1726 	int seq = 0, aborted = 0;
1727 
1728 	sc = cm->cm_sc;
1729 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1730 
1731 	if (sc->mfi_aen_cm == NULL)
1732 		return;
1733 
1734 	hdr = &cm->cm_frame->header;
1735 
1736 	if (sc->cm_aen_abort ||
1737 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1738 		sc->cm_aen_abort = 0;
1739 		aborted = 1;
1740 	} else {
1741 		sc->mfi_aen_triggered = 1;
1742 		if (sc->mfi_poll_waiting) {
1743 			sc->mfi_poll_waiting = 0;
1744 			selwakeup(&sc->mfi_select);
1745 		}
1746 		detail = cm->cm_data;
1747 		mfi_queue_evt(sc, detail);
1748 		seq = detail->seq + 1;
1749 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1750 		    tmp) {
1751 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1752 			    aen_link);
1753 			PROC_LOCK(mfi_aen_entry->p);
1754 			kern_psignal(mfi_aen_entry->p, SIGIO);
1755 			PROC_UNLOCK(mfi_aen_entry->p);
1756 			free(mfi_aen_entry, M_MFIBUF);
1757 		}
1758 	}
1759 
1760 	free(cm->cm_data, M_MFIBUF);
1761 	wakeup(&sc->mfi_aen_cm);
1762 	sc->mfi_aen_cm = NULL;
1763 	mfi_release_command(cm);
1764 
1765 	/* set it up again so the driver can catch more events */
1766 	if (!aborted)
1767 		mfi_aen_setup(sc, seq);
1768 }
1769 
1770 #define MAX_EVENTS 15
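/*
 * Note (descriptive): struct mfi_evt_list evidently embeds one
 * struct mfi_evt_detail already, which is why mfi_parse_entries()
 * below sizes its buffer with (MAX_EVENTS - 1) additional entries.
 */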
1771 
1772 static int
1773 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1774 {
1775 	struct mfi_command *cm;
1776 	struct mfi_dcmd_frame *dcmd;
1777 	struct mfi_evt_list *el;
1778 	union mfi_evt class_locale;
1779 	int error, i, seq, size;
1780 
1781 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1782 
1783 	class_locale.members.reserved = 0;
1784 	class_locale.members.locale = mfi_event_locale;
1785 	class_locale.members.evt_class  = mfi_event_class;
1786 
1787 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) *
1788 	    (MAX_EVENTS - 1);
1789 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1790 	if (el == NULL)
1791 		return (ENOMEM);
1792 
1793 	for (seq = start_seq;;) {
1794 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1795 			free(el, M_MFIBUF);
1796 			return (EBUSY);
1797 		}
1798 
1799 		dcmd = &cm->cm_frame->dcmd;
1800 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1801 		dcmd->header.cmd = MFI_CMD_DCMD;
1802 		dcmd->header.timeout = 0;
1803 		dcmd->header.data_len = size;
1804 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1805 		((uint32_t *)&dcmd->mbox)[0] = seq;
1806 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1807 		cm->cm_sg = &dcmd->sgl;
1808 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1809 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1810 		cm->cm_data = el;
1811 		cm->cm_len = size;
1812 
1813 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1814 			device_printf(sc->mfi_dev,
1815 			    "Failed to get controller entries\n");
1816 			mfi_release_command(cm);
1817 			break;
1818 		}
1819 
1820 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1821 		    BUS_DMASYNC_POSTREAD);
1822 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1823 
1824 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1825 			mfi_release_command(cm);
1826 			break;
1827 		}
1828 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1829 			device_printf(sc->mfi_dev,
1830 			    "Error %d fetching controller entries\n",
1831 			    dcmd->header.cmd_status);
1832 			mfi_release_command(cm);
1833 			error = EIO;
1834 			break;
1835 		}
1836 		mfi_release_command(cm);
1837 
1838 		for (i = 0; i < el->count; i++) {
1839 			/*
1840 			 * If this event is newer than 'stop_seq' then
1841 			 * break out of the loop.  Note that the log
1842 			 * is a circular buffer so we have to handle
1843 			 * the case that our stop point is earlier in
1844 			 * the buffer than our start point.
1845 			 */
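			/*
			 * For example (illustrative): with start_seq == 90
			 * and stop_seq == 10 the log has wrapped, so any
			 * event with 10 <= seq < 90 ends the scan; with
			 * start_seq == 10 and stop_seq == 90 any event
			 * with seq >= 90 ends it.
			 */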
1846 			if (el->event[i].seq >= stop_seq) {
1847 				if (start_seq <= stop_seq)
1848 					break;
1849 				else if (el->event[i].seq < start_seq)
1850 					break;
1851 			}
1852 			mfi_queue_evt(sc, &el->event[i]);
1853 		}
1854 		seq = el->event[el->count - 1].seq + 1;
1855 	}
1856 
1857 	free(el, M_MFIBUF);
1858 	return (error);
1859 }
1860 
1861 static int
1862 mfi_add_ld(struct mfi_softc *sc, int id)
1863 {
1864 	struct mfi_command *cm;
1865 	struct mfi_dcmd_frame *dcmd = NULL;
1866 	struct mfi_ld_info *ld_info = NULL;
1867 	struct mfi_disk_pending *ld_pend;
1868 	int error;
1869 
1870 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1871 
1872 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1873 	if (ld_pend != NULL) {
1874 		ld_pend->ld_id = id;
1875 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1876 	}
1877 
1878 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1879 	    (void **)&ld_info, sizeof(*ld_info));
1880 	if (error) {
1881 		device_printf(sc->mfi_dev,
1882 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1883 		if (ld_info)
1884 			free(ld_info, M_MFIBUF);
1885 		return (error);
1886 	}
1887 	cm->cm_flags = MFI_CMD_DATAIN;
1888 	dcmd = &cm->cm_frame->dcmd;
1889 	dcmd->mbox[0] = id;
1890 	if (mfi_wait_command(sc, cm) != 0) {
1891 		device_printf(sc->mfi_dev,
1892 		    "Failed to get logical drive: %d\n", id);
1893 		free(ld_info, M_MFIBUF);
1894 		return (0);
1895 	}
1896 	if (ld_info->ld_config.params.isSSCD != 1)
1897 		mfi_add_ld_complete(cm);
1898 	else {
1899 		mfi_release_command(cm);
1900 		if (ld_info)	/* for SSCD volumes, free ld_info here */
1901 			free(ld_info, M_MFIBUF);
1902 	}
1903 	return (0);
1904 }
1905 
1906 static void
1907 mfi_add_ld_complete(struct mfi_command *cm)
1908 {
1909 	struct mfi_frame_header *hdr;
1910 	struct mfi_ld_info *ld_info;
1911 	struct mfi_softc *sc;
1912 	device_t child;
1913 
1914 	sc = cm->cm_sc;
1915 	hdr = &cm->cm_frame->header;
1916 	ld_info = cm->cm_private;
1917 
1918 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1919 		free(ld_info, M_MFIBUF);
1920 		wakeup(&sc->mfi_map_sync_cm);
1921 		mfi_release_command(cm);
1922 		return;
1923 	}
1924 	wakeup(&sc->mfi_map_sync_cm);
1925 	mfi_release_command(cm);
1926 
1927 	mtx_unlock(&sc->mfi_io_lock);
1928 	mtx_lock(&Giant);
1929 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1930 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1931 		free(ld_info, M_MFIBUF);
1932 		mtx_unlock(&Giant);
1933 		mtx_lock(&sc->mfi_io_lock);
1934 		return;
1935 	}
1936 
1937 	device_set_ivars(child, ld_info);
1938 	device_set_desc(child, "MFI Logical Disk");
1939 	bus_generic_attach(sc->mfi_dev);
1940 	mtx_unlock(&Giant);
1941 	mtx_lock(&sc->mfi_io_lock);
1942 }
1943 
1944 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1945 {
1946 	struct mfi_command *cm;
1947 	struct mfi_dcmd_frame *dcmd = NULL;
1948 	struct mfi_pd_info *pd_info = NULL;
1949 	struct mfi_system_pending *syspd_pend;
1950 	int error;
1951 
1952 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1953 
1954 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1955 	if (syspd_pend != NULL) {
1956 		syspd_pend->pd_id = id;
1957 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1958 	}
1959 
1960 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1961 		(void **)&pd_info, sizeof(*pd_info));
1962 	if (error) {
1963 		device_printf(sc->mfi_dev,
1964 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1965 		    error);
1966 		if (pd_info)
1967 			free(pd_info, M_MFIBUF);
1968 		return (error);
1969 	}
1970 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1971 	dcmd = &cm->cm_frame->dcmd;
1972 	dcmd->mbox[0] = id;
1973 	dcmd->header.scsi_status = 0;
1974 	dcmd->header.pad0 = 0;
1975 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1976 		device_printf(sc->mfi_dev,
1977 		    "Failed to get physical drive info %d\n", id);
1978 		free(pd_info, M_MFIBUF);
1979 		mfi_release_command(cm);
1980 		return (error);
1981 	}
1982 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1983 	    BUS_DMASYNC_POSTREAD);
1984 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1985 	mfi_add_sys_pd_complete(cm);
1986 	return (0);
1987 }
1988 
1989 static void
1990 mfi_add_sys_pd_complete(struct mfi_command *cm)
1991 {
1992 	struct mfi_frame_header *hdr;
1993 	struct mfi_pd_info *pd_info;
1994 	struct mfi_softc *sc;
1995 	device_t child;
1996 
1997 	sc = cm->cm_sc;
1998 	hdr = &cm->cm_frame->header;
1999 	pd_info = cm->cm_private;
2000 
2001 	if (hdr->cmd_status != MFI_STAT_OK) {
2002 		free(pd_info, M_MFIBUF);
2003 		mfi_release_command(cm);
2004 		return;
2005 	}
2006 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2007 		device_printf(sc->mfi_dev, "PD=%x is not a SYSTEM PD\n",
2008 		    pd_info->ref.v.device_id);
2009 		free(pd_info, M_MFIBUF);
2010 		mfi_release_command(cm);
2011 		return;
2012 	}
2013 	mfi_release_command(cm);
2014 
2015 	mtx_unlock(&sc->mfi_io_lock);
2016 	mtx_lock(&Giant);
2017 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2018 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2019 		free(pd_info, M_MFIBUF);
2020 		mtx_unlock(&Giant);
2021 		mtx_lock(&sc->mfi_io_lock);
2022 		return;
2023 	}
2024 
2025 	device_set_ivars(child, pd_info);
2026 	device_set_desc(child, "MFI System PD");
2027 	bus_generic_attach(sc->mfi_dev);
2028 	mtx_unlock(&Giant);
2029 	mtx_lock(&sc->mfi_io_lock);
2030 }
2031 
2032 static struct mfi_command *
2033 mfi_bio_command(struct mfi_softc *sc)
2034 {
2035 	struct bio *bio;
2036 	struct mfi_command *cm = NULL;
2037 
2038 	/* Reserve two commands to avoid starving ioctls */
2039 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2040 		return (NULL);
2041 	}
2042 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2043 		return (NULL);
2044 	}
2045 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2046 		cm = mfi_build_ldio(sc, bio);
2047 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2048 		cm = mfi_build_syspdio(sc, bio);
2049 	}
2050 	if (cm == NULL)
2051 		mfi_enqueue_bio(sc, bio);
2052 	return (cm);
2053 }
2054 
2055 /*
2056  * Mostly copied from cam/scsi/scsi_all.c:scsi_read_write().
2057  */
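/*
 * CDB selection, as implemented below:
 *	 6-byte: lba < 2^21, block_count <= 255, byte2 == 0
 *	10-byte: lba < 2^32, block_count <= 65535
 *	12-byte: lba < 2^32, any 32-bit block_count
 *	16-byte: everything else (in practice, lba >= 2^32)
 */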
2058 
2059 int
2060 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba,
    u_int32_t block_count, uint8_t *cdb)
2061 {
2062 	int cdb_len;
2063 
2064 	if (((lba & 0x1fffff) == lba) &&
2065 	    ((block_count & 0xff) == block_count) &&
2066 	    (byte2 == 0)) {
2067 		/* We can fit in a 6 byte cdb */
2068 		struct scsi_rw_6 *scsi_cmd;
2069 
2070 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2071 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2072 		scsi_ulto3b(lba, scsi_cmd->addr);
2073 		scsi_cmd->length = block_count & 0xff;
2074 		scsi_cmd->control = 0;
2075 		cdb_len = sizeof(*scsi_cmd);
2076 	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
2077 		/* Need a 10 byte CDB */
2078 		struct scsi_rw_10 *scsi_cmd;
2079 
2080 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2081 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2082 		scsi_cmd->byte2 = byte2;
2083 		scsi_ulto4b(lba, scsi_cmd->addr);
2084 		scsi_cmd->reserved = 0;
2085 		scsi_ulto2b(block_count, scsi_cmd->length);
2086 		scsi_cmd->control = 0;
2087 		cdb_len = sizeof(*scsi_cmd);
2088 	} else if (((block_count & 0xffffffff) == block_count) &&
2089 	    ((lba & 0xffffffff) == lba)) {
2090 		/* Block count too big for a 10-byte CDB; use a 12-byte CDB */
2091 		struct scsi_rw_12 *scsi_cmd;
2092 
2093 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2094 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2095 		scsi_cmd->byte2 = byte2;
2096 		scsi_ulto4b(lba, scsi_cmd->addr);
2097 		scsi_cmd->reserved = 0;
2098 		scsi_ulto4b(block_count, scsi_cmd->length);
2099 		scsi_cmd->control = 0;
2100 		cdb_len = sizeof(*scsi_cmd);
2101 	} else {
2102 		/*
2103 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2104 		 * than 2^32
2105 		 */
2106 		struct scsi_rw_16 *scsi_cmd;
2107 
2108 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2109 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2110 		scsi_cmd->byte2 = byte2;
2111 		scsi_u64to8b(lba, scsi_cmd->addr);
2112 		scsi_cmd->reserved = 0;
2113 		scsi_ulto4b(block_count, scsi_cmd->length);
2114 		scsi_cmd->control = 0;
2115 		cdb_len = sizeof(*scsi_cmd);
2116 	}
2117 
2118 	return (cdb_len);
2119 }
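
/*
 * Illustrative use (not taken from the driver): a 4KB read at LBA 100
 * with 512-byte sectors fits the 6-byte form, so
 *
 *	uint8_t cdb[16];
 *	int len = mfi_build_cdb(1, 0, 100, 8, cdb);
 *
 * returns len == 6 with READ_6 in cdb[0].
 */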
2120 
2121 extern char *unmapped_buf;
2122 
2123 static struct mfi_command *
2124 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2125 {
2126 	struct mfi_command *cm;
2127 	struct mfi_pass_frame *pass;
2128 	uint32_t context = 0;
2129 	int flags = 0, blkcount = 0, readop;
2130 	uint8_t cdb_len;
2131 
2132 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2133 
2134 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2135 	    return (NULL);
2136 
2137 	/* Zero out the MFI frame */
2138 	context = cm->cm_frame->header.context;
2139 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2140 	cm->cm_frame->header.context = context;
2141 	pass = &cm->cm_frame->pass;
2142 	bzero(pass->cdb, 16);
2143 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2144 	switch (bio->bio_cmd) {
2145 	case BIO_READ:
2146 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2147 		readop = 1;
2148 		break;
2149 	case BIO_WRITE:
2150 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2151 		readop = 0;
2152 		break;
2153 	default:
2154 		/* TODO: what about BIO_DELETE? */
2155 		biofinish(bio, NULL, EOPNOTSUPP);
2156 		mfi_enqueue_free(cm);
2157 		return (NULL);
2158 	}
2159 
2160 	/* Cheat with the sector length to avoid a non-constant division */
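	/* howmany(x, y) from <sys/param.h> rounds up: (x + y - 1) / y */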
2161 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2162 	/* Fill the LBA and Transfer length in CDB */
2163 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2164 	    pass->cdb);
2165 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2166 	pass->header.lun_id = 0;
2167 	pass->header.timeout = 0;
2168 	pass->header.flags = 0;
2169 	pass->header.scsi_status = 0;
2170 	pass->header.sense_len = MFI_SENSE_LEN;
2171 	pass->header.data_len = bio->bio_bcount;
2172 	pass->header.cdb_len = cdb_len;
2173 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2174 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2175 	cm->cm_complete = mfi_bio_complete;
2176 	cm->cm_private = bio;
2177 	cm->cm_data = unmapped_buf;
2178 	cm->cm_len = bio->bio_bcount;
2179 	cm->cm_sg = &pass->sgl;
2180 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2181 	cm->cm_flags = flags;
2182 
2183 	return (cm);
2184 }
2185 
2186 static struct mfi_command *
2187 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2188 {
2189 	struct mfi_io_frame *io;
2190 	struct mfi_command *cm;
2191 	int flags;
2192 	uint32_t blkcount;
2193 	uint32_t context = 0;
2194 
2195 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2196 
2197 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2198 	    return (NULL);
2199 
2200 	/* Zero out the MFI frame */
2201 	context = cm->cm_frame->header.context;
2202 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2203 	cm->cm_frame->header.context = context;
2204 	io = &cm->cm_frame->io;
2205 	switch (bio->bio_cmd) {
2206 	case BIO_READ:
2207 		io->header.cmd = MFI_CMD_LD_READ;
2208 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2209 		break;
2210 	case BIO_WRITE:
2211 		io->header.cmd = MFI_CMD_LD_WRITE;
2212 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2213 		break;
2214 	default:
2215 		/* TODO: what about BIO_DELETE? */
2216 		biofinish(bio, NULL, EOPNOTSUPP);
2217 		mfi_enqueue_free(cm);
2218 		return (NULL);
2219 	}
2220 
2221 	/* Cheat with the sector length to avoid a non-constant division */
2222 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2223 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2224 	io->header.timeout = 0;
2225 	io->header.flags = 0;
2226 	io->header.scsi_status = 0;
2227 	io->header.sense_len = MFI_SENSE_LEN;
2228 	io->header.data_len = blkcount;
2229 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2230 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2231 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2232 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2233 	cm->cm_complete = mfi_bio_complete;
2234 	cm->cm_private = bio;
2235 	cm->cm_data = unmapped_buf;
2236 	cm->cm_len = bio->bio_bcount;
2237 	cm->cm_sg = &io->sgl;
2238 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2239 	cm->cm_flags = flags;
2240 
2241 	return (cm);
2242 }
2243 
2244 static void
2245 mfi_bio_complete(struct mfi_command *cm)
2246 {
2247 	struct bio *bio;
2248 	struct mfi_frame_header *hdr;
2249 	struct mfi_softc *sc;
2250 
2251 	bio = cm->cm_private;
2252 	hdr = &cm->cm_frame->header;
2253 	sc = cm->cm_sc;
2254 
2255 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2256 		bio->bio_flags |= BIO_ERROR;
2257 		bio->bio_error = EIO;
2258 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2259 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2260 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2261 	} else if (cm->cm_error != 0) {
2262 		bio->bio_flags |= BIO_ERROR;
2263 		bio->bio_error = cm->cm_error;
2264 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2265 		    cm, cm->cm_error);
2266 	}
2267 
2268 	mfi_release_command(cm);
2269 	mfi_disk_complete(bio);
2270 }
2271 
2272 void
2273 mfi_startio(struct mfi_softc *sc)
2274 {
2275 	struct mfi_command *cm;
2276 	struct ccb_hdr *ccbh;
2277 
2278 	for (;;) {
2279 		/* Don't bother if we're short on resources */
2280 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2281 			break;
2282 
2283 		/* Try a command that has already been prepared */
2284 		cm = mfi_dequeue_ready(sc);
2285 
2286 		if (cm == NULL) {
2287 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2288 				cm = sc->mfi_cam_start(ccbh);
2289 		}
2290 
2291 		/* Nope, so look for work on the bioq */
2292 		if (cm == NULL)
2293 			cm = mfi_bio_command(sc);
2294 
2295 		/* No work available, so exit */
2296 		if (cm == NULL)
2297 			break;
2298 
2299 		/* Send the command to the controller */
2300 		if (mfi_mapcmd(sc, cm) != 0) {
2301 			device_printf(sc->mfi_dev, "Failed to startio\n");
2302 			mfi_requeue_ready(cm);
2303 			break;
2304 		}
2305 	}
2306 }
2307 
2308 int
2309 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2310 {
2311 	int error, polled;
2312 
2313 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2314 
2315 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2316 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2317 		if (cm->cm_flags & MFI_CMD_CCB)
2318 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2319 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2320 			    polled);
2321 		else if (cm->cm_flags & MFI_CMD_BIO)
2322 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2323 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2324 			    polled);
2325 		else
2326 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2327 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2328 			    mfi_data_cb, cm, polled);
2329 		if (error == EINPROGRESS) {
2330 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2331 			return (0);
2332 		}
2333 	} else {
2334 		error = mfi_send_frame(sc, cm);
2335 	}
2336 
2337 	return (error);
2338 }
2339 
2340 static void
2341 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2342 {
2343 	struct mfi_frame_header *hdr;
2344 	struct mfi_command *cm;
2345 	union mfi_sgl *sgl;
2346 	struct mfi_softc *sc;
2347 	int i, j, first, dir;
2348 	int sge_size, locked;
2349 
2350 	cm = (struct mfi_command *)arg;
2351 	sc = cm->cm_sc;
2352 	hdr = &cm->cm_frame->header;
2353 	sgl = cm->cm_sg;
2354 
2355 	/*
2356 	 * We need to check if we have the lock as this is async
2357 	 * callback so even though our caller mfi_mapcmd asserts
2358 	 * it has the lock, there is no guarantee that hasn't been
2359 	 * dropped if bus_dmamap_load returned prior to our
2360 	 * completion.
2361 	 */
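	/*
	 * (bus_dmamap_load(9) either runs the callback before returning
	 * or defers it and returns EINPROGRESS, so the lock may or may
	 * not still be held at this point.)
	 */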
2362 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2363 		mtx_lock(&sc->mfi_io_lock);
2364 
2365 	if (error) {
2366 		printf("error %d in callback\n", error);
2367 		cm->cm_error = error;
2368 		mfi_complete(sc, cm);
2369 		goto out;
2370 	}
2371 	/*
2372 	 * Use an IEEE SGL only for I/O on a SKINNY controller.  For
2373 	 * other commands on a SKINNY controller use either sg32 or
2374 	 * sg64 based on the sizeof(bus_addr_t).  Also calculate the
2375 	 * total frame size based on the type of SGL used.
2376 	 */
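	/*
	 * A skinny SG element (struct mfi_sg_skinny) carries a 64-bit
	 * address, a length, and a flag word, which is why the IEEE
	 * and SGL64 frame flags are set together on this path.
	 */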
2377 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2378 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2379 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2380 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2381 		for (i = 0; i < nsegs; i++) {
2382 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2383 			sgl->sg_skinny[i].len = segs[i].ds_len;
2384 			sgl->sg_skinny[i].flag = 0;
2385 		}
2386 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2387 		sge_size = sizeof(struct mfi_sg_skinny);
2388 		hdr->sg_count = nsegs;
2389 	} else {
2390 		j = 0;
2391 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2392 			first = cm->cm_stp_len;
2393 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2394 				sgl->sg32[j].addr = segs[0].ds_addr;
2395 				sgl->sg32[j++].len = first;
2396 			} else {
2397 				sgl->sg64[j].addr = segs[0].ds_addr;
2398 				sgl->sg64[j++].len = first;
2399 			}
2400 		} else
2401 			first = 0;
2402 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2403 			for (i = 0; i < nsegs; i++) {
2404 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2405 				sgl->sg32[j++].len = segs[i].ds_len - first;
2406 				first = 0;
2407 			}
2408 		} else {
2409 			for (i = 0; i < nsegs; i++) {
2410 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2411 				sgl->sg64[j++].len = segs[i].ds_len - first;
2412 				first = 0;
2413 			}
2414 			hdr->flags |= MFI_FRAME_SGL64;
2415 		}
2416 		hdr->sg_count = j;
2417 		sge_size = sc->mfi_sge_size;
2418 	}
2419 
2420 	dir = 0;
2421 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2422 		dir |= BUS_DMASYNC_PREREAD;
2423 		hdr->flags |= MFI_FRAME_DIR_READ;
2424 	}
2425 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2426 		dir |= BUS_DMASYNC_PREWRITE;
2427 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2428 	}
2429 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2430 	cm->cm_flags |= MFI_CMD_MAPPED;
2431 
2432 	/*
2433 	 * Instead of calculating the total number of frames in the
2434 	 * compound frame, it's already assumed that there will be at
2435 	 * least 1 frame, so don't compensate for the modulo of the
2436 	 * following division.
2437 	 */
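	/*
	 * Worked example (assuming MFI_FRAME_SIZE is 64): a 56-byte
	 * total gives (56 - 1) / 64 == 0 extra frames, 64 bytes still
	 * gives 0, and 65 bytes gives (65 - 1) / 64 == 1.
	 */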
2438 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2439 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2440 
2441 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2442 		printf("error %d in callback from mfi_send_frame\n", error);
2443 		cm->cm_error = error;
2444 		mfi_complete(sc, cm);
2445 		goto out;
2446 	}
2447 
2448 out:
2449 	/* leave the lock in the state we found it */
2450 	if (locked == 0)
2451 		mtx_unlock(&sc->mfi_io_lock);
2452 
2453 	return;
2454 }
2455 
2456 static int
2457 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2458 {
2459 	int error;
2460 
2461 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2462 
2463 	if (sc->MFA_enabled)
2464 		error = mfi_tbolt_send_frame(sc, cm);
2465 	else
2466 		error = mfi_std_send_frame(sc, cm);
2467 
2468 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2469 		mfi_remove_busy(cm);
2470 
2471 	return (error);
2472 }
2473 
2474 static int
2475 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2476 {
2477 	struct mfi_frame_header *hdr;
2478 	int tm = mfi_polled_cmd_timeout * 1000;
2479 
2480 	hdr = &cm->cm_frame->header;
2481 
2482 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2483 		cm->cm_timestamp = time_uptime;
2484 		mfi_enqueue_busy(cm);
2485 	} else {
2486 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2487 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2488 	}
2489 
2490 	/*
2491 	 * The bus address of the command is aligned on a 64 byte boundary,
2492 	 * leaving the low 6 bits zero.  For whatever reason, the
2493 	 * hardware wants the address shifted right by three, leaving just
2494 	 * 3 zero bits.  These three bits are then used as a prefetching
2495 	 * hint for the hardware to predict how many frames need to be
2496 	 * fetched across the bus.  If a command has more than 8 frames
2497 	 * then the 3 bits are set to 0x7 and the firmware uses other
2498 	 * information in the command to determine the total amount to fetch.
2499 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2500 	 * is enough for both 32bit and 64bit systems.
2501 	 */
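	/*
	 * For example (illustrative): a frame at bus address 0x12340 is
	 * 64-byte aligned; shifted right by 3 it becomes 0x2468, whose
	 * low 3 bits are free to carry min(cm_extra_frames, 7) as the
	 * fetch hint.
	 */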
2502 	if (cm->cm_extra_frames > 7)
2503 		cm->cm_extra_frames = 7;
2504 
2505 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2506 
2507 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2508 		return (0);
2509 
2510 	/* This is a polled command, so busy-wait for it to complete. */
2511 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2512 		DELAY(1000);
2513 		tm -= 1;
2514 		if (tm <= 0)
2515 			break;
2516 	}
2517 
2518 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2519 		device_printf(sc->mfi_dev, "Frame %p timed out, "
2520 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2521 		return (ETIMEDOUT);
2522 	}
2523 
2524 	return (0);
2525 }
2526 
2528 void
2529 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2530 {
2531 	int dir;
2532 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2533 
2534 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2535 		dir = 0;
2536 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2537 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2538 			dir |= BUS_DMASYNC_POSTREAD;
2539 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2540 			dir |= BUS_DMASYNC_POSTWRITE;
2541 
2542 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2543 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2544 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2545 	}
2546 
2547 	cm->cm_flags |= MFI_CMD_COMPLETED;
2548 
2549 	if (cm->cm_complete != NULL)
2550 		cm->cm_complete(cm);
2551 	else
2552 		wakeup(cm);
2553 }
2554 
2555 static int
2556 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2557 {
2558 	struct mfi_command *cm;
2559 	struct mfi_abort_frame *abort;
2560 	int i = 0, error;
2561 	uint32_t context = 0;
2562 
2563 	mtx_lock(&sc->mfi_io_lock);
2564 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2565 		mtx_unlock(&sc->mfi_io_lock);
2566 		return (EBUSY);
2567 	}
2568 
2569 	/* Zero out the MFI frame */
2570 	context = cm->cm_frame->header.context;
2571 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2572 	cm->cm_frame->header.context = context;
2573 
2574 	abort = &cm->cm_frame->abort;
2575 	abort->header.cmd = MFI_CMD_ABORT;
2576 	abort->header.flags = 0;
2577 	abort->header.scsi_status = 0;
2578 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2579 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2580 	abort->abort_mfi_addr_hi =
2581 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2582 	cm->cm_data = NULL;
2583 	cm->cm_flags = MFI_CMD_POLLED;
2584 
2585 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2586 		device_printf(sc->mfi_dev, "failed to abort command\n");
2587 	mfi_release_command(cm);
2588 
2589 	mtx_unlock(&sc->mfi_io_lock);
2590 	while (i < 5 && *cm_abort != NULL) {
2591 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2593 		i++;
2594 	}
2595 	if (*cm_abort != NULL) {
2596 		/* Force a complete if command didn't abort */
2597 		mtx_lock(&sc->mfi_io_lock);
2598 		(*cm_abort)->cm_complete(*cm_abort);
2599 		mtx_unlock(&sc->mfi_io_lock);
2600 	}
2601 
2602 	return (error);
2603 }
2604 
2605 int
2606 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2607      int len)
2608 {
2609 	struct mfi_command *cm;
2610 	struct mfi_io_frame *io;
2611 	int error;
2612 	uint32_t context = 0;
2613 
2614 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2615 		return (EBUSY);
2616 
2617 	/* Zero out the MFI frame */
2618 	context = cm->cm_frame->header.context;
2619 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2620 	cm->cm_frame->header.context = context;
2621 
2622 	io = &cm->cm_frame->io;
2623 	io->header.cmd = MFI_CMD_LD_WRITE;
2624 	io->header.target_id = id;
2625 	io->header.timeout = 0;
2626 	io->header.flags = 0;
2627 	io->header.scsi_status = 0;
2628 	io->header.sense_len = MFI_SENSE_LEN;
2629 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2630 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2631 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2632 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2633 	io->lba_lo = lba & 0xffffffff;
2634 	cm->cm_data = virt;
2635 	cm->cm_len = len;
2636 	cm->cm_sg = &io->sgl;
2637 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2638 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2639 
2640 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2641 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2642 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2643 	    BUS_DMASYNC_POSTWRITE);
2644 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2645 	mfi_release_command(cm);
2646 
2647 	return (error);
2648 }
2649 
2650 int
2651 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2652     int len)
2653 {
2654 	struct mfi_command *cm;
2655 	struct mfi_pass_frame *pass;
2656 	int error, readop, cdb_len;
2657 	uint32_t blkcount;
2658 
2659 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2660 		return (EBUSY);
2661 
2662 	pass = &cm->cm_frame->pass;
2663 	bzero(pass->cdb, 16);
2664 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2665 
2666 	readop = 0;
2667 	blkcount = howmany(len, MFI_SECTOR_LEN);
2668 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2669 	pass->header.target_id = id;
2670 	pass->header.timeout = 0;
2671 	pass->header.flags = 0;
2672 	pass->header.scsi_status = 0;
2673 	pass->header.sense_len = MFI_SENSE_LEN;
2674 	pass->header.data_len = len;
2675 	pass->header.cdb_len = cdb_len;
2676 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2677 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2678 	cm->cm_data = virt;
2679 	cm->cm_len = len;
2680 	cm->cm_sg = &pass->sgl;
2681 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2682 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2683 
2684 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2685 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2686 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2687 	    BUS_DMASYNC_POSTWRITE);
2688 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2689 	mfi_release_command(cm);
2690 
2691 	return (error);
2692 }
2693 
2694 static int
2695 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2696 {
2697 	struct mfi_softc *sc;
2698 	int error;
2699 
2700 	sc = dev->si_drv1;
2701 
2702 	mtx_lock(&sc->mfi_io_lock);
2703 	if (sc->mfi_detaching)
2704 		error = ENXIO;
2705 	else {
2706 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2707 		error = 0;
2708 	}
2709 	mtx_unlock(&sc->mfi_io_lock);
2710 
2711 	return (error);
2712 }
2713 
2714 static int
2715 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2716 {
2717 	struct mfi_softc *sc;
2718 	struct mfi_aen *mfi_aen_entry, *tmp;
2719 
2720 	sc = dev->si_drv1;
2721 
2722 	mtx_lock(&sc->mfi_io_lock);
2723 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2724 
2725 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2726 		if (mfi_aen_entry->p == curproc) {
2727 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2728 			    aen_link);
2729 			free(mfi_aen_entry, M_MFIBUF);
2730 		}
2731 	}
2732 	mtx_unlock(&sc->mfi_io_lock);
2733 	return (0);
2734 }
2735 
2736 static int
2737 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2738 {
2739 
2740 	switch (opcode) {
2741 	case MFI_DCMD_LD_DELETE:
2742 	case MFI_DCMD_CFG_ADD:
2743 	case MFI_DCMD_CFG_CLEAR:
2744 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2745 		sx_xlock(&sc->mfi_config_lock);
2746 		return (1);
2747 	default:
2748 		return (0);
2749 	}
2750 }
2751 
2752 static void
2753 mfi_config_unlock(struct mfi_softc *sc, int locked)
2754 {
2755 
2756 	if (locked)
2757 		sx_xunlock(&sc->mfi_config_lock);
2758 }
2759 
2760 /*
2761  * Perform pre-issue checks on commands from userland and possibly veto
2762  * them.
2763  */
2764 static int
2765 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2766 {
2767 	struct mfi_disk *ld, *ld2;
2768 	int error;
2769 	struct mfi_system_pd *syspd = NULL;
2770 	uint16_t syspd_id;
2771 	uint16_t *mbox;
2772 
2773 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2774 	error = 0;
2775 	switch (cm->cm_frame->dcmd.opcode) {
2776 	case MFI_DCMD_LD_DELETE:
2777 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2778 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2779 				break;
2780 		}
2781 		if (ld == NULL)
2782 			error = ENOENT;
2783 		else
2784 			error = mfi_disk_disable(ld);
2785 		break;
2786 	case MFI_DCMD_CFG_CLEAR:
2787 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2788 			error = mfi_disk_disable(ld);
2789 			if (error)
2790 				break;
2791 		}
2792 		if (error) {
2793 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2794 				if (ld2 == ld)
2795 					break;
2796 				mfi_disk_enable(ld2);
2797 			}
2798 		}
2799 		break;
2800 	case MFI_DCMD_PD_STATE_SET:
2801 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2802 		syspd_id = mbox[0];
2803 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2804 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2805 				if (syspd->pd_id == syspd_id)
2806 					break;
2807 			}
2808 		} else
2810 			break;
2811 		if (syspd)
2812 			error = mfi_syspd_disable(syspd);
2813 		break;
2814 	default:
2815 		break;
2816 	}
2817 	return (error);
2818 }
2819 
2820 /* Perform post-issue checks on commands from userland. */
2821 static void
2822 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2823 {
2824 	struct mfi_disk *ld, *ldn;
2825 	struct mfi_system_pd *syspd = NULL;
2826 	uint16_t syspd_id;
2827 	uint16_t *mbox;
2828 
2829 	switch (cm->cm_frame->dcmd.opcode) {
2830 	case MFI_DCMD_LD_DELETE:
2831 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2832 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2833 				break;
2834 		}
2835 		KASSERT(ld != NULL, ("volume disappeared"));
2836 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2837 			mtx_unlock(&sc->mfi_io_lock);
2838 			mtx_lock(&Giant);
2839 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2840 			mtx_unlock(&Giant);
2841 			mtx_lock(&sc->mfi_io_lock);
2842 		} else
2843 			mfi_disk_enable(ld);
2844 		break;
2845 	case MFI_DCMD_CFG_CLEAR:
2846 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2847 			mtx_unlock(&sc->mfi_io_lock);
2848 			mtx_lock(&Giant);
2849 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2850 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2851 			}
2852 			mtx_unlock(&Giant);
2853 			mtx_lock(&sc->mfi_io_lock);
2854 		} else {
2855 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2856 				mfi_disk_enable(ld);
2857 		}
2858 		break;
2859 	case MFI_DCMD_CFG_ADD:
2860 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2861 		mfi_ldprobe(sc);
2862 		break;
2865 	case MFI_DCMD_PD_STATE_SET:
2866 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2867 		syspd_id = mbox[0];
2868 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2869 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2870 				if (syspd->pd_id == syspd_id)
2871 					break;
2872 			}
2873 		} else
2875 			break;
2876 		/* If the transition fails then enable the syspd again */
2877 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2878 			mfi_syspd_enable(syspd);
2879 		break;
2880 	}
2881 }
2882 
2883 static int
2884 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2885 {
2886 	struct mfi_config_data *conf_data;
2887 	struct mfi_command *ld_cm = NULL;
2888 	struct mfi_ld_info *ld_info = NULL;
2889 	struct mfi_ld_config *ld;
2890 	char *p;
2891 	int error = 0;
2892 
2893 	conf_data = (struct mfi_config_data *)cm->cm_data;
2894 
2895 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2896 		p = (char *)conf_data->array;
2897 		p += conf_data->array_size * conf_data->array_count;
2898 		ld = (struct mfi_ld_config *)p;
2899 		if (ld->params.isSSCD == 1)
2900 			error = 1;
2901 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2902 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2903 		    (void **)&ld_info, sizeof(*ld_info));
2904 		if (error) {
2905 			device_printf(sc->mfi_dev, "Failed to allocate "
2906 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2907 			if (ld_info)
2908 				free(ld_info, M_MFIBUF);
2909 			return (0);
2910 		}
2911 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2912 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2913 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2914 		if (mfi_wait_command(sc, ld_cm) != 0) {
2915 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2916 			mfi_release_command(ld_cm);
2917 			free(ld_info, M_MFIBUF);
2918 			return (0);
2919 		}
2920 
2921 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2922 			free(ld_info, M_MFIBUF);
2923 			mfi_release_command(ld_cm);
2924 			return (0);
2925 		} else
2926 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2928 
2929 		if (ld_info->ld_config.params.isSSCD == 1)
2930 			error = 1;
2931 
2932 		mfi_release_command(ld_cm);
2933 		free(ld_info, M_MFIBUF);
2934 
2935 	}
2936 	return (error);
2937 }
2938 
2939 static int
2940 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2941 {
2942 	uint8_t i;
2943 	struct mfi_ioc_packet *ioc = (struct mfi_ioc_packet *)arg;
2944 	int sge_size, error;
2945 	struct megasas_sge *kern_sge;
2947 
2948 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2949 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2950 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2951 
2952 	if (sizeof(bus_addr_t) == 8) {
2953 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2954 		cm->cm_extra_frames = 2;
2955 		sge_size = sizeof(struct mfi_sg64);
2956 	} else {
2957 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2958 		sge_size = sizeof(struct mfi_sg32);
2959 	}
2960 
2961 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2962 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2963 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2964 			1, 0,			/* algnmnt, boundary */
2965 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2966 			BUS_SPACE_MAXADDR,	/* highaddr */
2967 			NULL, NULL,		/* filter, filterarg */
2968 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2969 			2,			/* nsegments */
2970 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2971 			BUS_DMA_ALLOCNOW,	/* flags */
2972 			NULL, NULL,		/* lockfunc, lockarg */
2973 			&sc->mfi_kbuff_arr_dmat[i])) {
2974 			device_printf(sc->mfi_dev,
2975 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2976 			return (ENOMEM);
2977 		}
2978 
2979 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2980 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2981 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2982 			device_printf(sc->mfi_dev,
2983 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2984 			return (ENOMEM);
2985 		}
2986 
2987 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2988 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2989 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2990 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2991 
2992 		if (!sc->kbuff_arr[i]) {
2993 			device_printf(sc->mfi_dev,
2994 			    "Could not allocate memory for kbuff_arr info\n");
2995 			return (-1);
2996 		}
2997 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2998 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2999 
3000 		if (sizeof(bus_addr_t) == 8) {
3001 			cm->cm_frame->stp.sgl.sg64[i].addr =
3002 			    kern_sge[i].phys_addr;
3003 			cm->cm_frame->stp.sgl.sg64[i].len =
3004 			    ioc->mfi_sgl[i].iov_len;
3005 		} else {
3006 			cm->cm_frame->stp.sgl.sg32[i].addr =
3007 			    kern_sge[i].phys_addr;
3008 			cm->cm_frame->stp.sgl.sg32[i].len =
3009 			    ioc->mfi_sgl[i].iov_len;
3010 		}
3011 
3012 		error = copyin(ioc->mfi_sgl[i].iov_base,
3013 		    sc->kbuff_arr[i],
3014 		    ioc->mfi_sgl[i].iov_len);
3015 		if (error != 0) {
3016 			device_printf(sc->mfi_dev, "Copy in failed\n");
3017 			return (error);
3018 		}
3019 	}
3020 
3021 	cm->cm_flags |= MFI_CMD_MAPPED;
3022 	return (0);
3023 }
3024 
3025 static int
3026 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3027 {
3028 	struct mfi_command *cm;
3029 	struct mfi_dcmd_frame *dcmd;
3030 	void *ioc_buf = NULL;
3031 	uint32_t context;
3032 	int error = 0, locked;
3033 
3035 	if (ioc->buf_size > 0) {
3036 		if (ioc->buf_size > 1024 * 1024)
3037 			return (ENOMEM);
3038 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3039 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3040 		if (error) {
3041 			device_printf(sc->mfi_dev, "failed to copyin\n");
3042 			free(ioc_buf, M_MFIBUF);
3043 			return (error);
3044 		}
3045 	}
3046 
3047 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3048 
3049 	mtx_lock(&sc->mfi_io_lock);
3050 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3051 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3052 
3053 	/* Save context for later */
3054 	context = cm->cm_frame->header.context;
3055 
3056 	dcmd = &cm->cm_frame->dcmd;
3057 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3058 
3059 	cm->cm_sg = &dcmd->sgl;
3060 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3061 	cm->cm_data = ioc_buf;
3062 	cm->cm_len = ioc->buf_size;
3063 
3064 	/* restore context */
3065 	cm->cm_frame->header.context = context;
3066 
3067 	/* Cheat since we don't know if we're writing or reading */
3068 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3069 
3070 	error = mfi_check_command_pre(sc, cm);
3071 	if (error)
3072 		goto out;
3073 
3074 	error = mfi_wait_command(sc, cm);
3075 	if (error) {
3076 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3077 		goto out;
3078 	}
3079 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3080 	mfi_check_command_post(sc, cm);
3081 out:
3082 	mfi_release_command(cm);
3083 	mtx_unlock(&sc->mfi_io_lock);
3084 	mfi_config_unlock(sc, locked);
3085 	if (ioc->buf_size > 0)
3086 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3087 	if (ioc_buf)
3088 		free(ioc_buf, M_MFIBUF);
3089 	return (error);
3090 }
3091 
3092 #define	PTRIN(p)		((void *)(uintptr_t)(p))
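/*
 * PTRIN() widens a 32-bit pointer value supplied by userland (as used
 * by the COMPAT_FREEBSD32 and Linux ioctl shims below) through
 * uintptr_t into a kernel void * without sign extension.
 */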
3093 
3094 static int
3095 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3096 {
3097 	struct mfi_softc *sc;
3098 	union mfi_statrequest *ms;
3099 	struct mfi_ioc_packet *ioc;
3100 #ifdef COMPAT_FREEBSD32
3101 	struct mfi_ioc_packet32 *ioc32;
3102 #endif
3103 	struct mfi_ioc_aen *aen;
3104 	struct mfi_command *cm = NULL;
3105 	uint32_t context = 0;
3106 	union mfi_sense_ptr sense_ptr;
3107 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3108 	size_t len;
3109 	int i, res;
3110 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3111 #ifdef COMPAT_FREEBSD32
3112 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3113 	struct mfi_ioc_passthru iop_swab;
3114 #endif
3115 	int error, locked;
3116 	union mfi_sgl *sgl;
3117 	sc = dev->si_drv1;
3118 	error = 0;
3119 
3120 	if (sc->adpreset)
3121 		return (EBUSY);
3122 
3123 	if (sc->hw_crit_error)
3124 		return (EBUSY);
3125 
3126 	if (sc->issuepend_done == 0)
3127 		return (EBUSY);
3128 
3129 	switch (cmd) {
3130 	case MFIIO_STATS:
3131 		ms = (union mfi_statrequest *)arg;
3132 		switch (ms->ms_item) {
3133 		case MFIQ_FREE:
3134 		case MFIQ_BIO:
3135 		case MFIQ_READY:
3136 		case MFIQ_BUSY:
3137 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3138 			    sizeof(struct mfi_qstat));
3139 			break;
3140 		default:
3141 			error = ENOIOCTL;
3142 			break;
3143 		}
3144 		break;
3145 	case MFIIO_QUERY_DISK:
3146 	{
3147 		struct mfi_query_disk *qd;
3148 		struct mfi_disk *ld;
3149 
3150 		qd = (struct mfi_query_disk *)arg;
3151 		mtx_lock(&sc->mfi_io_lock);
3152 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3153 			if (ld->ld_id == qd->array_id)
3154 				break;
3155 		}
3156 		if (ld == NULL) {
3157 			qd->present = 0;
3158 			mtx_unlock(&sc->mfi_io_lock);
3159 			return (0);
3160 		}
3161 		qd->present = 1;
3162 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3163 			qd->open = 1;
3164 		bzero(qd->devname, SPECNAMELEN + 1);
3165 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3166 		mtx_unlock(&sc->mfi_io_lock);
3167 		break;
3168 	}
3169 	case MFI_CMD:
3170 #ifdef COMPAT_FREEBSD32
3171 	case MFI_CMD32:
3172 #endif
3173 		{
3174 		devclass_t devclass;
3175 		int adapter;
3176 		ioc = (struct mfi_ioc_packet *)arg;
3177 
3178 		adapter = ioc->mfi_adapter_no;
3179 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3180 			devclass = devclass_find("mfi");
3181 			sc = devclass_get_softc(devclass, adapter);
3182 		}
3183 		mtx_lock(&sc->mfi_io_lock);
3184 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3185 			mtx_unlock(&sc->mfi_io_lock);
3186 			return (EBUSY);
3187 		}
3188 		mtx_unlock(&sc->mfi_io_lock);
3189 		locked = 0;
3190 
3191 		/*
3192 		 * save off original context since copying from user
3193 		 * will clobber some data
3194 		 */
3195 		context = cm->cm_frame->header.context;
3196 		cm->cm_frame->header.context = cm->cm_index;
3197 
3198 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3199 		    2 * MEGAMFI_FRAME_SIZE);
3200 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3201 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3202 		cm->cm_frame->header.scsi_status = 0;
3203 		cm->cm_frame->header.pad0 = 0;
3204 		if (ioc->mfi_sge_count) {
3205 			cm->cm_sg =
3206 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3207 		}
3208 		sgl = cm->cm_sg;
3209 		cm->cm_flags = 0;
3210 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3211 			cm->cm_flags |= MFI_CMD_DATAIN;
3212 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3213 			cm->cm_flags |= MFI_CMD_DATAOUT;
3214 		/* Legacy app shim */
3215 		if (cm->cm_flags == 0)
3216 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3217 		cm->cm_len = cm->cm_frame->header.data_len;
3218 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3219 #ifdef COMPAT_FREEBSD32
3220 			if (cmd == MFI_CMD) {
3221 #endif
3222 				/* Native */
3223 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3225 			} else {
3226 				/* 32bit on 64bit */
3227 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3229 			}
3230 #endif
3231 			cm->cm_len += cm->cm_stp_len;
3232 		}
3233 		if (cm->cm_len &&
3234 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3235 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3236 			    M_WAITOK | M_ZERO);
3237 		} else {
3238 			cm->cm_data = NULL;
3239 		}
3240 
3241 		/* restore header context */
3242 		cm->cm_frame->header.context = context;
3243 
3244 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3245 			res = mfi_stp_cmd(sc, cm, arg);
3246 			if (res != 0)
3247 				goto out;
3248 		} else {
3249 			temp = data;
3250 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3251 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3252 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3253 #ifdef COMPAT_FREEBSD32
3254 					if (cmd == MFI_CMD) {
3255 #endif
3256 						/* Native */
3257 						addr = ioc->mfi_sgl[i].iov_base;
3258 						len = ioc->mfi_sgl[i].iov_len;
3259 #ifdef COMPAT_FREEBSD32
3260 					} else {
3261 						/* 32bit on 64bit */
3262 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3263 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3264 						len = ioc32->mfi_sgl[i].iov_len;
3265 					}
3266 #endif
3267 					error = copyin(addr, temp, len);
3268 					if (error != 0) {
3269 						device_printf(sc->mfi_dev,
3270 						    "Copy in failed\n");
3271 						goto out;
3272 					}
3273 					temp = &temp[len];
3274 				}
3275 			}
3276 		}
3277 
3278 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3279 			locked = mfi_config_lock(sc,
3280 			     cm->cm_frame->dcmd.opcode);
3281 
3282 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3283 			cm->cm_frame->pass.sense_addr_lo =
3284 			    (uint32_t)cm->cm_sense_busaddr;
3285 			cm->cm_frame->pass.sense_addr_hi =
3286 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3287 		}
3288 		mtx_lock(&sc->mfi_io_lock);
3289 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3290 		if (!skip_pre_post) {
3291 			error = mfi_check_command_pre(sc, cm);
3292 			if (error) {
3293 				mtx_unlock(&sc->mfi_io_lock);
3294 				goto out;
3295 			}
3296 		}
3297 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3298 			device_printf(sc->mfi_dev,
3299 			    "Controller command failed\n");
3300 			mtx_unlock(&sc->mfi_io_lock);
3301 			goto out;
3302 		}
3303 		if (!skip_pre_post) {
3304 			mfi_check_command_post(sc, cm);
3305 		}
3306 		mtx_unlock(&sc->mfi_io_lock);
3307 
3308 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3309 			temp = data;
3310 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3311 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3312 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3313 #ifdef COMPAT_FREEBSD32
3314 					if (cmd == MFI_CMD) {
3315 #endif
3316 						/* Native */
3317 						addr = ioc->mfi_sgl[i].iov_base;
3318 						len = ioc->mfi_sgl[i].iov_len;
3319 #ifdef COMPAT_FREEBSD32
3320 					} else {
3321 						/* 32bit on 64bit */
3322 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3323 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3324 						len = ioc32->mfi_sgl[i].iov_len;
3325 					}
3326 #endif
3327 					error = copyout(temp, addr, len);
3328 					if (error != 0) {
3329 						device_printf(sc->mfi_dev,
3330 						    "Copy out failed\n");
3331 						goto out;
3332 					}
3333 					temp = &temp[len];
3334 				}
3335 			}
3336 		}
3337 
3338 		if (ioc->mfi_sense_len) {
3339 			/* get user-space sense ptr then copy out sense */
3340 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3341 			    &sense_ptr.sense_ptr_data[0],
3342 			    sizeof(sense_ptr.sense_ptr_data));
3343 #ifdef COMPAT_FREEBSD32
3344 			if (cmd != MFI_CMD) {
3345 				/*
3346 				 * not 64-bit native, so zero out any
3347 				 * address over 32 bits.
				 */
3348 				sense_ptr.addr.high = 0;
3349 			}
3350 #endif
3351 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3352 			    ioc->mfi_sense_len);
3353 			if (error != 0) {
3354 				device_printf(sc->mfi_dev,
3355 				    "Copy out failed\n");
3356 				goto out;
3357 			}
3358 		}
3359 
3360 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3361 out:
3362 		mfi_config_unlock(sc, locked);
3363 		if (data)
3364 			free(data, M_MFIBUF);
3365 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3366 			for (i = 0; i < 2; i++) {
3367 				if (sc->kbuff_arr[i]) {
3368 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3369 						bus_dmamap_unload(
3370 						    sc->mfi_kbuff_arr_dmat[i],
3371 						    sc->mfi_kbuff_arr_dmamap[i]
3372 						    );
3373 					if (sc->kbuff_arr[i] != NULL)
3374 						bus_dmamem_free(
3375 						    sc->mfi_kbuff_arr_dmat[i],
3376 						    sc->kbuff_arr[i],
3377 						    sc->mfi_kbuff_arr_dmamap[i]
3378 						    );
3379 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3380 						bus_dma_tag_destroy(
3381 						    sc->mfi_kbuff_arr_dmat[i]);
3382 				}
3383 			}
3384 		}
3385 		if (cm) {
3386 			mtx_lock(&sc->mfi_io_lock);
3387 			mfi_release_command(cm);
3388 			mtx_unlock(&sc->mfi_io_lock);
3389 		}
3390 
3391 		break;
3392 		}
3393 	case MFI_SET_AEN:
3394 		aen = (struct mfi_ioc_aen *)arg;
3395 		mtx_lock(&sc->mfi_io_lock);
3396 		error = mfi_aen_register(sc, aen->aen_seq_num,
3397 		    aen->aen_class_locale);
3398 		mtx_unlock(&sc->mfi_io_lock);
3399 
3400 		break;
3401 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3402 		{
3403 			devclass_t devclass;
3404 			struct mfi_linux_ioc_packet l_ioc;
3405 			int adapter;
3406 
3407 			devclass = devclass_find("mfi");
3408 			if (devclass == NULL)
3409 				return (ENOENT);
3410 
3411 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3412 			if (error)
3413 				return (error);
3414 			adapter = l_ioc.lioc_adapter_no;
3415 			sc = devclass_get_softc(devclass, adapter);
3416 			if (sc == NULL)
3417 				return (ENOENT);
3418 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3419 			    cmd, arg, flag, td));
3421 		}
3422 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3423 		{
3424 			devclass_t devclass;
3425 			struct mfi_linux_ioc_aen l_aen;
3426 			int adapter;
3427 
3428 			devclass = devclass_find("mfi");
3429 			if (devclass == NULL)
3430 				return (ENOENT);
3431 
3432 			error = copyin(arg, &l_aen, sizeof(l_aen));
3433 			if (error)
3434 				return (error);
3435 			adapter = l_aen.laen_adapter_no;
3436 			sc = devclass_get_softc(devclass, adapter);
3437 			if (sc == NULL)
3438 				return (ENOENT);
3439 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3440 			    cmd, arg, flag, td));
3442 		}
3443 #ifdef COMPAT_FREEBSD32
3444 	case MFIIO_PASSTHRU32:
3445 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3446 			error = ENOTTY;
3447 			break;
3448 		}
3449 		iop_swab.ioc_frame	= iop32->ioc_frame;
3450 		iop_swab.buf_size	= iop32->buf_size;
3451 		iop_swab.buf		= PTRIN(iop32->buf);
3452 		iop			= &iop_swab;
3453 		/* FALLTHROUGH */
3454 #endif
3455 	case MFIIO_PASSTHRU:
3456 		error = mfi_user_command(sc, iop);
3457 #ifdef COMPAT_FREEBSD32
3458 		if (cmd == MFIIO_PASSTHRU32)
3459 			iop32->ioc_frame = iop_swab.ioc_frame;
3460 #endif
3461 		break;
3462 	default:
3463 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3464 		error = ENOTTY;
3465 		break;
3466 	}
3467 
3468 	return (error);
3469 }
3470 
3471 static int
3472 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3473 {
3474 	struct mfi_softc *sc;
3475 	struct mfi_linux_ioc_packet l_ioc;
3476 	struct mfi_linux_ioc_aen l_aen;
3477 	struct mfi_command *cm = NULL;
3478 	struct mfi_aen *mfi_aen_entry;
3479 	union mfi_sense_ptr sense_ptr;
3480 	uint32_t context = 0;
3481 	uint8_t *data = NULL, *temp;
3482 	int i;
3483 	int error, locked;
3484 
3485 	sc = dev->si_drv1;
3486 	error = 0;
3487 	switch (cmd) {
3488 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3489 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3490 		if (error != 0)
3491 			return (error);
3492 
3493 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3494 			return (EINVAL);
3495 		}
3496 
3497 		mtx_lock(&sc->mfi_io_lock);
3498 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3499 			mtx_unlock(&sc->mfi_io_lock);
3500 			return (EBUSY);
3501 		}
3502 		mtx_unlock(&sc->mfi_io_lock);
3503 		locked = 0;
3504 
3505 		/*
3506 		 * save off original context since copying from user
3507 		 * will clobber some data
3508 		 */
3509 		context = cm->cm_frame->header.context;
3510 
3511 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3512 		      2 * MFI_DCMD_FRAME_SIZE);	/* XXX: a fixed two frames isn't quite right */
3513 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3514 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3515 		cm->cm_frame->header.scsi_status = 0;
3516 		cm->cm_frame->header.pad0 = 0;
3517 		if (l_ioc.lioc_sge_count)
3518 			cm->cm_sg =
3519 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3520 		cm->cm_flags = 0;
3521 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3522 			cm->cm_flags |= MFI_CMD_DATAIN;
3523 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3524 			cm->cm_flags |= MFI_CMD_DATAOUT;
3525 		cm->cm_len = cm->cm_frame->header.data_len;
3526 		if (cm->cm_len &&
3527 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3528 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3529 			    M_WAITOK | M_ZERO);
3530 		} else {
3531 			cm->cm_data = NULL;
3532 		}
3533 
3534 		/* restore header context */
3535 		cm->cm_frame->header.context = context;
3536 
3537 		temp = data;
3538 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3539 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3540 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3541 				       temp,
3542 				       l_ioc.lioc_sgl[i].iov_len);
3543 				if (error != 0) {
3544 					device_printf(sc->mfi_dev,
3545 					    "Copy in failed\n");
3546 					goto out;
3547 				}
3548 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3549 			}
3550 		}
3551 
3552 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3553 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3554 
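		/*
		 * SCSI passthrough commands return sense data by DMA;
		 * point the frame at this command's preallocated sense
		 * buffer.
		 */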
3555 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3556 			cm->cm_frame->pass.sense_addr_lo =
3557 			    (uint32_t)cm->cm_sense_busaddr;
3558 			cm->cm_frame->pass.sense_addr_hi =
3559 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3560 		}
3561 
3562 		mtx_lock(&sc->mfi_io_lock);
3563 		error = mfi_check_command_pre(sc, cm);
3564 		if (error) {
3565 			mtx_unlock(&sc->mfi_io_lock);
3566 			goto out;
3567 		}
3568 
3569 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3570 			device_printf(sc->mfi_dev,
3571 			    "Controller polled failed\n");
3572 			mtx_unlock(&sc->mfi_io_lock);
3573 			goto out;
3574 		}
3575 
3576 		mfi_check_command_post(sc, cm);
3577 		mtx_unlock(&sc->mfi_io_lock);
3578 
3579 		temp = data;
3580 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3581 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3582 				error = copyout(temp,
3583 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3584 					l_ioc.lioc_sgl[i].iov_len);
3585 				if (error != 0) {
3586 					device_printf(sc->mfi_dev,
3587 					    "Copy out failed\n");
3588 					goto out;
3589 				}
3590 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3591 			}
3592 		}
3593 
3594 		if (l_ioc.lioc_sense_len) {
3595 			/* get user-space sense ptr then copy out sense */
3596 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3597                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3598 			    &sense_ptr.sense_ptr_data[0],
3599 			    sizeof(sense_ptr.sense_ptr_data));
3600 #ifdef __amd64__
3601 			/*
3602 			 * Only 32-bit Linux binaries are supported, so
3603 			 * zero out the upper 32 bits of the address.
3604 			 */
3605 			sense_ptr.addr.high = 0;
3606 #endif
3607 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3608 			    l_ioc.lioc_sense_len);
3609 			if (error != 0) {
3610 				device_printf(sc->mfi_dev,
3611 				    "Copy out failed\n");
3612 				goto out;
3613 			}
3614 		}
3615 
3616 		error = copyout(&cm->cm_frame->header.cmd_status,
3617 			&((struct mfi_linux_ioc_packet*)arg)
3618 			->lioc_frame.hdr.cmd_status,
3619 			1);
3620 		if (error != 0) {
3621 			device_printf(sc->mfi_dev,
3622 				      "Copy out failed\n");
3623 			goto out;
3624 		}
3625 
3626 out:
3627 		mfi_config_unlock(sc, locked);
3628 		if (data)
3629 			free(data, M_MFIBUF);
3630 		if (cm) {
3631 			mtx_lock(&sc->mfi_io_lock);
3632 			mfi_release_command(cm);
3633 			mtx_unlock(&sc->mfi_io_lock);
3634 		}
3635 
3636 		return (error);
3637 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3638 		error = copyin(arg, &l_aen, sizeof(l_aen));
3639 		if (error != 0)
3640 			return (error);
3641 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3642 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3643 		    M_WAITOK);
3644 		mtx_lock(&sc->mfi_io_lock);
3645 		/* M_WAITOK allocations cannot fail, so no NULL check is needed. */
3646 		mfi_aen_entry->p = curproc;
3647 		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3648 		    aen_link);
3650 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3651 		    l_aen.laen_class_locale);
3652 
3653 		if (error != 0) {
3654 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3655 			    aen_link);
3656 			free(mfi_aen_entry, M_MFIBUF);
3657 		}
3658 		mtx_unlock(&sc->mfi_io_lock);
3659 
3660 		return (error);
3661 	default:
3662 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3663 		error = ENOENT;
3664 		break;
3665 	}
3666 
3667 	return (error);
3668 }
3669 
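/*
 * poll(2) entry point; readability signals a pending Asynchronous Event
 * Notification (AEN).  POLLERR is returned when no AEN command is
 * outstanding, since no further events can arrive.
 *
 * A minimal userland consumer might look like the sketch below (a rough
 * illustration, not shipped code; "fd" is an open mfi cdev descriptor
 * and "seq_num"/"locale" are caller-chosen values):
 *
 *	struct mfi_ioc_aen aen = { .aen_seq_num = seq_num,
 *	    .aen_class_locale = locale };
 *	if (ioctl(fd, MFI_SET_AEN, &aen) == 0) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		poll(&pfd, 1, INFTIM);
 *	}
 */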
3670 static int
3671 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3672 {
3673 	struct mfi_softc *sc;
3674 	int revents = 0;
3675 
3676 	sc = dev->si_drv1;
3677 
3678 	if (poll_events & (POLLIN | POLLRDNORM)) {
3679 		if (sc->mfi_aen_triggered != 0) {
3680 			revents |= poll_events & (POLLIN | POLLRDNORM);
3681 			sc->mfi_aen_triggered = 0;
3682 		}
3683 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3684 			revents |= POLLERR;
3685 		}
3686 	}
3687 
3688 	if (revents == 0) {
3689 		if (poll_events & (POLLIN | POLLRDNORM)) {
3690 			sc->mfi_poll_waiting = 1;
3691 			selrecord(td, &sc->mfi_select);
3692 		}
3693 	}
3694 
3695 	return (revents);
3696 }
3697 
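/*
 * Debugging aid: walk every mfi(4) adapter and report busy commands that
 * have been outstanding longer than mfi_cmd_timeout seconds.
 */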
3698 static void
3699 mfi_dump_all(void)
3700 {
3701 	struct mfi_softc *sc;
3702 	struct mfi_command *cm;
3703 	devclass_t dc;
3704 	time_t deadline;
3705 	int timedout;
3706 	int i;
3707 
3708 	dc = devclass_find("mfi");
3709 	if (dc == NULL) {
3710 		printf("No mfi dev class\n");
3711 		return;
3712 	}
3713 
3714 	for (i = 0; ; i++) {
3715 		sc = devclass_get_softc(dc, i);
3716 		if (sc == NULL)
3717 			break;
3718 		device_printf(sc->mfi_dev, "Dumping\n\n");
3719 		timedout = 0;
3720 		deadline = time_uptime - mfi_cmd_timeout;
3721 		mtx_lock(&sc->mfi_io_lock);
3722 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3723 			if (cm->cm_timestamp <= deadline) {
3724 				device_printf(sc->mfi_dev,
3725 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3726 				    cm, (int)(time_uptime - cm->cm_timestamp));
3727 				MFI_PRINT_CMD(cm);
3728 				timedout++;
3729 			}
3730 		}
3731 
3732 #if 0
3733 		if (timedout)
3734 			MFI_DUMP_CMDS(sc);
3735 #endif
3736 
3737 		mtx_unlock(&sc->mfi_io_lock);
3738 	}
3741 }
3742 
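/*
 * Watchdog callout, rearmed every mfi_cmd_timeout seconds.  Timed-out
 * commands are only reported, never forced to fail; see the comment in
 * the loop body for why.
 */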
3743 static void
3744 mfi_timeout(void *data)
3745 {
3746 	struct mfi_softc *sc = (struct mfi_softc *)data;
3747 	struct mfi_command *cm, *tmp;
3748 	time_t deadline;
3749 	int timedout = 0;
3750 
3751 	deadline = time_uptime - mfi_cmd_timeout;
3752 	if (sc->adpreset == 0) {
3753 		if (!mfi_tbolt_reset(sc)) {
3754 			callout_reset(&sc->mfi_watchdog_callout,
3755 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3756 			return;
3757 		}
3758 	}
3759 	mtx_lock(&sc->mfi_io_lock);
3760 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3761 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3762 			continue;
3763 		if (cm->cm_timestamp <= deadline) {
3764 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3765 				cm->cm_timestamp = time_uptime;
3766 			} else {
3767 				device_printf(sc->mfi_dev,
3768 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3769 				     cm, (int)(time_uptime - cm->cm_timestamp)
3770 				     );
3771 				MFI_PRINT_CMD(cm);
3772 				MFI_VALIDATE_CMD(sc, cm);
3773 				/*
3774 				 * While commands can get stuck forever, we do
3775 				 * not fail them, as there is no way to tell
3776 				 * whether the controller has actually
3777 				 * processed them.
3778 				 *
3779 				 * In addition, it is very likely that forcibly
3780 				 * failing a command here would cause a panic,
3781 				 * e.g. in UFS.
3782 				 */
3783 				timedout++;
3784 			}
3785 		}
3786 	}
3787 
3788 #if 0
3789 	if (timedout)
3790 		MFI_DUMP_CMDS(sc);
3791 #endif
3792 
3793 	mtx_unlock(&sc->mfi_io_lock);
3794 
3795 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3796 	    mfi_timeout, sc);
3797 
	/* Debugging hook: flip the 0 to 1 to dump every adapter's busy queue. */
3798 	if (0)
3799 		mfi_dump_all();
3801 }
3802