xref: /freebsd/sys/dev/mfi/mfi.c (revision b1f9167f94059fd55c630891d359bcff987bd7eb)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
55 
56 #include "opt_compat.h"
57 #include "opt_mfi.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sysctl.h>
62 #include <sys/malloc.h>
63 #include <sys/kernel.h>
64 #include <sys/poll.h>
65 #include <sys/selinfo.h>
66 #include <sys/bus.h>
67 #include <sys/conf.h>
68 #include <sys/eventhandler.h>
69 #include <sys/rman.h>
70 #include <sys/bus_dma.h>
71 #include <sys/bio.h>
72 #include <sys/ioccom.h>
73 #include <sys/uio.h>
74 #include <sys/proc.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
78 
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81 
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
87 
88 static int	mfi_alloc_commands(struct mfi_softc *);
89 static int	mfi_comms_init(struct mfi_softc *);
90 static int	mfi_get_controller_info(struct mfi_softc *);
91 static int	mfi_get_log_state(struct mfi_softc *,
92 		    struct mfi_evt_log_state **);
93 static int	mfi_parse_entries(struct mfi_softc *, int, int);
94 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void	mfi_startup(void *arg);
96 static void	mfi_intr(void *arg);
97 static void	mfi_ldprobe(struct mfi_softc *sc);
98 static void	mfi_syspdprobe(struct mfi_softc *sc);
99 static void	mfi_handle_evt(void *context, int pending);
100 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void	mfi_aen_complete(struct mfi_command *);
102 static int	mfi_add_ld(struct mfi_softc *sc, int);
103 static void	mfi_add_ld_complete(struct mfi_command *);
104 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void	mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107 static void	mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void	mfi_timeout(void *);
115 static int	mfi_user_command(struct mfi_softc *,
116 		    struct mfi_ioc_passthru *);
117 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124 		    uint32_t frame_cnt);
125 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126 		    uint32_t frame_cnt);
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137            0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
142            0, "event message class");
143 
144 static int	mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
147 	   0, "Max commands limit (-1 = controller limit)");
148 
149 static int	mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
152 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153 
154 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
155 TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
157 	   &mfi_polled_cmd_timeout, 0,
158 	   "Polled command timeout - used for firmware flash etc (in seconds)");
159 
160 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
161 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
162 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
163 	   0, "Command timeout (in seconds)");
164 
165 /* Management interface */
166 static d_open_t		mfi_open;
167 static d_close_t	mfi_close;
168 static d_ioctl_t	mfi_ioctl;
169 static d_poll_t		mfi_poll;
170 
171 static struct cdevsw mfi_cdevsw = {
172 	.d_version = 	D_VERSION,
173 	.d_flags =	0,
174 	.d_open = 	mfi_open,
175 	.d_close =	mfi_close,
176 	.d_ioctl =	mfi_ioctl,
177 	.d_poll =	mfi_poll,
178 	.d_name =	"mfi",
179 };
180 
181 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
182 
183 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
184 struct mfi_skinny_dma_info mfi_skinny;
185 
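/*
 * Hardware-interface accessors.  The xscale (1064R) and ppc (1078, GEN2,
 * Skinny) controller families use different register layouts, so
 * mfi_attach() installs the matching set of these handlers through
 * function pointers in the softc.
 */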
186 static void
187 mfi_enable_intr_xscale(struct mfi_softc *sc)
188 {
189 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
190 }
191 
192 static void
193 mfi_enable_intr_ppc(struct mfi_softc *sc)
194 {
195 	if (sc->mfi_flags & MFI_FLAGS_1078) {
196 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
198 	}
199 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
200 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
201 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
202 	}
203 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
204 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
205 	}
206 }
207 
208 static int32_t
209 mfi_read_fw_status_xscale(struct mfi_softc *sc)
210 {
211 	return MFI_READ4(sc, MFI_OMSG0);
212 }
213 
214 static int32_t
215 mfi_read_fw_status_ppc(struct mfi_softc *sc)
216 {
217 	return MFI_READ4(sc, MFI_OSP0);
218 }
219 
220 static int
221 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222 {
223 	int32_t status;
224 
225 	status = MFI_READ4(sc, MFI_OSTS);
226 	if ((status & MFI_OSTS_INTR_VALID) == 0)
227 		return 1;
228 
229 	MFI_WRITE4(sc, MFI_OSTS, status);
230 	return 0;
231 }
232 
233 static int
234 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235 {
236 	int32_t status;
237 
238 	status = MFI_READ4(sc, MFI_OSTS);
239 	if (sc->mfi_flags & MFI_FLAGS_1078) {
240 		if (!(status & MFI_1078_RM)) {
241 			return 1;
242 		}
243 	}
244 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
245 		if (!(status & MFI_GEN2_RM)) {
246 			return 1;
247 		}
248 	}
249 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
250 		if (!(status & MFI_SKINNY_RM)) {
251 			return 1;
252 		}
253 	}
254 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
255 		MFI_WRITE4(sc, MFI_OSTS, status);
256 	else
257 		MFI_WRITE4(sc, MFI_ODCR0, status);
258 	return 0;
259 }
260 
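/*
 * Post a command frame to the controller's inbound queue port.  The
 * xscale encoding shifts the 8-byte-aligned frame address right and ORs
 * the frame count into the low bits; the ppc/skinny encoding ORs the
 * frame count (shifted left one bit) and a valid bit into the address.
 */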
261 static void
262 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
263 {
264 	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
265 }
266 
267 static void
268 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
269 {
270 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
271 	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
272 	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
273 	} else {
274 	    MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
275 	}
276 }
277 
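/*
 * Step the firmware toward the READY state, prodding it out of the
 * intermediate states (handshake, operational, boot message pending)
 * with the appropriate inbound doorbell write, and fail with ENXIO if
 * it faults or stops making progress.
 */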
278 int
279 mfi_transition_firmware(struct mfi_softc *sc)
280 {
281 	uint32_t fw_state, cur_state;
282 	int max_wait, i;
283 	uint32_t cur_abs_reg_val = 0;
284 	uint32_t prev_abs_reg_val = 0;
285 
286 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
287 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
288 	while (fw_state != MFI_FWSTATE_READY) {
289 		if (bootverbose)
290 			device_printf(sc->mfi_dev, "Waiting for firmware to "
291 			    "become ready\n");
292 		cur_state = fw_state;
293 		switch (fw_state) {
294 		case MFI_FWSTATE_FAULT:
295 			device_printf(sc->mfi_dev, "Firmware fault\n");
296 			return (ENXIO);
297 		case MFI_FWSTATE_WAIT_HANDSHAKE:
298 			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
299 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
300 			else
301 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
302 			max_wait = MFI_RESET_WAIT_TIME;
303 			break;
304 		case MFI_FWSTATE_OPERATIONAL:
305 			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
306 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
307 			else
308 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
309 			max_wait = MFI_RESET_WAIT_TIME;
310 			break;
311 		case MFI_FWSTATE_UNDEFINED:
312 		case MFI_FWSTATE_BB_INIT:
313 			max_wait = MFI_RESET_WAIT_TIME;
314 			break;
315 		case MFI_FWSTATE_FW_INIT_2:
316 			max_wait = MFI_RESET_WAIT_TIME;
317 			break;
318 		case MFI_FWSTATE_FW_INIT:
319 		case MFI_FWSTATE_FLUSH_CACHE:
320 			max_wait = MFI_RESET_WAIT_TIME;
321 			break;
322 		case MFI_FWSTATE_DEVICE_SCAN:
323 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
324 			prev_abs_reg_val = cur_abs_reg_val;
325 			break;
326 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
327 			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
328 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
329 			else
330 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
331 			max_wait = MFI_RESET_WAIT_TIME;
332 			break;
333 		default:
334 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
335 			    fw_state);
336 			return (ENXIO);
337 		}
338 		for (i = 0; i < (max_wait * 10); i++) {
339 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
340 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
341 			if (fw_state == cur_state)
342 				DELAY(100000);
343 			else
344 				break;
345 		}
346 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
347 			/* Check the device scanning progress */
348 			if (prev_abs_reg_val != cur_abs_reg_val) {
349 				continue;
350 			}
351 		}
352 		if (fw_state == cur_state) {
353 			device_printf(sc->mfi_dev, "Firmware stuck in state "
354 			    "%#x\n", fw_state);
355 			return (ENXIO);
356 		}
357 	}
358 	return (0);
359 }
360 
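/*
 * Single-segment bus_dmamap_load() callback that records the bus address
 * of the loaded buffer for the caller.
 */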
361 static void
362 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
363 {
364 	bus_addr_t *addr;
365 
366 	addr = arg;
367 	*addr = segs[0].ds_addr;
368 }
369 
370 
371 int
372 mfi_attach(struct mfi_softc *sc)
373 {
374 	uint32_t status;
375 	int error, commsz, framessz, sensesz;
376 	int frames, unit, max_fw_sge, max_fw_cmds;
377 	uint32_t tb_mem_size = 0;
378 
379 	if (sc == NULL)
380 		return EINVAL;
381 
382 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
383 	    MEGASAS_VERSION);
384 
385 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
386 	sx_init(&sc->mfi_config_lock, "MFI config");
387 	TAILQ_INIT(&sc->mfi_ld_tqh);
388 	TAILQ_INIT(&sc->mfi_syspd_tqh);
389 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
390 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
391 	TAILQ_INIT(&sc->mfi_evt_queue);
392 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
393 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
394 	TAILQ_INIT(&sc->mfi_aen_pids);
395 	TAILQ_INIT(&sc->mfi_cam_ccbq);
396 
397 	mfi_initq_free(sc);
398 	mfi_initq_ready(sc);
399 	mfi_initq_busy(sc);
400 	mfi_initq_bio(sc);
401 
402 	sc->adpreset = 0;
403 	sc->last_seq_num = 0;
404 	sc->disableOnlineCtrlReset = 1;
405 	sc->issuepend_done = 1;
406 	sc->hw_crit_error = 0;
407 
408 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
409 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
410 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
411 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
412 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
413 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
414 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
415 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
416 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
417 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
418 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
419 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
420 		sc->mfi_tbolt = 1;
421 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
422 	} else {
423 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
424 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
425 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
426 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
427 	}
428 
429 
430 	/* Before we get too far, see if the firmware is working */
431 	if ((error = mfi_transition_firmware(sc)) != 0) {
432 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
433 		    "error %d\n", error);
434 		return (ENXIO);
435 	}
436 
437 	/* Start: LSIP200113393 */
438 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
439 				1, 0,			/* algnmnt, boundary */
440 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
441 				BUS_SPACE_MAXADDR,	/* highaddr */
442 				NULL, NULL,		/* filter, filterarg */
443 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
444 				1,			/* nsegments */
445 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
446 				0,			/* flags */
447 				NULL, NULL,		/* lockfunc, lockarg */
448 				&sc->verbuf_h_dmat)) {
449 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
450 		return (ENOMEM);
451 	}
452 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
453 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
454 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
455 		return (ENOMEM);
456 	}
457 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
458 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
459 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
460 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
461 	/* End: LSIP200113393 */
462 
463 	/*
464 	 * Get information needed for sizing the contiguous memory for the
465 	 * frame pool.  Size down the sgl parameter since we know that
466 	 * we will never need more than what's required for MAXPHYS.
467 	 * It would be nice if these constants were available at runtime
468 	 * instead of compile time.
469 	 */
470 	status = sc->mfi_read_fw_status(sc);
471 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
472 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
473 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
474 		    max_fw_cmds, mfi_max_cmds);
475 		sc->mfi_max_fw_cmds = mfi_max_cmds;
476 	} else {
477 		sc->mfi_max_fw_cmds = max_fw_cmds;
478 	}
479 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
480 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
481 
482 	/* ThunderBolt Support get the contiguous memory */
483 
484 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
485 		mfi_tbolt_init_globals(sc);
486 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
487 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
488 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
489 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
490 
491 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
492 				1, 0,			/* algnmnt, boundary */
493 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
494 				BUS_SPACE_MAXADDR,	/* highaddr */
495 				NULL, NULL,		/* filter, filterarg */
496 				tb_mem_size,		/* maxsize */
497 				1,			/* nsegments */
498 				tb_mem_size,		/* maxsegsize */
499 				0,			/* flags */
500 				NULL, NULL,		/* lockfunc, lockarg */
501 				&sc->mfi_tb_dmat)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
503 			return (ENOMEM);
504 		}
505 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
506 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
507 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
508 			return (ENOMEM);
509 		}
510 		bzero(sc->request_message_pool, tb_mem_size);
511 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
512 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
513 
514 		/* For ThunderBolt memory init */
515 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
516 				0x100, 0,		/* alignmnt, boundary */
517 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
518 				BUS_SPACE_MAXADDR,	/* highaddr */
519 				NULL, NULL,		/* filter, filterarg */
520 				MFI_FRAME_SIZE,		/* maxsize */
521 				1,			/* nsegments */
522 				MFI_FRAME_SIZE,		/* maxsegsize */
523 				0,			/* flags */
524 				NULL, NULL,		/* lockfunc, lockarg */
525 				&sc->mfi_tb_init_dmat)) {
526 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
527 			return (ENOMEM);
528 		}
529 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
530 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
531 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
532 			return (ENOMEM);
533 		}
534 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
535 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
536 		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
537 		    &sc->mfi_tb_init_busaddr, 0);
538 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
539 		    tb_mem_size)) {
540 			device_printf(sc->mfi_dev,
541 			    "Thunderbolt pool preparation error\n");
542 			return (ENOMEM);
543 		}
544 
545 		/*
546 		 * Allocate a DMA mapping for the MPI2 IOC Init descriptor.
547 		 * We keep it separate from what was allocated for the request
548 		 * and reply descriptors to avoid confusion later.
549 		 */
550 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
551 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
552 				1, 0,			/* algnmnt, boundary */
553 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
554 				BUS_SPACE_MAXADDR,	/* highaddr */
555 				NULL, NULL,		/* filter, filterarg */
556 				tb_mem_size,		/* maxsize */
557 				1,			/* nsegments */
558 				tb_mem_size,		/* maxsegsize */
559 				0,			/* flags */
560 				NULL, NULL,		/* lockfunc, lockarg */
561 				&sc->mfi_tb_ioc_init_dmat)) {
562 			device_printf(sc->mfi_dev,
563 			    "Cannot allocate comms DMA tag\n");
564 			return (ENOMEM);
565 		}
566 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
567 		    (void **)&sc->mfi_tb_ioc_init_desc,
568 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
569 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
570 			return (ENOMEM);
571 		}
572 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
573 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
574 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
575 		    &sc->mfi_tb_ioc_init_busaddr, 0);
576 	}
577 	/*
578 	 * Create the dma tag for data buffers.  Used both for block I/O
579 	 * and for various internal data queries.
580 	 */
581 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
582 				1, 0,			/* algnmnt, boundary */
583 				BUS_SPACE_MAXADDR,	/* lowaddr */
584 				BUS_SPACE_MAXADDR,	/* highaddr */
585 				NULL, NULL,		/* filter, filterarg */
586 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
587 				sc->mfi_max_sge,	/* nsegments */
588 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
589 				BUS_DMA_ALLOCNOW,	/* flags */
590 				busdma_lock_mutex,	/* lockfunc */
591 				&sc->mfi_io_lock,	/* lockfuncarg */
592 				&sc->mfi_buffer_dmat)) {
593 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
594 		return (ENOMEM);
595 	}
596 
597 	/*
598 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
599 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
600 	 * entry, so the calculated size here will be 1 more than
601 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
602 	 */
603 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
604 	    sizeof(struct mfi_hwcomms);
605 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
606 				1, 0,			/* algnmnt, boundary */
607 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
608 				BUS_SPACE_MAXADDR,	/* highaddr */
609 				NULL, NULL,		/* filter, filterarg */
610 				commsz,			/* maxsize */
611 				1,			/* nsegments */
612 				commsz,			/* maxsegsize */
613 				0,			/* flags */
614 				NULL, NULL,		/* lockfunc, lockarg */
615 				&sc->mfi_comms_dmat)) {
616 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
617 		return (ENOMEM);
618 	}
619 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
620 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
621 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
622 		return (ENOMEM);
623 	}
624 	bzero(sc->mfi_comms, commsz);
625 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
626 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
627 	/*
628 	 * Allocate DMA memory for the command frames.  Keep them in the
629 	 * lower 4GB for efficiency.  Calculate the size of the commands at
630 	 * the same time; each command is one 64 byte frame plus a set of
631 	 * additional frames for holding sg lists or other data.
632 	 * The assumption here is that the SG list will start at the second
633 	 * frame and not use the unused bytes in the first frame.  While this
634 	 * isn't technically correct, it simplifies the calculation and allows
635 	 * for command frames that might be larger than an mfi_io_frame.
636 	 */
637 	if (sizeof(bus_addr_t) == 8) {
638 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
639 		sc->mfi_flags |= MFI_FLAGS_SG64;
640 	} else {
641 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
642 	}
643 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
644 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
645 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
646 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
647 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
648 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
649 				64, 0,			/* algnmnt, boundary */
650 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
651 				BUS_SPACE_MAXADDR,	/* highaddr */
652 				NULL, NULL,		/* filter, filterarg */
653 				framessz,		/* maxsize */
654 				1,			/* nsegments */
655 				framessz,		/* maxsegsize */
656 				0,			/* flags */
657 				NULL, NULL,		/* lockfunc, lockarg */
658 				&sc->mfi_frames_dmat)) {
659 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
660 		return (ENOMEM);
661 	}
662 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
663 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
664 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
665 		return (ENOMEM);
666 	}
667 	bzero(sc->mfi_frames, framessz);
668 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
669 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
670 	/*
671 	 * Allocate DMA memory for the frame sense data.  Keep them in the
672 	 * lower 4GB for efficiency
673 	 */
674 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
675 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
676 				4, 0,			/* algnmnt, boundary */
677 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
678 				BUS_SPACE_MAXADDR,	/* highaddr */
679 				NULL, NULL,		/* filter, filterarg */
680 				sensesz,		/* maxsize */
681 				1,			/* nsegments */
682 				sensesz,		/* maxsegsize */
683 				0,			/* flags */
684 				NULL, NULL,		/* lockfunc, lockarg */
685 				&sc->mfi_sense_dmat)) {
686 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
687 		return (ENOMEM);
688 	}
689 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
690 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
691 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
692 		return (ENOMEM);
693 	}
694 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
695 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
696 	if ((error = mfi_alloc_commands(sc)) != 0)
697 		return (error);
698 
699 	/* Before moving the FW to the operational state, check whether
700 	 * host memory is required by the FW or not.
701 	 */
702 
703 	/* ThunderBolt MFI_IOC2 INIT */
704 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
705 		sc->mfi_disable_intr(sc);
706 		mtx_lock(&sc->mfi_io_lock);
707 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
708 			device_printf(sc->mfi_dev,
709 			    "TB Init has failed with error %d\n", error);
710 			mtx_unlock(&sc->mfi_io_lock);
711 			return (error);
712 		}
713 		mtx_unlock(&sc->mfi_io_lock);
714 
715 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
716 			return (error);
717 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
718 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
719 		    &sc->mfi_intr)) {
720 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
721 			return (EINVAL);
722 		}
723 		sc->mfi_intr_ptr = mfi_intr_tbolt;
724 		sc->mfi_enable_intr(sc);
725 	} else {
726 		if ((error = mfi_comms_init(sc)) != 0)
727 			return (error);
728 
729 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
730 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
731 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
732 			return (EINVAL);
733 		}
734 		sc->mfi_intr_ptr = mfi_intr;
735 		sc->mfi_enable_intr(sc);
736 	}
737 	if ((error = mfi_get_controller_info(sc)) != 0)
738 		return (error);
739 	sc->disableOnlineCtrlReset = 0;
740 
741 	/* Register a config hook to probe the bus for arrays */
742 	sc->mfi_ich.ich_func = mfi_startup;
743 	sc->mfi_ich.ich_arg = sc;
744 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
745 		device_printf(sc->mfi_dev, "Cannot establish configuration "
746 		    "hook\n");
747 		return (EINVAL);
748 	}
749 	mtx_lock(&sc->mfi_io_lock);
750 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
751 		mtx_unlock(&sc->mfi_io_lock);
752 		return (error);
753 	}
754 	mtx_unlock(&sc->mfi_io_lock);
755 
756 	/*
757 	 * Register a shutdown handler.
758 	 */
759 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
760 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
761 		device_printf(sc->mfi_dev, "Warning: shutdown event "
762 		    "registration failed\n");
763 	}
764 
765 	/*
766 	 * Create the control device for doing management
767 	 */
768 	unit = device_get_unit(sc->mfi_dev);
769 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
770 	    0640, "mfi%d", unit);
771 	if (unit == 0 && sc->mfi_cdev != NULL)
772 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
773 	if (sc->mfi_cdev != NULL)
774 		sc->mfi_cdev->si_drv1 = sc;
775 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
776 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
777 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
778 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
779 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
780 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
781 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
782 	    &sc->mfi_keep_deleted_volumes, 0,
783 	    "Don't detach the mfid device for a busy volume that is deleted");
784 
785 	device_add_child(sc->mfi_dev, "mfip", -1);
786 	bus_generic_attach(sc->mfi_dev);
787 
788 	/* Start the timeout watchdog */
789 	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
790 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
791 	    mfi_timeout, sc);
792 
793 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
794 		mtx_lock(&sc->mfi_io_lock);
795 		mfi_tbolt_sync_map_info(sc);
796 		mtx_unlock(&sc->mfi_io_lock);
797 	}
798 
799 	return (0);
800 }
801 
802 static int
803 mfi_alloc_commands(struct mfi_softc *sc)
804 {
805 	struct mfi_command *cm;
806 	int i, j;
807 
808 	/*
809 	 * XXX Should we allocate all the commands up front, or allocate on
810 	 * demand later like 'aac' does?
811 	 */
812 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
813 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
814 
815 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
816 		cm = &sc->mfi_commands[i];
817 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
818 		    sc->mfi_cmd_size * i);
819 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
820 		    sc->mfi_cmd_size * i;
821 		cm->cm_frame->header.context = i;
822 		cm->cm_sense = &sc->mfi_sense[i];
823 		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
824 		cm->cm_sc = sc;
825 		cm->cm_index = i;
826 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
827 		    &cm->cm_dmamap) == 0) {
828 			mtx_lock(&sc->mfi_io_lock);
829 			mfi_release_command(cm);
830 			mtx_unlock(&sc->mfi_io_lock);
831 		} else {
832 			device_printf(sc->mfi_dev, "Failed to allocate %d "
833 			    "command blocks, only allocated %d\n",
834 			    sc->mfi_max_fw_cmds, i);
835 			for (j = 0; j < i; j++) {
836 				cm = &sc->mfi_commands[j];
837 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
838 				    cm->cm_dmamap);
839 			}
840 			free(sc->mfi_commands, M_MFIBUF);
841 			sc->mfi_commands = NULL;
842 
843 			return (ENOMEM);
844 		}
845 	}
846 
847 	return (0);
848 }
849 
850 void
851 mfi_release_command(struct mfi_command *cm)
852 {
853 	struct mfi_frame_header *hdr;
854 	uint32_t *hdr_data;
855 
856 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
857 
858 	/*
859 	 * Zero out the important fields of the frame, but make sure the
860 	 * context field is preserved.  For efficiency, handle the fields
861 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
862 	 */
863 	hdr = &cm->cm_frame->header;
864 	if (cm->cm_data != NULL && hdr->sg_count) {
865 		cm->cm_sg->sg32[0].len = 0;
866 		cm->cm_sg->sg32[0].addr = 0;
867 	}
868 
869 	/*
870 	 * Command may be on other queues e.g. busy queue depending on the
871 	 * flow of a previous call to mfi_mapcmd, so ensure that it is
872 	 * dequeued properly.
873 	 */
874 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
875 		mfi_remove_busy(cm);
876 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
877 		mfi_remove_ready(cm);
878 
879 	/* We're not expecting it to be on any other queue but check */
880 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
881 		panic("Command %p is still on another queue, flags = %#x",
882 		    cm, cm->cm_flags);
883 	}
884 
885 	/* tbolt cleanup */
886 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
887 		mfi_tbolt_return_cmd(cm->cm_sc,
888 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
889 		    cm);
890 	}
891 
892 	hdr_data = (uint32_t *)cm->cm_frame;
893 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
894 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
895 	hdr_data[4] = 0;	/* flags, timeout */
896 	hdr_data[5] = 0;	/* data_len */
897 
898 	cm->cm_extra_frames = 0;
899 	cm->cm_flags = 0;
900 	cm->cm_complete = NULL;
901 	cm->cm_private = NULL;
902 	cm->cm_data = NULL;
903 	cm->cm_sg = NULL;
904 	cm->cm_total_frame_size = 0;
905 	cm->retry_for_fw_reset = 0;
906 
907 	mfi_enqueue_free(cm);
908 }
909 
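/*
 * Allocate a command and build a DCMD frame for the given opcode.  If
 * bufsize is non-zero a data buffer is attached as well; when *bufp is
 * NULL a fresh buffer is allocated and handed back through bufp.
 */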
910 int
911 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
912     uint32_t opcode, void **bufp, size_t bufsize)
913 {
914 	struct mfi_command *cm;
915 	struct mfi_dcmd_frame *dcmd;
916 	void *buf = NULL;
917 	uint32_t context = 0;
918 
919 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
920 
921 	cm = mfi_dequeue_free(sc);
922 	if (cm == NULL)
923 		return (EBUSY);
924 
925 	/* Zero out the MFI frame */
926 	context = cm->cm_frame->header.context;
927 	bzero(cm->cm_frame, sizeof(union mfi_frame));
928 	cm->cm_frame->header.context = context;
929 
930 	if ((bufsize > 0) && (bufp != NULL)) {
931 		if (*bufp == NULL) {
932 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
933 			if (buf == NULL) {
934 				mfi_release_command(cm);
935 				return (ENOMEM);
936 			}
937 			*bufp = buf;
938 		} else {
939 			buf = *bufp;
940 		}
941 	}
942 
943 	dcmd = &cm->cm_frame->dcmd;
944 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
945 	dcmd->header.cmd = MFI_CMD_DCMD;
946 	dcmd->header.timeout = 0;
947 	dcmd->header.flags = 0;
948 	dcmd->header.data_len = bufsize;
949 	dcmd->header.scsi_status = 0;
950 	dcmd->opcode = opcode;
951 	cm->cm_sg = &dcmd->sgl;
952 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
953 	cm->cm_flags = 0;
954 	cm->cm_data = buf;
955 	cm->cm_private = buf;
956 	cm->cm_len = bufsize;
957 
958 	*cmp = cm;
959 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
960 		*bufp = buf;
961 	return (0);
962 }
963 
964 static int
965 mfi_comms_init(struct mfi_softc *sc)
966 {
967 	struct mfi_command *cm;
968 	struct mfi_init_frame *init;
969 	struct mfi_init_qinfo *qinfo;
970 	int error;
971 	uint32_t context = 0;
972 
973 	mtx_lock(&sc->mfi_io_lock);
974 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
975 		mtx_unlock(&sc->mfi_io_lock);
976 		return (EBUSY);
977 	}
978 
979 	/* Zero out the MFI frame */
980 	context = cm->cm_frame->header.context;
981 	bzero(cm->cm_frame, sizeof(union mfi_frame));
982 	cm->cm_frame->header.context = context;
983 
984 	/*
985 	 * Abuse the SG list area of the frame to hold the init_qinfo
986 	 * object.
987 	 */
988 	init = &cm->cm_frame->init;
989 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
990 
991 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
992 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
993 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
994 	    offsetof(struct mfi_hwcomms, hw_reply_q);
995 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
996 	    offsetof(struct mfi_hwcomms, hw_pi);
997 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
998 	    offsetof(struct mfi_hwcomms, hw_ci);
999 
1000 	init->header.cmd = MFI_CMD_INIT;
1001 	init->header.data_len = sizeof(struct mfi_init_qinfo);
1002 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1003 	cm->cm_data = NULL;
1004 	cm->cm_flags = MFI_CMD_POLLED;
1005 
1006 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1007 		device_printf(sc->mfi_dev, "failed to send init command\n");
1008 	mfi_release_command(cm);
1009 	mtx_unlock(&sc->mfi_io_lock);
1010 
1011 	return (error);
1012 }
1013 
1014 static int
1015 mfi_get_controller_info(struct mfi_softc *sc)
1016 {
1017 	struct mfi_command *cm = NULL;
1018 	struct mfi_ctrl_info *ci = NULL;
1019 	uint32_t max_sectors_1, max_sectors_2;
1020 	int error;
1021 
1022 	mtx_lock(&sc->mfi_io_lock);
1023 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1024 	    (void **)&ci, sizeof(*ci));
1025 	if (error)
1026 		goto out;
1027 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1028 
1029 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1030 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1031 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1032 		    MFI_SECTOR_LEN;
1033 		error = 0;
1034 		goto out;
1035 	}
1036 
1037 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1038 	    BUS_DMASYNC_POSTREAD);
1039 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1040 
1041 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1042 	max_sectors_2 = ci->max_request_size;
1043 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1044 	sc->disableOnlineCtrlReset =
1045 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1046 
1047 out:
1048 	if (ci)
1049 		free(ci, M_MFIBUF);
1050 	if (cm)
1051 		mfi_release_command(cm);
1052 	mtx_unlock(&sc->mfi_io_lock);
1053 	return (error);
1054 }
1055 
1056 static int
1057 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1058 {
1059 	struct mfi_command *cm = NULL;
1060 	int error;
1061 
1062 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1063 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1064 	    (void **)log_state, sizeof(**log_state));
1065 	if (error)
1066 		goto out;
1067 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1068 
1069 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1070 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1071 		goto out;
1072 	}
1073 
1074 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1075 	    BUS_DMASYNC_POSTREAD);
1076 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1077 
1078 out:
1079 	if (cm)
1080 		mfi_release_command(cm);
1081 
1082 	return (error);
1083 }
1084 
1085 int
1086 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1087 {
1088 	struct mfi_evt_log_state *log_state = NULL;
1089 	union mfi_evt class_locale;
1090 	int error = 0;
1091 	uint32_t seq;
1092 
1093 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1094 
1095 	class_locale.members.reserved = 0;
1096 	class_locale.members.locale = mfi_event_locale;
1097 	class_locale.members.evt_class  = mfi_event_class;
1098 
1099 	if (seq_start == 0) {
1100 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1101 			goto out;
1102 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1103 
1104 		/*
1105 		 * Walk through any events that fired since the last
1106 		 * shutdown.
1107 		 */
1108 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1109 		    log_state->newest_seq_num)) != 0)
1110 			goto out;
1111 		seq = log_state->newest_seq_num;
1112 	} else
1113 		seq = seq_start;
1114 	error = mfi_aen_register(sc, seq, class_locale.word);
1115 out:
1116 	free(log_state, M_MFIBUF);
1117 
1118 	return (error);
1119 }
1120 
1121 int
1122 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1123 {
1124 
1125 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1126 	cm->cm_complete = NULL;
1127 
1128 	/*
1129 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1130 	 * and return 0 to it as status
1131 	 */
1132 	if (cm->cm_frame->dcmd.opcode == 0) {
1133 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1134 		cm->cm_error = 0;
1135 		return (cm->cm_error);
1136 	}
1137 	mfi_enqueue_ready(cm);
1138 	mfi_startio(sc);
1139 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1140 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1141 	return (cm->cm_error);
1142 }
1143 
1144 void
1145 mfi_free(struct mfi_softc *sc)
1146 {
1147 	struct mfi_command *cm;
1148 	int i;
1149 
1150 	callout_drain(&sc->mfi_watchdog_callout);
1151 
1152 	if (sc->mfi_cdev != NULL)
1153 		destroy_dev(sc->mfi_cdev);
1154 
1155 	if (sc->mfi_commands != NULL) {
1156 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1157 			cm = &sc->mfi_commands[i];
1158 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1159 		}
1160 		free(sc->mfi_commands, M_MFIBUF);
1161 		sc->mfi_commands = NULL;
1162 	}
1163 
1164 	if (sc->mfi_intr)
1165 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1166 	if (sc->mfi_irq != NULL)
1167 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1168 		    sc->mfi_irq);
1169 
1170 	if (sc->mfi_sense_busaddr != 0)
1171 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1172 	if (sc->mfi_sense != NULL)
1173 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1174 		    sc->mfi_sense_dmamap);
1175 	if (sc->mfi_sense_dmat != NULL)
1176 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1177 
1178 	if (sc->mfi_frames_busaddr != 0)
1179 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1180 	if (sc->mfi_frames != NULL)
1181 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1182 		    sc->mfi_frames_dmamap);
1183 	if (sc->mfi_frames_dmat != NULL)
1184 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1185 
1186 	if (sc->mfi_comms_busaddr != 0)
1187 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1188 	if (sc->mfi_comms != NULL)
1189 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1190 		    sc->mfi_comms_dmamap);
1191 	if (sc->mfi_comms_dmat != NULL)
1192 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1193 
1194 	/* ThunderBolt contiguous memory free here */
1195 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1196 		if (sc->mfi_tb_busaddr != 0)
1197 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1198 		if (sc->request_message_pool != NULL)
1199 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1200 			    sc->mfi_tb_dmamap);
1201 		if (sc->mfi_tb_dmat != NULL)
1202 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1203 
1204 		/* Version buffer memory free */
1205 		/* Start LSIP200113393 */
1206 		if (sc->verbuf_h_busaddr != 0)
1207 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1208 		if (sc->verbuf != NULL)
1209 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1210 			    sc->verbuf_h_dmamap);
1211 		if (sc->verbuf_h_dmat != NULL)
1212 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1213 
1214 		/* End LSIP200113393 */
1215 		/* ThunderBolt INIT packet memory Free */
1216 		if (sc->mfi_tb_init_busaddr != 0)
1217 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1218 			    sc->mfi_tb_init_dmamap);
1219 		if (sc->mfi_tb_init != NULL)
1220 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1221 			    sc->mfi_tb_init_dmamap);
1222 		if (sc->mfi_tb_init_dmat != NULL)
1223 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1224 
1225 		/* ThunderBolt IOC Init Desc memory free here */
1226 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1227 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1228 			    sc->mfi_tb_ioc_init_dmamap);
1229 		if (sc->mfi_tb_ioc_init_desc != NULL)
1230 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1231 			    sc->mfi_tb_ioc_init_desc,
1232 			    sc->mfi_tb_ioc_init_dmamap);
1233 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1234 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1235 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1236 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1237 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1238 					free(sc->mfi_cmd_pool_tbolt[i],
1239 					    M_MFIBUF);
1240 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1241 				}
1242 			}
1243 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1244 			sc->mfi_cmd_pool_tbolt = NULL;
1245 		}
1246 		if (sc->request_desc_pool != NULL) {
1247 			free(sc->request_desc_pool, M_MFIBUF);
1248 			sc->request_desc_pool = NULL;
1249 		}
1250 	}
1251 	if (sc->mfi_buffer_dmat != NULL)
1252 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1253 	if (sc->mfi_parent_dmat != NULL)
1254 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1255 
1256 	if (mtx_initialized(&sc->mfi_io_lock)) {
1257 		mtx_destroy(&sc->mfi_io_lock);
1258 		sx_destroy(&sc->mfi_config_lock);
1259 	}
1260 
1261 	return;
1262 }
1263 
1264 static void
1265 mfi_startup(void *arg)
1266 {
1267 	struct mfi_softc *sc;
1268 
1269 	sc = (struct mfi_softc *)arg;
1270 
1271 	config_intrhook_disestablish(&sc->mfi_ich);
1272 
1273 	sc->mfi_enable_intr(sc);
1274 	sx_xlock(&sc->mfi_config_lock);
1275 	mtx_lock(&sc->mfi_io_lock);
1276 	mfi_ldprobe(sc);
1277 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1278 	    mfi_syspdprobe(sc);
1279 	mtx_unlock(&sc->mfi_io_lock);
1280 	sx_xunlock(&sc->mfi_config_lock);
1281 }
1282 
1283 static void
1284 mfi_intr(void *arg)
1285 {
1286 	struct mfi_softc *sc;
1287 	struct mfi_command *cm;
1288 	uint32_t pi, ci, context;
1289 
1290 	sc = (struct mfi_softc *)arg;
1291 
1292 	if (sc->mfi_check_clear_intr(sc))
1293 		return;
1294 
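	/*
	 * hw_pi is the producer index the firmware advances as it posts
	 * replies; hw_ci is the consumer index the driver advances as it
	 * pulls completed command contexts off the reply queue.
	 */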
1295 restart:
1296 	pi = sc->mfi_comms->hw_pi;
1297 	ci = sc->mfi_comms->hw_ci;
1298 	mtx_lock(&sc->mfi_io_lock);
1299 	while (ci != pi) {
1300 		context = sc->mfi_comms->hw_reply_q[ci];
1301 		if (context < sc->mfi_max_fw_cmds) {
1302 			cm = &sc->mfi_commands[context];
1303 			mfi_remove_busy(cm);
1304 			cm->cm_error = 0;
1305 			mfi_complete(sc, cm);
1306 		}
1307 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1308 			ci = 0;
1309 	}
1310 
1311 	sc->mfi_comms->hw_ci = ci;
1312 
1313 	/* Give deferred I/O a chance to run */
1314 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1315 	mfi_startio(sc);
1316 	mtx_unlock(&sc->mfi_io_lock);
1317 
1318 	/*
1319 	 * Dummy read to flush the bus; this ensures that the indexes are up
1320 	 * to date.  Restart processing if more commands have come in.
1321 	 */
1322 	(void)sc->mfi_read_fw_status(sc);
1323 	if (pi != sc->mfi_comms->hw_pi)
1324 		goto restart;
1325 
1326 	return;
1327 }
1328 
1329 int
1330 mfi_shutdown(struct mfi_softc *sc)
1331 {
1332 	struct mfi_dcmd_frame *dcmd;
1333 	struct mfi_command *cm;
1334 	int error;
1335 
1336 
1337 	if (sc->mfi_aen_cm != NULL) {
1338 		sc->cm_aen_abort = 1;
1339 		mfi_abort(sc, &sc->mfi_aen_cm);
1340 	}
1341 
1342 	if (sc->mfi_map_sync_cm != NULL) {
1343 		sc->cm_map_abort = 1;
1344 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1345 	}
1346 
1347 	mtx_lock(&sc->mfi_io_lock);
1348 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1349 	if (error) {
1350 		mtx_unlock(&sc->mfi_io_lock);
1351 		return (error);
1352 	}
1353 
1354 	dcmd = &cm->cm_frame->dcmd;
1355 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1356 	cm->cm_flags = MFI_CMD_POLLED;
1357 	cm->cm_data = NULL;
1358 
1359 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1360 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1361 
1362 	mfi_release_command(cm);
1363 	mtx_unlock(&sc->mfi_io_lock);
1364 	return (error);
1365 }
1366 
1367 static void
1368 mfi_syspdprobe(struct mfi_softc *sc)
1369 {
1370 	struct mfi_frame_header *hdr;
1371 	struct mfi_command *cm = NULL;
1372 	struct mfi_pd_list *pdlist = NULL;
1373 	struct mfi_system_pd *syspd, *tmp;
1374 	struct mfi_system_pending *syspd_pend;
1375 	int error, i, found;
1376 
1377 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1378 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1379 	/* Add SYSTEM PD's */
1380 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1381 	    (void **)&pdlist, sizeof(*pdlist));
1382 	if (error) {
1383 		device_printf(sc->mfi_dev,
1384 		    "Error while forming SYSTEM PD list\n");
1385 		goto out;
1386 	}
1387 
1388 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1389 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1390 	cm->cm_frame->dcmd.mbox[1] = 0;
1391 	if (mfi_mapcmd(sc, cm) != 0) {
1392 		device_printf(sc->mfi_dev,
1393 		    "Failed to get syspd device listing\n");
1394 		goto out;
1395 	}
1396 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1397 	    BUS_DMASYNC_POSTREAD);
1398 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1399 	hdr = &cm->cm_frame->header;
1400 	if (hdr->cmd_status != MFI_STAT_OK) {
1401 		device_printf(sc->mfi_dev,
1402 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1403 		goto out;
1404 	}
1405 	/* Get each PD and add it to the system */
1406 	for (i = 0; i < pdlist->count; i++) {
1407 		if (pdlist->addr[i].device_id ==
1408 		    pdlist->addr[i].encl_device_id)
1409 			continue;
1410 		found = 0;
1411 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1412 			if (syspd->pd_id == pdlist->addr[i].device_id)
1413 				found = 1;
1414 		}
1415 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1416 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1417 				found = 1;
1418 		}
1419 		if (found == 0)
1420 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1421 	}
1422 	/* Delete SYSPDs whose state has changed */
1423 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1424 		found = 0;
1425 		for (i = 0; i < pdlist->count; i++) {
1426 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1427 				found = 1;
1428 				break;
1429 			}
1430 		}
1431 		if (found == 0) {
1432 			device_printf(sc->mfi_dev, "Deleting system PD %d\n", syspd->pd_id);
1433 			mtx_unlock(&sc->mfi_io_lock);
1434 			mtx_lock(&Giant);
1435 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1436 			mtx_unlock(&Giant);
1437 			mtx_lock(&sc->mfi_io_lock);
1438 		}
1439 	}
1440 out:
1441 	if (pdlist)
1442 	    free(pdlist, M_MFIBUF);
1443 	if (cm)
1444 	    mfi_release_command(cm);
1445 
1446 	return;
1447 }
1448 
1449 static void
1450 mfi_ldprobe(struct mfi_softc *sc)
1451 {
1452 	struct mfi_frame_header *hdr;
1453 	struct mfi_command *cm = NULL;
1454 	struct mfi_ld_list *list = NULL;
1455 	struct mfi_disk *ld;
1456 	struct mfi_disk_pending *ld_pend;
1457 	int error, i;
1458 
1459 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1460 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1461 
1462 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1463 	    (void **)&list, sizeof(*list));
1464 	if (error)
1465 		goto out;
1466 
1467 	cm->cm_flags = MFI_CMD_DATAIN;
1468 	if (mfi_wait_command(sc, cm) != 0) {
1469 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1470 		goto out;
1471 	}
1472 
1473 	hdr = &cm->cm_frame->header;
1474 	if (hdr->cmd_status != MFI_STAT_OK) {
1475 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1476 		    hdr->cmd_status);
1477 		goto out;
1478 	}
1479 
1480 	for (i = 0; i < list->ld_count; i++) {
1481 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1482 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1483 				goto skip_add;
1484 		}
1485 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1486 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1487 				goto skip_add;
1488 		}
1489 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1490 	skip_add:;
1491 	}
1492 out:
1493 	if (list)
1494 		free(list, M_MFIBUF);
1495 	if (cm)
1496 		mfi_release_command(cm);
1497 
1498 	return;
1499 }
1500 
1501 /*
1502  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1503  * bits 24-31 are all set, then it is the number of seconds since
1504  * boot.
1505  */
1506 static const char *
1507 format_timestamp(uint32_t timestamp)
1508 {
1509 	static char buffer[32];
1510 
1511 	if ((timestamp & 0xff000000) == 0xff000000)
1512 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1513 		    0x00ffffff);
1514 	else
1515 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1516 	return (buffer);
1517 }
1518 
1519 static const char *
1520 format_class(int8_t class)
1521 {
1522 	static char buffer[6];
1523 
1524 	switch (class) {
1525 	case MFI_EVT_CLASS_DEBUG:
1526 		return ("debug");
1527 	case MFI_EVT_CLASS_PROGRESS:
1528 		return ("progress");
1529 	case MFI_EVT_CLASS_INFO:
1530 		return ("info");
1531 	case MFI_EVT_CLASS_WARNING:
1532 		return ("WARN");
1533 	case MFI_EVT_CLASS_CRITICAL:
1534 		return ("CRIT");
1535 	case MFI_EVT_CLASS_FATAL:
1536 		return ("FATAL");
1537 	case MFI_EVT_CLASS_DEAD:
1538 		return ("DEAD");
1539 	default:
1540 		snprintf(buffer, sizeof(buffer), "%d", class);
1541 		return (buffer);
1542 	}
1543 }
1544 
1545 static void
1546 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1547 {
1548 	struct mfi_system_pd *syspd = NULL;
1549 
1550 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1551 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1552 	    format_class(detail->evt_class.members.evt_class),
1553 	    detail->description);
1554 
1555 	/* Don't act on old AENs or while shutting down */
1556 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1557 		return;
1558 
1559 	switch (detail->arg_type) {
1560 	case MR_EVT_ARGS_NONE:
1561 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1562 			device_printf(sc->mfi_dev, "HostBus scan raised\n");
1563 			if (mfi_detect_jbod_change) {
1564 				/*
1565 				 * Probe for new SYSPD's and Delete
1566 				 * invalid SYSPD's
1567 				 */
1568 				sx_xlock(&sc->mfi_config_lock);
1569 				mtx_lock(&sc->mfi_io_lock);
1570 				mfi_syspdprobe(sc);
1571 				mtx_unlock(&sc->mfi_io_lock);
1572 				sx_xunlock(&sc->mfi_config_lock);
1573 			}
1574 		}
1575 		break;
1576 	case MR_EVT_ARGS_LD_STATE:
1577 		/* During load time the driver reads all the events starting
1578 		 * from the one that has been logged after shutdown. Avoid
1579 		 * these old events.
1580 		 */
1581 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1582 			/* Remove the LD */
1583 			struct mfi_disk *ld;
1584 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1585 				if (ld->ld_id ==
1586 				    detail->args.ld_state.ld.target_id)
1587 					break;
1588 			}
1589 			/*
1590 			 * Fix for kernel panics when an SSCD is removed:
1591 			 * KASSERT(ld != NULL, ("volume disappeared"));
1592 			 */
1593 			if (ld != NULL) {
1594 				mtx_lock(&Giant);
1595 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1596 				mtx_unlock(&Giant);
1597 			}
1598 		}
1599 		break;
1600 	case MR_EVT_ARGS_PD:
1601 		if (detail->code == MR_EVT_PD_REMOVED) {
1602 			if (mfi_detect_jbod_change) {
1603 				/*
1604 				 * If the removed device is a SYSPD then
1605 				 * delete it
1606 				 */
1607 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1608 				    pd_link) {
1609 					if (syspd->pd_id ==
1610 					    detail->args.pd.device_id) {
1611 						mtx_lock(&Giant);
1612 						device_delete_child(
1613 						    sc->mfi_dev,
1614 						    syspd->pd_dev);
1615 						mtx_unlock(&Giant);
1616 						break;
1617 					}
1618 				}
1619 			}
1620 		}
1621 		if (detail->code == MR_EVT_PD_INSERTED) {
1622 			if (mfi_detect_jbod_change) {
1623 				/* Probe for new SYSPD's */
1624 				sx_xlock(&sc->mfi_config_lock);
1625 				mtx_lock(&sc->mfi_io_lock);
1626 				mfi_syspdprobe(sc);
1627 				mtx_unlock(&sc->mfi_io_lock);
1628 				sx_xunlock(&sc->mfi_config_lock);
1629 			}
1630 		}
1631 		if (sc->mfi_cam_rescan_cb != NULL &&
1632 		    (detail->code == MR_EVT_PD_INSERTED ||
1633 		    detail->code == MR_EVT_PD_REMOVED)) {
1634 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1635 		}
1636 		break;
1637 	}
1638 }
1639 
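/*
 * Queue a decoded event for deferred handling.  mfi_decode_evt() may need
 * to take Giant to delete child devices, so events seen at interrupt time
 * are queued here and processed later by mfi_handle_evt() on the SWI
 * taskqueue.
 */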
1640 static void
1641 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1642 {
1643 	struct mfi_evt_queue_elm *elm;
1644 
1645 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1646 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1647 	if (elm == NULL)
1648 		return;
1649 	memcpy(&elm->detail, detail, sizeof(*detail));
1650 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1651 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1652 }
1653 
1654 static void
1655 mfi_handle_evt(void *context, int pending)
1656 {
1657 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1658 	struct mfi_softc *sc;
1659 	struct mfi_evt_queue_elm *elm;
1660 
1661 	sc = context;
1662 	TAILQ_INIT(&queue);
1663 	mtx_lock(&sc->mfi_io_lock);
1664 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1665 	mtx_unlock(&sc->mfi_io_lock);
1666 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1667 		TAILQ_REMOVE(&queue, elm, link);
1668 		mfi_decode_evt(sc, &elm->detail);
1669 		free(elm, M_MFIBUF);
1670 	}
1671 }
1672 
1673 static int
1674 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1675 {
1676 	struct mfi_command *cm;
1677 	struct mfi_dcmd_frame *dcmd;
1678 	union mfi_evt current_aen, prior_aen;
1679 	struct mfi_evt_detail *ed = NULL;
1680 	int error = 0;
1681 
1682 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1683 
1684 	current_aen.word = locale;
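	/*
	 * If an AEN command is already outstanding, check whether its
	 * class/locale registration already covers this request.  If so
	 * there is nothing to do; otherwise widen the registration (union
	 * of locales, minimum of classes) and abort the old command so a
	 * new one is issued below.
	 */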
1685 	if (sc->mfi_aen_cm != NULL) {
1686 		prior_aen.word =
1687 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1688 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1689 		    !((prior_aen.members.locale & current_aen.members.locale) ^
1690 		    current_aen.members.locale)) {
1691 			return (0);
1692 		} else {
1693 			prior_aen.members.locale |= current_aen.members.locale;
1694 			if (prior_aen.members.evt_class
1695 			    < current_aen.members.evt_class)
1696 				current_aen.members.evt_class =
1697 				    prior_aen.members.evt_class;
1698 			mfi_abort(sc, &sc->mfi_aen_cm);
1699 		}
1700 	}
1701 
1702 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1703 	    (void **)&ed, sizeof(*ed));
1704 	if (error)
1705 		goto out;
1706 
1707 	dcmd = &cm->cm_frame->dcmd;
1708 	((uint32_t *)&dcmd->mbox)[0] = seq;
1709 	((uint32_t *)&dcmd->mbox)[1] = locale;
1710 	cm->cm_flags = MFI_CMD_DATAIN;
1711 	cm->cm_complete = mfi_aen_complete;
1712 
1713 	sc->last_seq_num = seq;
1714 	sc->mfi_aen_cm = cm;
1715 
1716 	mfi_enqueue_ready(cm);
1717 	mfi_startio(sc);
1718 
1719 out:
1720 	return (error);
1721 }
1722 
1723 static void
1724 mfi_aen_complete(struct mfi_command *cm)
1725 {
1726 	struct mfi_frame_header *hdr;
1727 	struct mfi_softc *sc;
1728 	struct mfi_evt_detail *detail;
1729 	struct mfi_aen *mfi_aen_entry, *tmp;
1730 	int seq = 0, aborted = 0;
1731 
1732 	sc = cm->cm_sc;
1733 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1734 
1735 	if (sc->mfi_aen_cm == NULL)
1736 		return;
1737 
1738 	hdr = &cm->cm_frame->header;
1739 
1740 	if (sc->cm_aen_abort ||
1741 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1742 		sc->cm_aen_abort = 0;
1743 		aborted = 1;
1744 	} else {
1745 		sc->mfi_aen_triggered = 1;
1746 		if (sc->mfi_poll_waiting) {
1747 			sc->mfi_poll_waiting = 0;
1748 			selwakeup(&sc->mfi_select);
1749 		}
1750 		detail = cm->cm_data;
1751 		mfi_queue_evt(sc, detail);
1752 		seq = detail->seq + 1;
1753 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1754 		    tmp) {
1755 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1756 			    aen_link);
1757 			PROC_LOCK(mfi_aen_entry->p);
1758 			kern_psignal(mfi_aen_entry->p, SIGIO);
1759 			PROC_UNLOCK(mfi_aen_entry->p);
1760 			free(mfi_aen_entry, M_MFIBUF);
1761 		}
1762 	}
1763 
1764 	free(cm->cm_data, M_MFIBUF);
1765 	wakeup(&sc->mfi_aen_cm);
1766 	sc->mfi_aen_cm = NULL;
1767 	mfi_release_command(cm);
1768 
1769 	/* set it up again so the driver can catch more events */
1770 	if (!aborted)
1771 		mfi_aen_setup(sc, seq);
1772 }
1773 
1774 #define MAX_EVENTS 15
1775 
1776 static int
1777 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1778 {
1779 	struct mfi_command *cm;
1780 	struct mfi_dcmd_frame *dcmd;
1781 	struct mfi_evt_list *el;
1782 	union mfi_evt class_locale;
1783 	int error, i, seq, size;
1784 
1785 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1786 
1787 	class_locale.members.reserved = 0;
1788 	class_locale.members.locale = mfi_event_locale;
1789 	class_locale.members.evt_class  = mfi_event_class;
1790 
1791 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1792 		* (MAX_EVENTS - 1);
1793 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1794 	if (el == NULL)
1795 		return (ENOMEM);
1796 
1797 	for (seq = start_seq;;) {
1798 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1799 			free(el, M_MFIBUF);
1800 			return (EBUSY);
1801 		}
1802 
1803 		dcmd = &cm->cm_frame->dcmd;
1804 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1805 		dcmd->header.cmd = MFI_CMD_DCMD;
1806 		dcmd->header.timeout = 0;
1807 		dcmd->header.data_len = size;
1808 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1809 		((uint32_t *)&dcmd->mbox)[0] = seq;
1810 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1811 		cm->cm_sg = &dcmd->sgl;
1812 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1813 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1814 		cm->cm_data = el;
1815 		cm->cm_len = size;
1816 
1817 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1818 			device_printf(sc->mfi_dev,
1819 			    "Failed to get controller entries\n");
1820 			mfi_release_command(cm);
1821 			break;
1822 		}
1823 
1824 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1825 		    BUS_DMASYNC_POSTREAD);
1826 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1827 
1828 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1829 			mfi_release_command(cm);
1830 			break;
1831 		}
1832 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1833 			device_printf(sc->mfi_dev,
1834 			    "Error %d fetching controller entries\n",
1835 			    dcmd->header.cmd_status);
1836 			mfi_release_command(cm);
1837 			error = EIO;
1838 			break;
1839 		}
1840 		mfi_release_command(cm);
1841 
1842 		for (i = 0; i < el->count; i++) {
1843 			/*
1844 			 * If this event is newer than 'stop_seq' then
1845 			 * break out of the loop.  Note that the log
1846 			 * is a circular buffer so we have to handle
1847 			 * the case that our stop point is earlier in
1848 			 * the buffer than our start point.
1849 			 */
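			/*
			 * Example: with start_seq == 100 and stop_seq == 10
			 * the log has wrapped, so any event with a seq in
			 * [10, 100) is an old entry and ends the scan.
			 */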
1850 			if (el->event[i].seq >= stop_seq) {
1851 				if (start_seq <= stop_seq)
1852 					break;
1853 				else if (el->event[i].seq < start_seq)
1854 					break;
1855 			}
1856 			mfi_queue_evt(sc, &el->event[i]);
1857 		}
1858 		seq = el->event[el->count - 1].seq + 1;
1859 	}
1860 
1861 	free(el, M_MFIBUF);
1862 	return (error);
1863 }
1864 
1865 static int
1866 mfi_add_ld(struct mfi_softc *sc, int id)
1867 {
1868 	struct mfi_command *cm;
1869 	struct mfi_dcmd_frame *dcmd = NULL;
1870 	struct mfi_ld_info *ld_info = NULL;
1871 	struct mfi_disk_pending *ld_pend;
1872 	int error;
1873 
1874 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1875 
1876 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1877 	if (ld_pend != NULL) {
1878 		ld_pend->ld_id = id;
1879 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1880 	}
1881 
1882 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1883 	    (void **)&ld_info, sizeof(*ld_info));
1884 	if (error) {
1885 		device_printf(sc->mfi_dev,
1886 		    "Failed to allocate MFI_DCMD_LD_GET_INFO %d\n", error);
1887 		if (ld_info)
1888 			free(ld_info, M_MFIBUF);
1889 		return (error);
1890 	}
1891 	cm->cm_flags = MFI_CMD_DATAIN;
1892 	dcmd = &cm->cm_frame->dcmd;
1893 	dcmd->mbox[0] = id;
1894 	if (mfi_wait_command(sc, cm) != 0) {
1895 		device_printf(sc->mfi_dev,
1896 		    "Failed to get logical drive: %d\n", id);
1897 		free(ld_info, M_MFIBUF);
1898 		return (0);
1899 	}
1900 	if (ld_info->ld_config.params.isSSCD != 1)
1901 		mfi_add_ld_complete(cm);
1902 	else {
1903 		mfi_release_command(cm);
1904 		if (ld_info)	/* For SSCD volumes, free ld_info here */
1905 			free(ld_info, M_MFIBUF);
1906 	}
1907 	return (0);
1908 }
1909 
1910 static void
1911 mfi_add_ld_complete(struct mfi_command *cm)
1912 {
1913 	struct mfi_frame_header *hdr;
1914 	struct mfi_ld_info *ld_info;
1915 	struct mfi_softc *sc;
1916 	device_t child;
1917 
1918 	sc = cm->cm_sc;
1919 	hdr = &cm->cm_frame->header;
1920 	ld_info = cm->cm_private;
1921 
1922 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1923 		free(ld_info, M_MFIBUF);
1924 		wakeup(&sc->mfi_map_sync_cm);
1925 		mfi_release_command(cm);
1926 		return;
1927 	}
1928 	wakeup(&sc->mfi_map_sync_cm);
1929 	mfi_release_command(cm);
1930 
1931 	mtx_unlock(&sc->mfi_io_lock);
1932 	mtx_lock(&Giant);
1933 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1934 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1935 		free(ld_info, M_MFIBUF);
1936 		mtx_unlock(&Giant);
1937 		mtx_lock(&sc->mfi_io_lock);
1938 		return;
1939 	}
1940 
1941 	device_set_ivars(child, ld_info);
1942 	device_set_desc(child, "MFI Logical Disk");
1943 	bus_generic_attach(sc->mfi_dev);
1944 	mtx_unlock(&Giant);
1945 	mtx_lock(&sc->mfi_io_lock);
1946 }
1947 
1948 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1949 {
1950 	struct mfi_command *cm;
1951 	struct mfi_dcmd_frame *dcmd = NULL;
1952 	struct mfi_pd_info *pd_info = NULL;
1953 	struct mfi_system_pending *syspd_pend;
1954 	int error;
1955 
1956 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1957 
1958 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1959 	if (syspd_pend != NULL) {
1960 		syspd_pend->pd_id = id;
1961 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1962 	}
1963 
1964 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1965 		(void **)&pd_info, sizeof(*pd_info));
1966 	if (error) {
1967 		device_printf(sc->mfi_dev,
1968 		    "Failed to allocate MFI_DCMD_PD_GET_INFO %d\n",
1969 		    error);
1970 		if (pd_info)
1971 			free(pd_info, M_MFIBUF);
1972 		return (error);
1973 	}
1974 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1975 	dcmd = &cm->cm_frame->dcmd;
1976 	dcmd->mbox[0] = id;
1977 	dcmd->header.scsi_status = 0;
1978 	dcmd->header.pad0 = 0;
1979 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1980 		device_printf(sc->mfi_dev,
1981 		    "Failed to get physical drive info %d\n", id);
1982 		free(pd_info, M_MFIBUF);
1983 		mfi_release_command(cm);
1984 		return (error);
1985 	}
1986 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1987 	    BUS_DMASYNC_POSTREAD);
1988 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1989 	mfi_add_sys_pd_complete(cm);
1990 	return (0);
1991 }
1992 
1993 static void
1994 mfi_add_sys_pd_complete(struct mfi_command *cm)
1995 {
1996 	struct mfi_frame_header *hdr;
1997 	struct mfi_pd_info *pd_info;
1998 	struct mfi_softc *sc;
1999 	device_t child;
2000 
2001 	sc = cm->cm_sc;
2002 	hdr = &cm->cm_frame->header;
2003 	pd_info = cm->cm_private;
2004 
2005 	if (hdr->cmd_status != MFI_STAT_OK) {
2006 		free(pd_info, M_MFIBUF);
2007 		mfi_release_command(cm);
2008 		return;
2009 	}
2010 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2011 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2012 		    pd_info->ref.v.device_id);
2013 		free(pd_info, M_MFIBUF);
2014 		mfi_release_command(cm);
2015 		return;
2016 	}
2017 	mfi_release_command(cm);
2018 
2019 	mtx_unlock(&sc->mfi_io_lock);
2020 	mtx_lock(&Giant);
2021 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2022 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2023 		free(pd_info, M_MFIBUF);
2024 		mtx_unlock(&Giant);
2025 		mtx_lock(&sc->mfi_io_lock);
2026 		return;
2027 	}
2028 
2029 	device_set_ivars(child, pd_info);
2030 	device_set_desc(child, "MFI System PD");
2031 	bus_generic_attach(sc->mfi_dev);
2032 	mtx_unlock(&Giant);
2033 	mtx_lock(&sc->mfi_io_lock);
2034 }
2035 
2036 static struct mfi_command *
2037 mfi_bio_command(struct mfi_softc *sc)
2038 {
2039 	struct bio *bio;
2040 	struct mfi_command *cm = NULL;
2041 
2042 	/* Reserve two commands to avoid starving ioctls */
2043 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2044 		return (NULL);
2045 	}
2046 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2047 		return (NULL);
2048 	}
2049 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2050 		cm = mfi_build_ldio(sc, bio);
2051 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2052 		cm = mfi_build_syspdio(sc, bio);
2053 	}
2054 	if (cm == NULL)
2055 		mfi_enqueue_bio(sc, bio);
2056 	return (cm);
2057 }
2058 
2059 /*
2060  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2061  */
2062 
2063 int
2064 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count,
    uint8_t *cdb)
2065 {
2066 	int cdb_len;
2067 
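	/*
	 * Pick the smallest CDB that can encode the request:
	 *   6-byte:  LBA < 2^21, block count <= 255, byte2 == 0
	 *   10-byte: LBA < 2^32, block count <= 65535
	 *   12-byte: LBA < 2^32, any 32-bit block count
	 *   16-byte: LBA >= 2^32
	 */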
2068 	if (((lba & 0x1fffff) == lba) &&
2069 	    ((block_count & 0xff) == block_count) &&
2070 	    (byte2 == 0)) {
2071 		/* We can fit in a 6 byte cdb */
2072 		struct scsi_rw_6 *scsi_cmd;
2073 
2074 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2075 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2076 		scsi_ulto3b(lba, scsi_cmd->addr);
2077 		scsi_cmd->length = block_count & 0xff;
2078 		scsi_cmd->control = 0;
2079 		cdb_len = sizeof(*scsi_cmd);
2080 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2081 		/* Need a 10 byte CDB */
2082 		struct scsi_rw_10 *scsi_cmd;
2083 
2084 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2085 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2086 		scsi_cmd->byte2 = byte2;
2087 		scsi_ulto4b(lba, scsi_cmd->addr);
2088 		scsi_cmd->reserved = 0;
2089 		scsi_ulto2b(block_count, scsi_cmd->length);
2090 		scsi_cmd->control = 0;
2091 		cdb_len = sizeof(*scsi_cmd);
2092 	} else if (((block_count & 0xffffffff) == block_count) &&
2093 	    ((lba & 0xffffffff) == lba)) {
2094 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2095 		struct scsi_rw_12 *scsi_cmd;
2096 
2097 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2098 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2099 		scsi_cmd->byte2 = byte2;
2100 		scsi_ulto4b(lba, scsi_cmd->addr);
2101 		scsi_cmd->reserved = 0;
2102 		scsi_ulto4b(block_count, scsi_cmd->length);
2103 		scsi_cmd->control = 0;
2104 		cdb_len = sizeof(*scsi_cmd);
2105 	} else {
2106 		/*
2107 		 * 16 byte CDB.  We'll only get here if the LBA doesn't
2108 		 * fit in 32 bits.
2109 		 */
2110 		struct scsi_rw_16 *scsi_cmd;
2111 
2112 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2113 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2114 		scsi_cmd->byte2 = byte2;
2115 		scsi_u64to8b(lba, scsi_cmd->addr);
2116 		scsi_cmd->reserved = 0;
2117 		scsi_ulto4b(block_count, scsi_cmd->length);
2118 		scsi_cmd->control = 0;
2119 		cdb_len = sizeof(*scsi_cmd);
2120 	}
2121 
2122 	return (cdb_len);
2123 }
2124 
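/*
 * For bio commands cm_data is set to the global unmapped_buf as a non-NULL
 * placeholder; the real buffer comes from the bio itself, which mfi_mapcmd
 * loads with bus_dmamap_load_bio() when MFI_CMD_BIO is set.
 */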
2125 extern char *unmapped_buf;
2126 
2127 static struct mfi_command *
2128 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2129 {
2130 	struct mfi_command *cm;
2131 	struct mfi_pass_frame *pass;
2132 	uint32_t context = 0;
2133 	int flags = 0, blkcount = 0, readop;
2134 	uint8_t cdb_len;
2135 
2136 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2137 
2138 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2139 		return (NULL);
2140 
2141 	/* Zero out the MFI frame */
2142 	context = cm->cm_frame->header.context;
2143 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2144 	cm->cm_frame->header.context = context;
2145 	pass = &cm->cm_frame->pass;
2146 	bzero(pass->cdb, 16);
2147 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2148 	switch (bio->bio_cmd & 0x03) {
2149 	case BIO_READ:
2150 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2151 		readop = 1;
2152 		break;
2153 	case BIO_WRITE:
2154 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2155 		readop = 0;
2156 		break;
2157 	default:
2158 		/* TODO: what about BIO_DELETE? */
2159 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2160 	}
2161 
2162 	/* Cheat with the sector length to avoid a non-constant division */
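	/* (i.e. assume MFI_SECTOR_LEN rather than the device's reported sector size) */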
2163 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2164 	/* Fill the LBA and Transfer length in CDB */
2165 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2166 	    pass->cdb);
2167 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2168 	pass->header.lun_id = 0;
2169 	pass->header.timeout = 0;
2170 	pass->header.flags = 0;
2171 	pass->header.scsi_status = 0;
2172 	pass->header.sense_len = MFI_SENSE_LEN;
2173 	pass->header.data_len = bio->bio_bcount;
2174 	pass->header.cdb_len = cdb_len;
2175 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2176 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2177 	cm->cm_complete = mfi_bio_complete;
2178 	cm->cm_private = bio;
2179 	cm->cm_data = unmapped_buf;
2180 	cm->cm_len = bio->bio_bcount;
2181 	cm->cm_sg = &pass->sgl;
2182 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2183 	cm->cm_flags = flags;
2184 
2185 	return (cm);
2186 }
2187 
2188 static struct mfi_command *
2189 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2190 {
2191 	struct mfi_io_frame *io;
2192 	struct mfi_command *cm;
2193 	int flags;
2194 	uint32_t blkcount;
2195 	uint32_t context = 0;
2196 
2197 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2198 
2199 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2200 		return (NULL);
2201 
2202 	/* Zero out the MFI frame */
2203 	context = cm->cm_frame->header.context;
2204 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2205 	cm->cm_frame->header.context = context;
2206 	io = &cm->cm_frame->io;
2207 	switch (bio->bio_cmd & 0x03) {
2208 	case BIO_READ:
2209 		io->header.cmd = MFI_CMD_LD_READ;
2210 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2211 		break;
2212 	case BIO_WRITE:
2213 		io->header.cmd = MFI_CMD_LD_WRITE;
2214 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2215 		break;
2216 	default:
2217 		/* TODO: what about BIO_DELETE? */
2218 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2219 	}
2220 
2221 	/* Cheat with the sector length to avoid a non-constant division */
2222 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2223 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2224 	io->header.timeout = 0;
2225 	io->header.flags = 0;
2226 	io->header.scsi_status = 0;
2227 	io->header.sense_len = MFI_SENSE_LEN;
2228 	io->header.data_len = blkcount;
2229 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2230 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2231 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2232 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2233 	cm->cm_complete = mfi_bio_complete;
2234 	cm->cm_private = bio;
2235 	cm->cm_data = unmapped_buf;
2236 	cm->cm_len = bio->bio_bcount;
2237 	cm->cm_sg = &io->sgl;
2238 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2239 	cm->cm_flags = flags;
2240 
2241 	return (cm);
2242 }
2243 
2244 static void
2245 mfi_bio_complete(struct mfi_command *cm)
2246 {
2247 	struct bio *bio;
2248 	struct mfi_frame_header *hdr;
2249 	struct mfi_softc *sc;
2250 
2251 	bio = cm->cm_private;
2252 	hdr = &cm->cm_frame->header;
2253 	sc = cm->cm_sc;
2254 
2255 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2256 		bio->bio_flags |= BIO_ERROR;
2257 		bio->bio_error = EIO;
2258 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2259 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2260 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2261 	} else if (cm->cm_error != 0) {
2262 		bio->bio_flags |= BIO_ERROR;
2263 		bio->bio_error = cm->cm_error;
2264 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2265 		    cm, cm->cm_error);
2266 	}
2267 
2268 	mfi_release_command(cm);
2269 	mfi_disk_complete(bio);
2270 }
2271 
2272 void
2273 mfi_startio(struct mfi_softc *sc)
2274 {
2275 	struct mfi_command *cm;
2276 	struct ccb_hdr *ccbh;
2277 
2278 	for (;;) {
2279 		/* Don't bother if we're short on resources */
2280 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2281 			break;
2282 
2283 		/* Try a command that has already been prepared */
2284 		cm = mfi_dequeue_ready(sc);
2285 
2286 		if (cm == NULL) {
2287 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2288 				cm = sc->mfi_cam_start(ccbh);
2289 		}
2290 
2291 		/* Nope, so look for work on the bioq */
2292 		if (cm == NULL)
2293 			cm = mfi_bio_command(sc);
2294 
2295 		/* No work available, so exit */
2296 		if (cm == NULL)
2297 			break;
2298 
2299 		/* Send the command to the controller */
2300 		if (mfi_mapcmd(sc, cm) != 0) {
2301 			device_printf(sc->mfi_dev, "Failed to startio\n");
2302 			mfi_requeue_ready(cm);
2303 			break;
2304 		}
2305 	}
2306 }
2307 
2308 int
2309 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2310 {
2311 	int error, polled;
2312 
2313 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2314 
2315 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2316 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2317 		if (cm->cm_flags & MFI_CMD_CCB)
2318 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2319 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2320 			    polled);
2321 		else if (cm->cm_flags & MFI_CMD_BIO)
2322 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2323 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2324 			    polled);
2325 		else
2326 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2327 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2328 			    mfi_data_cb, cm, polled);
2329 		if (error == EINPROGRESS) {
2330 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2331 			return (0);
2332 		}
2333 	} else {
2334 		error = mfi_send_frame(sc, cm);
2335 	}
2336 
2337 	return (error);
2338 }
2339 
2340 static void
2341 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2342 {
2343 	struct mfi_frame_header *hdr;
2344 	struct mfi_command *cm;
2345 	union mfi_sgl *sgl;
2346 	struct mfi_softc *sc;
2347 	int i, j, first, dir;
2348 	int sge_size, locked;
2349 
2350 	cm = (struct mfi_command *)arg;
2351 	sc = cm->cm_sc;
2352 	hdr = &cm->cm_frame->header;
2353 	sgl = cm->cm_sg;
2354 
2355 	/*
2356 	 * We need to check whether we hold the lock: this is an async
2357 	 * callback, so even though our caller mfi_mapcmd asserts that
2358 	 * it holds the lock, there is no guarantee it hasn't been
2359 	 * dropped if bus_dmamap_load returned before this callback
2360 	 * ran.
2361 	 */
2362 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2363 		mtx_lock(&sc->mfi_io_lock);
2364 
2365 	if (error) {
2366 		printf("error %d in callback\n", error);
2367 		cm->cm_error = error;
2368 		mfi_complete(sc, cm);
2369 		goto out;
2370 	}
2371 	/*
2372 	 * Use the IEEE sgl only for I/Os on a SKINNY controller.  For
2373 	 * other commands on a SKINNY controller use either sg32 or
2374 	 * sg64 based on sizeof(bus_addr_t).  Also calculate the total
2375 	 * frame size based on the type of SGL used.
2376 	 */
2377 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2378 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2379 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2380 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2381 		for (i = 0; i < nsegs; i++) {
2382 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2383 			sgl->sg_skinny[i].len = segs[i].ds_len;
2384 			sgl->sg_skinny[i].flag = 0;
2385 		}
2386 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2387 		sge_size = sizeof(struct mfi_sg_skinny);
2388 		hdr->sg_count = nsegs;
2389 	} else {
2390 		j = 0;
2391 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2392 			first = cm->cm_stp_len;
2393 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2394 				sgl->sg32[j].addr = segs[0].ds_addr;
2395 				sgl->sg32[j++].len = first;
2396 			} else {
2397 				sgl->sg64[j].addr = segs[0].ds_addr;
2398 				sgl->sg64[j++].len = first;
2399 			}
2400 		} else
2401 			first = 0;
2402 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2403 			for (i = 0; i < nsegs; i++) {
2404 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2405 				sgl->sg32[j++].len = segs[i].ds_len - first;
2406 				first = 0;
2407 			}
2408 		} else {
2409 			for (i = 0; i < nsegs; i++) {
2410 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2411 				sgl->sg64[j++].len = segs[i].ds_len - first;
2412 				first = 0;
2413 			}
2414 			hdr->flags |= MFI_FRAME_SGL64;
2415 		}
2416 		hdr->sg_count = j;
2417 		sge_size = sc->mfi_sge_size;
2418 	}
2419 
2420 	dir = 0;
2421 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2422 		dir |= BUS_DMASYNC_PREREAD;
2423 		hdr->flags |= MFI_FRAME_DIR_READ;
2424 	}
2425 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2426 		dir |= BUS_DMASYNC_PREWRITE;
2427 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2428 	}
2429 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2430 	cm->cm_flags |= MFI_CMD_MAPPED;
2431 
2432 	/*
2433 	 * Instead of calculating the total number of frames in the
2434 	 * compound frame, it's already assumed that there will be at
2435 	 * least 1 frame, so don't compensate for the modulo of the
2436 	 * following division.
2437 	 */
2438 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2439 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2440 
2441 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2442 		printf("error %d in callback from mfi_send_frame\n", error);
2443 		cm->cm_error = error;
2444 		mfi_complete(sc, cm);
2445 		goto out;
2446 	}
2447 
2448 out:
2449 	/* leave the lock in the state we found it */
2450 	if (locked == 0)
2451 		mtx_unlock(&sc->mfi_io_lock);
2452 
2453 	return;
2454 }
2455 
2456 static int
2457 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2458 {
2459 	int error;
2460 
2461 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2462 
2463 	if (sc->MFA_enabled)
2464 		error = mfi_tbolt_send_frame(sc, cm);
2465 	else
2466 		error = mfi_std_send_frame(sc, cm);
2467 
2468 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2469 		mfi_remove_busy(cm);
2470 
2471 	return (error);
2472 }
2473 
2474 static int
2475 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2476 {
2477 	struct mfi_frame_header *hdr;
2478 	int tm = mfi_polled_cmd_timeout * 1000;
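	/* mfi_polled_cmd_timeout is in seconds; tm counts 1ms DELAY(1000) polls. */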
2479 
2480 	hdr = &cm->cm_frame->header;
2481 
2482 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2483 		cm->cm_timestamp = time_uptime;
2484 		mfi_enqueue_busy(cm);
2485 	} else {
2486 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2487 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2488 	}
2489 
2490 	/*
2491 	 * The bus address of the command is aligned on a 64 byte boundary,
2492 	 * leaving the least 6 bits as zero.  For whatever reason, the
2493 	 * hardware wants the address shifted right by three, leaving just
2494 	 * 3 zero bits.  These three bits are then used as a prefetching
2495 	 * hint for the hardware to predict how many frames need to be
2496 	 * fetched across the bus.  If a command has more than 8 frames
2497 	 * then the 3 bits are set to 0x7 and the firmware uses other
2498 	 * information in the command to determine the total amount to fetch.
2499 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2500 	 * is enough for both 32bit and 64bit systems.
2501 	 */
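	/*
	 * Illustrative example (the exact register write is in the
	 * per-chipset issue routine): a frame at bus address 0x1000 with
	 * two extra frames would be posted as (0x1000 >> 3) | 2 = 0x202.
	 */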
2502 	if (cm->cm_extra_frames > 7)
2503 		cm->cm_extra_frames = 7;
2504 
2505 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2506 
2507 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2508 		return (0);
2509 
2510 	/* This is a polled command, so busy-wait for it to complete. */
2511 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2512 		DELAY(1000);
2513 		tm -= 1;
2514 		if (tm <= 0)
2515 			break;
2516 	}
2517 
2518 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2519 		device_printf(sc->mfi_dev, "Frame %p timed out "
2520 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2521 		return (ETIMEDOUT);
2522 	}
2523 
2524 	return (0);
2525 }
2526 
2528 void
2529 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2530 {
2531 	int dir;
2532 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2533 
2534 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2535 		dir = 0;
2536 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2537 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2538 			dir |= BUS_DMASYNC_POSTREAD;
2539 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2540 			dir |= BUS_DMASYNC_POSTWRITE;
2541 
2542 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2543 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2544 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2545 	}
2546 
2547 	cm->cm_flags |= MFI_CMD_COMPLETED;
2548 
2549 	if (cm->cm_complete != NULL)
2550 		cm->cm_complete(cm);
2551 	else
2552 		wakeup(cm);
2553 }
2554 
2555 static int
2556 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2557 {
2558 	struct mfi_command *cm;
2559 	struct mfi_abort_frame *abort;
2560 	int i = 0, error;
2561 	uint32_t context = 0;
2562 
2563 	mtx_lock(&sc->mfi_io_lock);
2564 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2565 		mtx_unlock(&sc->mfi_io_lock);
2566 		return (EBUSY);
2567 	}
2568 
2569 	/* Zero out the MFI frame */
2570 	context = cm->cm_frame->header.context;
2571 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2572 	cm->cm_frame->header.context = context;
2573 
2574 	abort = &cm->cm_frame->abort;
2575 	abort->header.cmd = MFI_CMD_ABORT;
2576 	abort->header.flags = 0;
2577 	abort->header.scsi_status = 0;
2578 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2579 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2580 	abort->abort_mfi_addr_hi =
2581 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2582 	cm->cm_data = NULL;
2583 	cm->cm_flags = MFI_CMD_POLLED;
2584 
2585 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2586 		device_printf(sc->mfi_dev, "failed to abort command\n");
2587 	mfi_release_command(cm);
2588 
2589 	mtx_unlock(&sc->mfi_io_lock);
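	/* Wait up to 25 seconds (five 5 * hz sleeps) for the abort to take effect. */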
2590 	while (i < 5 && *cm_abort != NULL) {
2591 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2593 		i++;
2594 	}
2595 	if (*cm_abort != NULL) {
2596 		/* Force a complete if command didn't abort */
2597 		mtx_lock(&sc->mfi_io_lock);
2598 		(*cm_abort)->cm_complete(*cm_abort);
2599 		mtx_unlock(&sc->mfi_io_lock);
2600 	}
2601 
2602 	return (error);
2603 }
2604 
2605 int
2606 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2607      int len)
2608 {
2609 	struct mfi_command *cm;
2610 	struct mfi_io_frame *io;
2611 	int error;
2612 	uint32_t context = 0;
2613 
2614 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2615 		return (EBUSY);
2616 
2617 	/* Zero out the MFI frame */
2618 	context = cm->cm_frame->header.context;
2619 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2620 	cm->cm_frame->header.context = context;
2621 
2622 	io = &cm->cm_frame->io;
2623 	io->header.cmd = MFI_CMD_LD_WRITE;
2624 	io->header.target_id = id;
2625 	io->header.timeout = 0;
2626 	io->header.flags = 0;
2627 	io->header.scsi_status = 0;
2628 	io->header.sense_len = MFI_SENSE_LEN;
2629 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2630 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2631 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2632 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2633 	io->lba_lo = lba & 0xffffffff;
2634 	cm->cm_data = virt;
2635 	cm->cm_len = len;
2636 	cm->cm_sg = &io->sgl;
2637 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2638 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2639 
2640 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2641 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2642 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2643 	    BUS_DMASYNC_POSTWRITE);
2644 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2645 	mfi_release_command(cm);
2646 
2647 	return (error);
2648 }
2649 
2650 int
2651 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2652     int len)
2653 {
2654 	struct mfi_command *cm;
2655 	struct mfi_pass_frame *pass;
2656 	int error, readop, cdb_len;
2657 	uint32_t blkcount;
2658 
2659 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2660 		return (EBUSY);
2661 
2662 	pass = &cm->cm_frame->pass;
2663 	bzero(pass->cdb, 16);
2664 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2665 
2666 	readop = 0;
2667 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2668 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2669 	pass->header.target_id = id;
2670 	pass->header.timeout = 0;
2671 	pass->header.flags = 0;
2672 	pass->header.scsi_status = 0;
2673 	pass->header.sense_len = MFI_SENSE_LEN;
2674 	pass->header.data_len = len;
2675 	pass->header.cdb_len = cdb_len;
2676 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2677 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2678 	cm->cm_data = virt;
2679 	cm->cm_len = len;
2680 	cm->cm_sg = &pass->sgl;
2681 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2682 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2683 
2684 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2685 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2686 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2687 	    BUS_DMASYNC_POSTWRITE);
2688 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2689 	mfi_release_command(cm);
2690 
2691 	return (error);
2692 }
2693 
2694 static int
2695 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2696 {
2697 	struct mfi_softc *sc;
2698 	int error;
2699 
2700 	sc = dev->si_drv1;
2701 
2702 	mtx_lock(&sc->mfi_io_lock);
2703 	if (sc->mfi_detaching)
2704 		error = ENXIO;
2705 	else {
2706 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2707 		error = 0;
2708 	}
2709 	mtx_unlock(&sc->mfi_io_lock);
2710 
2711 	return (error);
2712 }
2713 
2714 static int
2715 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2716 {
2717 	struct mfi_softc *sc;
2718 	struct mfi_aen *mfi_aen_entry, *tmp;
2719 
2720 	sc = dev->si_drv1;
2721 
2722 	mtx_lock(&sc->mfi_io_lock);
2723 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2724 
2725 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2726 		if (mfi_aen_entry->p == curproc) {
2727 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2728 			    aen_link);
2729 			free(mfi_aen_entry, M_MFIBUF);
2730 		}
2731 	}
2732 	mtx_unlock(&sc->mfi_io_lock);
2733 	return (0);
2734 }
2735 
2736 static int
2737 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2738 {
2739 
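	/*
	 * DCMDs that create or destroy logical drives must be serialized
	 * against LD probe/attach, so take the config sx lock for them and
	 * tell the caller to drop it later via mfi_config_unlock().
	 */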
2740 	switch (opcode) {
2741 	case MFI_DCMD_LD_DELETE:
2742 	case MFI_DCMD_CFG_ADD:
2743 	case MFI_DCMD_CFG_CLEAR:
2744 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2745 		sx_xlock(&sc->mfi_config_lock);
2746 		return (1);
2747 	default:
2748 		return (0);
2749 	}
2750 }
2751 
2752 static void
2753 mfi_config_unlock(struct mfi_softc *sc, int locked)
2754 {
2755 
2756 	if (locked)
2757 		sx_xunlock(&sc->mfi_config_lock);
2758 }
2759 
2760 /*
2761  * Perform pre-issue checks on commands from userland and possibly veto
2762  * them.
2763  */
2764 static int
2765 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2766 {
2767 	struct mfi_disk *ld, *ld2;
2768 	int error;
2769 	struct mfi_system_pd *syspd = NULL;
2770 	uint16_t syspd_id;
2771 	uint16_t *mbox;
2772 
2773 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2774 	error = 0;
2775 	switch (cm->cm_frame->dcmd.opcode) {
2776 	case MFI_DCMD_LD_DELETE:
2777 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2778 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2779 				break;
2780 		}
2781 		if (ld == NULL)
2782 			error = ENOENT;
2783 		else
2784 			error = mfi_disk_disable(ld);
2785 		break;
2786 	case MFI_DCMD_CFG_CLEAR:
2787 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2788 			error = mfi_disk_disable(ld);
2789 			if (error)
2790 				break;
2791 		}
2792 		if (error) {
2793 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2794 				if (ld2 == ld)
2795 					break;
2796 				mfi_disk_enable(ld2);
2797 			}
2798 		}
2799 		break;
2800 	case MFI_DCMD_PD_STATE_SET:
2801 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2802 		syspd_id = mbox[0];
2803 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2804 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2805 				if (syspd->pd_id == syspd_id)
2806 					break;
2807 			}
2808 		} else
2810 			break;
2811 		if (syspd)
2812 			error = mfi_syspd_disable(syspd);
2813 		break;
2814 	default:
2815 		break;
2816 	}
2817 	return (error);
2818 }
2819 
2820 /* Perform post-issue checks on commands from userland. */
2821 static void
2822 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2823 {
2824 	struct mfi_disk *ld, *ldn;
2825 	struct mfi_system_pd *syspd = NULL;
2826 	uint16_t syspd_id;
2827 	uint16_t *mbox;
2828 
2829 	switch (cm->cm_frame->dcmd.opcode) {
2830 	case MFI_DCMD_LD_DELETE:
2831 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2832 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2833 				break;
2834 		}
2835 		KASSERT(ld != NULL, ("volume disappeared"));
2836 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2837 			mtx_unlock(&sc->mfi_io_lock);
2838 			mtx_lock(&Giant);
2839 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2840 			mtx_unlock(&Giant);
2841 			mtx_lock(&sc->mfi_io_lock);
2842 		} else
2843 			mfi_disk_enable(ld);
2844 		break;
2845 	case MFI_DCMD_CFG_CLEAR:
2846 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2847 			mtx_unlock(&sc->mfi_io_lock);
2848 			mtx_lock(&Giant);
2849 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2850 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2851 			}
2852 			mtx_unlock(&Giant);
2853 			mtx_lock(&sc->mfi_io_lock);
2854 		} else {
2855 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2856 				mfi_disk_enable(ld);
2857 		}
2858 		break;
2859 	case MFI_DCMD_CFG_ADD:
2860 		mfi_ldprobe(sc);
2861 		break;
2862 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2863 		mfi_ldprobe(sc);
2864 		break;
2865 	case MFI_DCMD_PD_STATE_SET:
2866 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2867 		syspd_id = mbox[0];
2868 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2869 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2870 				if (syspd->pd_id == syspd_id)
2871 					break;
2872 			}
2873 		} else
2875 			break;
2876 		/* If the transition fails then enable the syspd again */
2877 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2878 			mfi_syspd_enable(syspd);
2879 		break;
2880 	}
2881 }
2882 
2883 static int
2884 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2885 {
2886 	struct mfi_config_data *conf_data;
2887 	struct mfi_command *ld_cm = NULL;
2888 	struct mfi_ld_info *ld_info = NULL;
2889 	struct mfi_ld_config *ld;
2890 	char *p;
2891 	int error = 0;
2892 
2893 	conf_data = (struct mfi_config_data *)cm->cm_data;
2894 
2895 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2896 		p = (char *)conf_data->array;
2897 		p += conf_data->array_size * conf_data->array_count;
2898 		ld = (struct mfi_ld_config *)p;
2899 		if (ld->params.isSSCD == 1)
2900 			error = 1;
2901 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2902 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2903 		    (void **)&ld_info, sizeof(*ld_info));
2904 		if (error) {
2905 			device_printf(sc->mfi_dev, "Failed to allocate "
2906 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2907 			if (ld_info)
2908 				free(ld_info, M_MFIBUF);
2909 			return (0);
2910 		}
2911 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2912 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2913 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2914 		if (mfi_wait_command(sc, ld_cm) != 0) {
2915 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2916 			mfi_release_command(ld_cm);
2917 			free(ld_info, M_MFIBUF);
2918 			return (0);
2919 		}
2920 
2921 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2922 			free(ld_info, M_MFIBUF);
2923 			mfi_release_command(ld_cm);
2924 			return (0);
2925 		} else
2927 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2928 
2929 		if (ld_info->ld_config.params.isSSCD == 1)
2930 			error = 1;
2931 
2932 		mfi_release_command(ld_cm);
2933 		free(ld_info, M_MFIBUF);
2935 	}
2936 	return (error);
2937 }
2938 
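/*
 * Check whether a user command targets an SSCD (solid-state cache) volume.
 * The caller uses the result to skip the usual pre/post checks, since SSCD
 * volumes are never attached as mfid children (see mfi_add_ld).
 */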
2939 static int
2940 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2941 {
2942 	uint8_t i;
2943 	struct mfi_ioc_packet *ioc;
2944 	struct megasas_sge *kern_sge;
2945 	int sge_size, error;
2946 
2947 	ioc = (struct mfi_ioc_packet *)arg;
2948 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2949 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2950 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2951 
2952 	if (sizeof(bus_addr_t) == 8) {
2953 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2954 		cm->cm_extra_frames = 2;
2955 		sge_size = sizeof(struct mfi_sg64);
2956 	} else {
2957 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2958 		sge_size = sizeof(struct mfi_sg32);
2959 	}
2960 
2961 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
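	/*
	 * For each user SGE: create a DMA tag, allocate a kernel bounce
	 * buffer, load it to obtain a bus address, point both the megasas
	 * SGE array and the frame's own SGL at it, then copy the user
	 * data in.
	 */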
2962 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2963 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2964 			1, 0,			/* algnmnt, boundary */
2965 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2966 			BUS_SPACE_MAXADDR,	/* highaddr */
2967 			NULL, NULL,		/* filter, filterarg */
2968 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2969 			2,			/* nsegments */
2970 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2971 			BUS_DMA_ALLOCNOW,	/* flags */
2972 			NULL, NULL,		/* lockfunc, lockarg */
2973 			&sc->mfi_kbuff_arr_dmat[i])) {
2974 			device_printf(sc->mfi_dev,
2975 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2976 			return (ENOMEM);
2977 		}
2978 
2979 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2980 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2981 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2982 			device_printf(sc->mfi_dev,
2983 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2984 			return (ENOMEM);
2985 		}
2986 
2987 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2988 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2989 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2990 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2991 
2992 		if (!sc->kbuff_arr[i]) {
2993 			device_printf(sc->mfi_dev,
2994 			    "Could not allocate memory for kbuff_arr info\n");
2995 			return (ENOMEM);
2996 		}
2997 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2998 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2999 
3000 		if (sizeof(bus_addr_t) == 8) {
3001 			cm->cm_frame->stp.sgl.sg64[i].addr =
3002 			    kern_sge[i].phys_addr;
3003 			cm->cm_frame->stp.sgl.sg64[i].len =
3004 			    ioc->mfi_sgl[i].iov_len;
3005 		} else {
3006 			cm->cm_frame->stp.sgl.sg32[i].addr =
3007 			    kern_sge[i].phys_addr;
3008 			cm->cm_frame->stp.sgl.sg32[i].len =
3009 			    ioc->mfi_sgl[i].iov_len;
3010 		}
3011 
3012 		error = copyin(ioc->mfi_sgl[i].iov_base,
3013 		    sc->kbuff_arr[i],
3014 		    ioc->mfi_sgl[i].iov_len);
3015 		if (error != 0) {
3016 			device_printf(sc->mfi_dev, "Copy in failed\n");
3017 			return (error);
3018 		}
3019 	}
3020 
3021 	cm->cm_flags |= MFI_CMD_MAPPED;
3022 	return (0);
3023 }
3024 
3025 static int
3026 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3027 {
3028 	struct mfi_command *cm;
3029 	struct mfi_dcmd_frame *dcmd;
3030 	void *ioc_buf = NULL;
3031 	uint32_t context;
3032 	int error = 0, locked;
3033 
3035 	if (ioc->buf_size > 0) {
3036 		if (ioc->buf_size > 1024 * 1024)
3037 			return (ENOMEM);
3038 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3039 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3040 		if (error) {
3041 			device_printf(sc->mfi_dev, "failed to copyin\n");
3042 			free(ioc_buf, M_MFIBUF);
3043 			return (error);
3044 		}
3045 	}
3046 
3047 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3048 
3049 	mtx_lock(&sc->mfi_io_lock);
3050 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3051 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3052 
3053 	/* Save context for later */
3054 	context = cm->cm_frame->header.context;
3055 
3056 	dcmd = &cm->cm_frame->dcmd;
3057 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3058 
3059 	cm->cm_sg = &dcmd->sgl;
3060 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3061 	cm->cm_data = ioc_buf;
3062 	cm->cm_len = ioc->buf_size;
3063 
3064 	/* restore context */
3065 	cm->cm_frame->header.context = context;
3066 
3067 	/* Cheat since we don't know if we're writing or reading */
3068 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3069 
3070 	error = mfi_check_command_pre(sc, cm);
3071 	if (error)
3072 		goto out;
3073 
3074 	error = mfi_wait_command(sc, cm);
3075 	if (error) {
3076 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3077 		goto out;
3078 	}
3079 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3080 	mfi_check_command_post(sc, cm);
3081 out:
3082 	mfi_release_command(cm);
3083 	mtx_unlock(&sc->mfi_io_lock);
3084 	mfi_config_unlock(sc, locked);
3085 	if (ioc->buf_size > 0)
3086 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3087 	if (ioc_buf)
3088 		free(ioc_buf, M_MFIBUF);
3089 	return (error);
3090 }
3091 
3092 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3093 
3094 static int
3095 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3096 {
3097 	struct mfi_softc *sc;
3098 	union mfi_statrequest *ms;
3099 	struct mfi_ioc_packet *ioc;
3100 #ifdef COMPAT_FREEBSD32
3101 	struct mfi_ioc_packet32 *ioc32;
3102 #endif
3103 	struct mfi_ioc_aen *aen;
3104 	struct mfi_command *cm = NULL;
3105 	uint32_t context = 0;
3106 	union mfi_sense_ptr sense_ptr;
3107 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3108 	size_t len;
3109 	int i, res;
3110 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3111 #ifdef COMPAT_FREEBSD32
3112 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3113 	struct mfi_ioc_passthru iop_swab;
3114 #endif
3115 	int error, locked;
3116 	union mfi_sgl *sgl;
3117 	sc = dev->si_drv1;
3118 	error = 0;
3119 
3120 	if (sc->adpreset || sc->hw_crit_error || sc->issuepend_done == 0)
3121 		return (EBUSY);
3128 
3129 	switch (cmd) {
3130 	case MFIIO_STATS:
3131 		ms = (union mfi_statrequest *)arg;
3132 		switch (ms->ms_item) {
3133 		case MFIQ_FREE:
3134 		case MFIQ_BIO:
3135 		case MFIQ_READY:
3136 		case MFIQ_BUSY:
3137 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3138 			    sizeof(struct mfi_qstat));
3139 			break;
3140 		default:
3141 			error = ENOIOCTL;
3142 			break;
3143 		}
3144 		break;
3145 	case MFIIO_QUERY_DISK:
3146 	{
3147 		struct mfi_query_disk *qd;
3148 		struct mfi_disk *ld;
3149 
3150 		qd = (struct mfi_query_disk *)arg;
3151 		mtx_lock(&sc->mfi_io_lock);
3152 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3153 			if (ld->ld_id == qd->array_id)
3154 				break;
3155 		}
3156 		if (ld == NULL) {
3157 			qd->present = 0;
3158 			mtx_unlock(&sc->mfi_io_lock);
3159 			return (0);
3160 		}
3161 		qd->present = 1;
3162 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3163 			qd->open = 1;
3164 		bzero(qd->devname, SPECNAMELEN + 1);
3165 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3166 		mtx_unlock(&sc->mfi_io_lock);
3167 		break;
3168 	}
3169 	case MFI_CMD:
3170 #ifdef COMPAT_FREEBSD32
3171 	case MFI_CMD32:
3172 #endif
3173 		{
3174 		devclass_t devclass;
3175 		int adapter;
3176 
3177 		ioc = (struct mfi_ioc_packet *)arg;
3178 		adapter = ioc->mfi_adapter_no;
3179 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3180 			devclass = devclass_find("mfi");
3181 			sc = devclass_get_softc(devclass, adapter);
3182 		}
3183 		mtx_lock(&sc->mfi_io_lock);
3184 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3185 			mtx_unlock(&sc->mfi_io_lock);
3186 			return (EBUSY);
3187 		}
3188 		mtx_unlock(&sc->mfi_io_lock);
3189 		locked = 0;
3190 
3191 		/*
3192 		 * save off original context since copying from user
3193 		 * will clobber some data
3194 		 */
3195 		context = cm->cm_frame->header.context;
3196 		cm->cm_frame->header.context = cm->cm_index;
3197 
3198 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3199 		    2 * MEGAMFI_FRAME_SIZE);
3200 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3201 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3202 		cm->cm_frame->header.scsi_status = 0;
3203 		cm->cm_frame->header.pad0 = 0;
3204 		if (ioc->mfi_sge_count) {
3205 			cm->cm_sg =
3206 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3207 		}
3208 		sgl = cm->cm_sg;
3209 		cm->cm_flags = 0;
3210 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3211 			cm->cm_flags |= MFI_CMD_DATAIN;
3212 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3213 			cm->cm_flags |= MFI_CMD_DATAOUT;
3214 		/* Legacy app shim */
3215 		if (cm->cm_flags == 0)
3216 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3217 		cm->cm_len = cm->cm_frame->header.data_len;
3218 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3219 #ifdef COMPAT_FREEBSD32
3220 			if (cmd == MFI_CMD) {
3221 #endif
3222 				/* Native */
3223 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3224 #ifdef COMPAT_FREEBSD32
3225 			} else {
3226 				/* 32bit on 64bit */
3227 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3228 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3229 			}
3230 #endif
3231 			cm->cm_len += cm->cm_stp_len;
3232 		}
3233 		if (cm->cm_len &&
3234 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3235 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3236 			    M_WAITOK | M_ZERO);
3237 			if (cm->cm_data == NULL) {
3238 				device_printf(sc->mfi_dev, "Malloc failed\n");
3239 				goto out;
3240 			}
3241 		} else {
3242 			cm->cm_data = 0;
3243 		}
3244 
3245 		/* restore header context */
3246 		cm->cm_frame->header.context = context;
3247 
3248 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3249 			res = mfi_stp_cmd(sc, cm, arg);
3250 			if (res != 0)
3251 				goto out;
3252 		} else {
3253 			temp = data;
3254 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3255 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3256 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3257 #ifdef COMPAT_FREEBSD32
3258 					if (cmd == MFI_CMD) {
3259 #endif
3260 						/* Native */
3261 						addr = ioc->mfi_sgl[i].iov_base;
3262 						len = ioc->mfi_sgl[i].iov_len;
3263 #ifdef COMPAT_FREEBSD32
3264 					} else {
3265 						/* 32bit on 64bit */
3266 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3267 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3268 						len = ioc32->mfi_sgl[i].iov_len;
3269 					}
3270 #endif
3271 					error = copyin(addr, temp, len);
3272 					if (error != 0) {
3273 						device_printf(sc->mfi_dev,
3274 						    "Copy in failed\n");
3275 						goto out;
3276 					}
3277 					temp = &temp[len];
3278 				}
3279 			}
3280 		}
3281 
3282 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3283 			locked = mfi_config_lock(sc,
3284 			     cm->cm_frame->dcmd.opcode);
3285 
3286 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3287 			cm->cm_frame->pass.sense_addr_lo =
3288 			    (uint32_t)cm->cm_sense_busaddr;
3289 			cm->cm_frame->pass.sense_addr_hi =
3290 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3291 		}
3292 		mtx_lock(&sc->mfi_io_lock);
3293 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3294 		if (!skip_pre_post) {
3295 			error = mfi_check_command_pre(sc, cm);
3296 			if (error) {
3297 				mtx_unlock(&sc->mfi_io_lock);
3298 				goto out;
3299 			}
3300 		}
3301 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3302 			device_printf(sc->mfi_dev,
3303 			    "Controller command failed\n");
3304 			mtx_unlock(&sc->mfi_io_lock);
3305 			goto out;
3306 		}
3307 		if (!skip_pre_post) {
3308 			mfi_check_command_post(sc, cm);
3309 		}
3310 		mtx_unlock(&sc->mfi_io_lock);
3311 
3312 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3313 			temp = data;
3314 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3315 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3316 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3317 #ifdef COMPAT_FREEBSD32
3318 					if (cmd == MFI_CMD) {
3319 #endif
3320 						/* Native */
3321 						addr = ioc->mfi_sgl[i].iov_base;
3322 						len = ioc->mfi_sgl[i].iov_len;
3323 #ifdef COMPAT_FREEBSD32
3324 					} else {
3325 						/* 32bit on 64bit */
3326 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3327 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3328 						len = ioc32->mfi_sgl[i].iov_len;
3329 					}
3330 #endif
3331 					error = copyout(temp, addr, len);
3332 					if (error != 0) {
3333 						device_printf(sc->mfi_dev,
3334 						    "Copy out failed\n");
3335 						goto out;
3336 					}
3337 					temp = &temp[len];
3338 				}
3339 			}
3340 		}
3341 
3342 		if (ioc->mfi_sense_len) {
3343 			/* get user-space sense ptr then copy out sense */
3344 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3345 			    &sense_ptr.sense_ptr_data[0],
3346 			    sizeof(sense_ptr.sense_ptr_data));
3347 #ifdef COMPAT_FREEBSD32
3348 			if (cmd != MFI_CMD) {
3349 				/*
3350 				 * Not 64-bit native, so zero out any address
3351 				 * over 32 bits. */
3352 				sense_ptr.addr.high = 0;
3353 			}
3354 #endif
3355 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3356 			    ioc->mfi_sense_len);
3357 			if (error != 0) {
3358 				device_printf(sc->mfi_dev,
3359 				    "Copy out failed\n");
3360 				goto out;
3361 			}
3362 		}
3363 
3364 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3365 out:
3366 		mfi_config_unlock(sc, locked);
3367 		if (data)
3368 			free(data, M_MFIBUF);
3369 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3370 			for (i = 0; i < 2; i++) {
3371 				if (sc->kbuff_arr[i]) {
3372 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3373 						bus_dmamap_unload(
3374 						    sc->mfi_kbuff_arr_dmat[i],
3375 						    sc->mfi_kbuff_arr_dmamap[i]
3376 						    );
3377 					if (sc->kbuff_arr[i] != NULL)
3378 						bus_dmamem_free(
3379 						    sc->mfi_kbuff_arr_dmat[i],
3380 						    sc->kbuff_arr[i],
3381 						    sc->mfi_kbuff_arr_dmamap[i]
3382 						    );
3383 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3384 						bus_dma_tag_destroy(
3385 						    sc->mfi_kbuff_arr_dmat[i]);
3386 				}
3387 			}
3388 		}
3389 		if (cm) {
3390 			mtx_lock(&sc->mfi_io_lock);
3391 			mfi_release_command(cm);
3392 			mtx_unlock(&sc->mfi_io_lock);
3393 		}
3394 
3395 		break;
3396 		}
3397 	case MFI_SET_AEN:
3398 		aen = (struct mfi_ioc_aen *)arg;
3399 		mtx_lock(&sc->mfi_io_lock);
3400 		error = mfi_aen_register(sc, aen->aen_seq_num,
3401 		    aen->aen_class_locale);
3402 		mtx_unlock(&sc->mfi_io_lock);
3403 
3404 		break;
3405 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3406 		{
3407 			devclass_t devclass;
3408 			struct mfi_linux_ioc_packet l_ioc;
3409 			int adapter;
3410 
3411 			devclass = devclass_find("mfi");
3412 			if (devclass == NULL)
3413 				return (ENOENT);
3414 
3415 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3416 			if (error)
3417 				return (error);
3418 			adapter = l_ioc.lioc_adapter_no;
3419 			sc = devclass_get_softc(devclass, adapter);
3420 			if (sc == NULL)
3421 				return (ENOENT);
3422 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3423 			    cmd, arg, flag, td));
3424 			break;
3425 		}
3426 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3427 		{
3428 			devclass_t devclass;
3429 			struct mfi_linux_ioc_aen l_aen;
3430 			int adapter;
3431 
3432 			devclass = devclass_find("mfi");
3433 			if (devclass == NULL)
3434 				return (ENOENT);
3435 
3436 			error = copyin(arg, &l_aen, sizeof(l_aen));
3437 			if (error)
3438 				return (error);
3439 			adapter = l_aen.laen_adapter_no;
3440 			sc = devclass_get_softc(devclass, adapter);
3441 			if (sc == NULL)
3442 				return (ENOENT);
3443 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3444 			    cmd, arg, flag, td));
3445 			break;
3446 		}
3447 #ifdef COMPAT_FREEBSD32
3448 	case MFIIO_PASSTHRU32:
3449 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3450 			error = ENOTTY;
3451 			break;
3452 		}
3453 		iop_swab.ioc_frame	= iop32->ioc_frame;
3454 		iop_swab.buf_size	= iop32->buf_size;
3455 		iop_swab.buf		= PTRIN(iop32->buf);
3456 		iop			= &iop_swab;
3457 		/* FALLTHROUGH */
3458 #endif
3459 	case MFIIO_PASSTHRU:
3460 		error = mfi_user_command(sc, iop);
3461 #ifdef COMPAT_FREEBSD32
3462 		if (cmd == MFIIO_PASSTHRU32)
3463 			iop32->ioc_frame = iop_swab.ioc_frame;
3464 #endif
3465 		break;
3466 	default:
3467 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3468 		error = ENOTTY;
3469 		break;
3470 	}
3471 
3472 	return (error);
3473 }
3474 
3475 static int
3476 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3477 {
3478 	struct mfi_softc *sc;
3479 	struct mfi_linux_ioc_packet l_ioc;
3480 	struct mfi_linux_ioc_aen l_aen;
3481 	struct mfi_command *cm = NULL;
3482 	struct mfi_aen *mfi_aen_entry;
3483 	union mfi_sense_ptr sense_ptr;
3484 	uint32_t context = 0;
3485 	uint8_t *data = NULL, *temp;
3486 	int i;
3487 	int error, locked;
3488 
3489 	sc = dev->si_drv1;
3490 	error = 0;
3491 	switch (cmd) {
3492 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3493 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3494 		if (error != 0)
3495 			return (error);
3496 
3497 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3498 			return (EINVAL);
3499 		}
3500 
3501 		mtx_lock(&sc->mfi_io_lock);
3502 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3503 			mtx_unlock(&sc->mfi_io_lock);
3504 			return (EBUSY);
3505 		}
3506 		mtx_unlock(&sc->mfi_io_lock);
3507 		locked = 0;
3508 
3509 		/*
3510 		 * save off original context since copying from user
3511 		 * will clobber some data
3512 		 */
3513 		context = cm->cm_frame->header.context;
3514 
		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* XXX not quite right: always
						 * copies two frames' worth */
3517 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3518 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3519 		cm->cm_frame->header.scsi_status = 0;
3520 		cm->cm_frame->header.pad0 = 0;
3521 		if (l_ioc.lioc_sge_count)
3522 			cm->cm_sg =
3523 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3524 		cm->cm_flags = 0;
3525 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3526 			cm->cm_flags |= MFI_CMD_DATAIN;
3527 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3528 			cm->cm_flags |= MFI_CMD_DATAOUT;
3529 		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			/* M_WAITOK allocations never return NULL */
			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = NULL;
		}
3541 
3542 		/* restore header context */
3543 		cm->cm_frame->header.context = context;
3544 
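		/*
		 * For DATAOUT commands, gather each user segment described by
		 * the Linux SGL into the contiguous kernel bounce buffer.
		 * XXX the segment lengths come from userland and are trusted
		 * to sum to no more than cm->cm_len.
		 */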
3545 		temp = data;
3546 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3547 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3548 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3549 				       temp,
3550 				       l_ioc.lioc_sgl[i].iov_len);
3551 				if (error != 0) {
3552 					device_printf(sc->mfi_dev,
3553 					    "Copy in failed\n");
3554 					goto out;
3555 				}
3556 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3557 			}
3558 		}
3559 
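		/* DCMDs that touch controller configuration take the config lock. */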
3560 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3561 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3562 
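		/*
		 * SCSI passthrough frames get pointed at the command's
		 * preallocated, DMA-visible sense buffer.
		 */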
3563 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3564 			cm->cm_frame->pass.sense_addr_lo =
3565 			    (uint32_t)cm->cm_sense_busaddr;
3566 			cm->cm_frame->pass.sense_addr_hi =
3567 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3568 		}
3569 
3570 		mtx_lock(&sc->mfi_io_lock);
3571 		error = mfi_check_command_pre(sc, cm);
3572 		if (error) {
3573 			mtx_unlock(&sc->mfi_io_lock);
3574 			goto out;
3575 		}
3576 
3577 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3578 			device_printf(sc->mfi_dev,
			    "Controller polled command failed\n");
3580 			mtx_unlock(&sc->mfi_io_lock);
3581 			goto out;
3582 		}
3583 
3584 		mfi_check_command_post(sc, cm);
3585 		mtx_unlock(&sc->mfi_io_lock);
3586 
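		/*
		 * For DATAIN commands, scatter the results from the kernel
		 * buffer back out to the user segments in the SGL.
		 */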
3587 		temp = data;
3588 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3589 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3590 				error = copyout(temp,
3591 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3592 					l_ioc.lioc_sgl[i].iov_len);
3593 				if (error != 0) {
3594 					device_printf(sc->mfi_dev,
3595 					    "Copy out failed\n");
3596 					goto out;
3597 				}
3598 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3599 			}
3600 		}
3601 
3602 		if (l_ioc.lioc_sense_len) {
3603 			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet *)arg)
			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
3608 #ifdef __amd64__
			/*
			 * Only 32-bit Linux is supported, so zero out any
			 * address bits above 32.
			 */
3613 			sense_ptr.addr.high = 0;
3614 #endif
3615 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3616 			    l_ioc.lioc_sense_len);
3617 			if (error != 0) {
3618 				device_printf(sc->mfi_dev,
3619 				    "Copy out failed\n");
3620 				goto out;
3621 			}
3622 		}
3623 
3624 		error = copyout(&cm->cm_frame->header.cmd_status,
3625 			&((struct mfi_linux_ioc_packet*)arg)
3626 			->lioc_frame.hdr.cmd_status,
3627 			1);
3628 		if (error != 0) {
3629 			device_printf(sc->mfi_dev,
3630 				      "Copy out failed\n");
3631 			goto out;
3632 		}
3633 
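		/* Common cleanup path for both success and failure. */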
3634 out:
3635 		mfi_config_unlock(sc, locked);
3636 		if (data)
3637 			free(data, M_MFIBUF);
3638 		if (cm) {
3639 			mtx_lock(&sc->mfi_io_lock);
3640 			mfi_release_command(cm);
3641 			mtx_unlock(&sc->mfi_io_lock);
3642 		}
3643 
3644 		return (error);
3645 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3646 		error = copyin(arg, &l_aen, sizeof(l_aen));
3647 		if (error != 0)
3648 			return (error);
		printf("AEN registration for pid %d\n", curproc->p_pid);
		/* M_WAITOK allocations never return NULL */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
3658 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3659 		    l_aen.laen_class_locale);
3660 
3661 		if (error != 0) {
3662 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3663 			    aen_link);
3664 			free(mfi_aen_entry, M_MFIBUF);
3665 		}
3666 		mtx_unlock(&sc->mfi_io_lock);
3667 
3668 		return (error);
3669 	default:
3670 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3671 		error = ENOENT;
3672 		break;
3673 	}
3674 
3675 	return (error);
3676 }
3677 
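/*
 * poll(2) handler for the control device: becomes readable when an AEN
 * has fired; reports POLLERR when no AEN command is outstanding.
 */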
3678 static int
3679 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3680 {
3681 	struct mfi_softc *sc;
3682 	int revents = 0;
3683 
3684 	sc = dev->si_drv1;
3685 
3686 	if (poll_events & (POLLIN | POLLRDNORM)) {
3687 		if (sc->mfi_aen_triggered != 0) {
3688 			revents |= poll_events & (POLLIN | POLLRDNORM);
3689 			sc->mfi_aen_triggered = 0;
3690 		}
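		/*
		 * With no AEN command outstanding nothing will ever become
		 * readable, so report an error rather than block forever.
		 */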
3691 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3692 			revents |= POLLERR;
3693 		}
3694 	}
3695 
3696 	if (revents == 0) {
3697 		if (poll_events & (POLLIN | POLLRDNORM)) {
3698 			sc->mfi_poll_waiting = 1;
3699 			selrecord(td, &sc->mfi_select);
3700 		}
3701 	}
3702 
	return (revents);
3704 }
3705 
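/*
 * Debugging aid: walk every mfi instance and report busy commands that
 * have been outstanding for more than mfi_cmd_timeout seconds.
 */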
3706 static void
3707 mfi_dump_all(void)
3708 {
3709 	struct mfi_softc *sc;
3710 	struct mfi_command *cm;
3711 	devclass_t dc;
3712 	time_t deadline;
3713 	int timedout;
3714 	int i;
3715 
3716 	dc = devclass_find("mfi");
3717 	if (dc == NULL) {
3718 		printf("No mfi dev class\n");
3719 		return;
3720 	}
3721 
3722 	for (i = 0; ; i++) {
3723 		sc = devclass_get_softc(dc, i);
3724 		if (sc == NULL)
3725 			break;
3726 		device_printf(sc->mfi_dev, "Dumping\n\n");
3727 		timedout = 0;
3728 		deadline = time_uptime - mfi_cmd_timeout;
3729 		mtx_lock(&sc->mfi_io_lock);
3730 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3731 			if (cm->cm_timestamp <= deadline) {
3732 				device_printf(sc->mfi_dev,
3733 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3734 				    cm, (int)(time_uptime - cm->cm_timestamp));
3735 				MFI_PRINT_CMD(cm);
3736 				timedout++;
3737 			}
3738 		}
3739 
3740 #if 0
3741 		if (timedout)
3742 			MFI_DUMP_CMDS(sc);
3743 #endif
3744 
3745 		mtx_unlock(&sc->mfi_io_lock);
3746 	}
3749 }
3750 
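/*
 * Watchdog callout, rescheduled every mfi_cmd_timeout seconds.  Reports
 * busy commands that have exceeded the timeout; on Thunderbolt
 * controllers the adapter reset logic gets a chance to run first.
 */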
3751 static void
3752 mfi_timeout(void *data)
3753 {
3754 	struct mfi_softc *sc = (struct mfi_softc *)data;
3755 	struct mfi_command *cm, *tmp;
3756 	time_t deadline;
3757 	int timedout = 0;
3758 
3759 	deadline = time_uptime - mfi_cmd_timeout;
3760 	if (sc->adpreset == 0) {
3761 		if (!mfi_tbolt_reset(sc)) {
3762 			callout_reset(&sc->mfi_watchdog_callout,
3763 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3764 			return;
3765 		}
3766 	}
3767 	mtx_lock(&sc->mfi_io_lock);
3768 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3769 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3770 			continue;
3771 		if (cm->cm_timestamp <= deadline) {
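			/*
			 * While an adapter reset is pending and the busy
			 * queue has not been reissued, refresh the timestamp
			 * instead of declaring a timeout.
			 */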
3772 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3773 				cm->cm_timestamp = time_uptime;
3774 			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
3779 				MFI_PRINT_CMD(cm);
3780 				MFI_VALIDATE_CMD(sc, cm);
				/*
				 * While commands can get stuck forever, we
				 * do not fail them, as there is no way to
				 * tell whether the controller has actually
				 * processed them or not.
				 *
				 * In addition, it is very likely that
				 * force-failing a command here would cause
				 * a panic, e.g. in UFS.
				 */
3791 				timedout++;
3792 			}
3793 		}
3794 	}
3795 
3796 #if 0
3797 	if (timedout)
3798 		MFI_DUMP_CMDS(sc);
3799 #endif
3800 
3801 	mtx_unlock(&sc->mfi_io_lock);
3802 
3803 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3804 	    mfi_timeout, sc);
3805 
	/* Debugging hook: change to 1 to dump all busy commands each pass. */
	if (0)
		mfi_dump_all();
3809 }
3810