/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc,
		    struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc,
		    struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc,
		    struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
    0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

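/*
 * The supported controllers speak one of two register-level dialects:
 * the older xscale (1064R) layout and the ppc layout shared by the
 * 1078, GEN2, and Skinny parts.  mfi_attach() fills the sc->mfi_*
 * method pointers with the matching set of helpers below; the
 * ThunderBolt parts get their own set, defined elsewhere in the driver.
 */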
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return 1;
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

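/*
 * Step the firmware through its boot states until it reports READY,
 * nudging it with the appropriate doorbell writes along the way.  Each
 * state is given up to MFI_RESET_WAIT_TIME seconds, polled in 100ms
 * steps; DEVICE_SCAN is allowed to continue for as long as the raw
 * status register keeps changing, i.e. as long as the scan is making
 * progress.
 */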
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

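/*
 * bus_dmamap_load() callback used by the static, single-segment
 * allocations below: it simply records the segment's bus address.  It
 * ignores the error argument, which the callers get away with only
 * because these loads are of small, contiguous, pre-allocated buffers.
 */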
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x, MaxSgl = %x, state = %x\n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a separate DMA mapping for the MPI2 IOC Init
		 * descriptor; it is kept apart from the request and reply
		 * descriptors allocated above to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
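	/*
	 * Illustrative sizing (assuming 64-byte MFI frames and 12-byte
	 * 64-bit SGEs): with 33 segments the worst-case SG list is 396
	 * bytes, so the calculation below reserves (396 - 1) / 64 + 2 = 8
	 * frames, i.e. 512 bytes, per command.
	 */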
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return (error);
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management.
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (sc->mfi_cdev != NULL) {
		if (unit == 0)
			make_dev_alias(sc->mfi_cdev,
			    "megaraid_sas_ioctl_node");
		sc->mfi_cdev->si_drv1 = sc;
	}
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_sync_map_info(sc);
	}

	return (0);
}

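/*
 * Pre-allocate the driver's command pool, one mfi_command per firmware
 * slot, capped by the hw.mfi.max_cmds tunable.  Each command is wired
 * to its slice of the frame and sense DMA areas, and the frame header's
 * context field doubles as the command's index so that completions can
 * be mapped back to commands cheaply.
 */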
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds = %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) != 0)
			break;
		mtx_lock(&sc->mfi_io_lock);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		sc->mfi_total_cmds++;
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

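/*
 * Build a DCMD command, allocating a data buffer for it when *bufp is
 * NULL.  The caller must hold the io lock and remains responsible for
 * setting the MFI_CMD_DATAIN/DATAOUT flags and issuing the command.
 */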
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

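/*
 * Send the legacy MFI INIT frame, handing the firmware the bus
 * addresses of the reply queue and the producer/consumer indexes that
 * live in mfi_hwcomms.  The qinfo structure is staged in the frame's
 * SG list area, which is otherwise unused for this command.
 */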
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	free(log_state, M_MFIBUF);

	return 0;
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);
		/* End LSIP200113393 */

		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

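/*
 * Legacy (non-ThunderBolt) interrupt handler.  The firmware posts
 * completed command contexts to the hw_reply_q ring and advances the
 * producer index (hw_pi); the driver consumes entries until the
 * consumer index (hw_ci) catches up and then writes hw_ci back for the
 * firmware.  A final register read flushes posted writes before
 * checking whether more completions raced in.
 */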
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->mfi_map_sync_cm != NULL)
		mfi_abort(sc, sc->mfi_map_sync_cm);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

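/*
 * Rescan the firmware's view of exposed system PDs (JBOD disks):
 * attach children for PDs that have appeared and detach the ones whose
 * state changed or that vanished from the list.  Called at startup for
 * Skinny controllers and again from event handling when
 * hw.mfi.detect_jbod_change is enabled.
 */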
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PDs */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPDs whose state has changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
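/*
 * Note that both formatters below return pointers to static buffers,
 * so they are not reentrant; this is tolerable only because events are
 * decoded serially from the event taskqueue.
 */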
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPDs and delete
				 * invalid SYSPDs.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one that was logged after shutdown.  Avoid
		 * acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Don't KASSERT(ld != NULL) here: when an SSCD is
			 * removed the volume may legitimately be gone
			 * already, and asserting would panic the kernel.
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPDs */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	}
}

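/*
 * Events arrive in interrupt context, so they are only copied onto a
 * queue here; the actual decoding, which may need to sleep
 * (device_delete_child() requires Giant), happens later from the
 * taskqueue_swi task.
 */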
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

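/*
 * Post the asynchronous event notification (AEN) command that the
 * firmware completes when the next event at or after 'seq' fires.  If
 * an AEN is already outstanding, its class/locale filter is widened to
 * cover the new request and the old command is aborted so it can be
 * reissued; mfi_aen_complete() then re-arms the notification.
 */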
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mtx_lock(&sc->mfi_io_lock);
			mfi_abort(sc, sc->mfi_aen_cm);
			mtx_unlock(&sc->mfi_io_lock);
		}
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	mtx_unlock(&sc->mfi_io_lock);
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mtx_lock(&sc->mfi_io_lock);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* Set it up again so the driver can catch more events */
	if (!aborted) {
		mtx_unlock(&sc->mfi_io_lock);
		mfi_aen_setup(sc, seq);
		mtx_lock(&sc->mfi_io_lock);
	}
}

1707 #define MAX_EVENTS 15
1708 
1709 static int
1710 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1711 {
1712 	struct mfi_command *cm;
1713 	struct mfi_dcmd_frame *dcmd;
1714 	struct mfi_evt_list *el;
1715 	union mfi_evt class_locale;
1716 	int error, i, seq, size;
1717 
1718 	class_locale.members.reserved = 0;
1719 	class_locale.members.locale = mfi_event_locale;
1720 	class_locale.members.evt_class = mfi_event_class;
1721 
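	/*
	 * struct mfi_evt_list already ends in a single-element array of
	 * mfi_evt_detail, hence the MAX_EVENTS - 1 below to size the
	 * buffer for MAX_EVENTS entries in total.
	 */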
1722 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1723 		* (MAX_EVENTS - 1);
1724 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1725 	if (el == NULL)
1726 		return (ENOMEM);
1727 
1728 	for (seq = start_seq;;) {
1729 		mtx_lock(&sc->mfi_io_lock);
1730 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1731 			free(el, M_MFIBUF);
1732 			mtx_unlock(&sc->mfi_io_lock);
1733 			return (EBUSY);
1734 		}
1735 		mtx_unlock(&sc->mfi_io_lock);
1736 
1737 		dcmd = &cm->cm_frame->dcmd;
1738 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1739 		dcmd->header.cmd = MFI_CMD_DCMD;
1740 		dcmd->header.timeout = 0;
1741 		dcmd->header.data_len = size;
1742 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1743 		((uint32_t *)&dcmd->mbox)[0] = seq;
1744 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1745 		cm->cm_sg = &dcmd->sgl;
1746 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1747 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1748 		cm->cm_data = el;
1749 		cm->cm_len = size;
1750 
1751 		mtx_lock(&sc->mfi_io_lock);
1752 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1753 			device_printf(sc->mfi_dev,
1754 			    "Failed to get controller entries\n");
1755 			mfi_release_command(cm);
1756 			mtx_unlock(&sc->mfi_io_lock);
1757 			break;
1758 		}
1759 
1760 		mtx_unlock(&sc->mfi_io_lock);
1761 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1762 		    BUS_DMASYNC_POSTREAD);
1763 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1764 
1765 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1766 			mtx_lock(&sc->mfi_io_lock);
1767 			mfi_release_command(cm);
1768 			mtx_unlock(&sc->mfi_io_lock);
1769 			break;
1770 		}
1771 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1772 			device_printf(sc->mfi_dev,
1773 			    "Error %d fetching controller entries\n",
1774 			    dcmd->header.cmd_status);
1775 			mtx_lock(&sc->mfi_io_lock);
1776 			mfi_release_command(cm);
1777 			mtx_unlock(&sc->mfi_io_lock);
1778 			break;
1779 		}
1780 		mtx_lock(&sc->mfi_io_lock);
1781 		mfi_release_command(cm);
1782 		mtx_unlock(&sc->mfi_io_lock);
1783 
1784 		for (i = 0; i < el->count; i++) {
1785 			/*
1786 			 * If this event is newer than 'stop_seq' then
1787 			 * break out of the loop.  Note that the log
1788 			 * is a circular buffer so we have to handle
1789 			 * the case that our stop point is earlier in
1790 			 * the buffer than our start point.
1791 			 */
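			/*
			 * Example: start_seq = 500, stop_seq = 20 after a
			 * wrap.  Entries with seq >= 500 or seq < 20 are
			 * queued; the first entry with 20 <= seq < 500
			 * terminates the scan.
			 */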
1792 			if (el->event[i].seq >= stop_seq) {
1793 				if (start_seq <= stop_seq)
1794 					break;
1795 				else if (el->event[i].seq < start_seq)
1796 					break;
1797 			}
1798 			mtx_lock(&sc->mfi_io_lock);
1799 			mfi_queue_evt(sc, &el->event[i]);
1800 			mtx_unlock(&sc->mfi_io_lock);
1801 		}
1802 		seq = el->event[el->count - 1].seq + 1;
1803 	}
1804 
1805 	free(el, M_MFIBUF);
1806 	return (0);
1807 }
1808 
1809 static int
1810 mfi_add_ld(struct mfi_softc *sc, int id)
1811 {
1812 	struct mfi_command *cm;
1813 	struct mfi_dcmd_frame *dcmd = NULL;
1814 	struct mfi_ld_info *ld_info = NULL;
1815 	int error;
1816 
1817 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1818 
1819 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1820 	    (void **)&ld_info, sizeof(*ld_info));
1821 	if (error) {
1822 		device_printf(sc->mfi_dev,
1823 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1824 		if (ld_info)
1825 			free(ld_info, M_MFIBUF);
1826 		return (error);
1827 	}
1828 	cm->cm_flags = MFI_CMD_DATAIN;
1829 	dcmd = &cm->cm_frame->dcmd;
1830 	dcmd->mbox[0] = id;
1831 	if (mfi_wait_command(sc, cm) != 0) {
1832 		device_printf(sc->mfi_dev,
1833 		    "Failed to get logical drive: %d\n", id);
1834 		free(ld_info, M_MFIBUF);
1835 		return (0);
1836 	}
1837 	if (ld_info->ld_config.params.isSSCD != 1)
1838 		mfi_add_ld_complete(cm);
1839 	else {
1840 		mfi_release_command(cm);
1841 		if (ld_info)		/* ld_info is freed here for SSCD volumes */
1842 			free(ld_info, M_MFIBUF);
1843 	}
1844 	return (0);
1845 }
1846 
1847 static void
1848 mfi_add_ld_complete(struct mfi_command *cm)
1849 {
1850 	struct mfi_frame_header *hdr;
1851 	struct mfi_ld_info *ld_info;
1852 	struct mfi_softc *sc;
1853 	device_t child;
1854 
1855 	sc = cm->cm_sc;
1856 	hdr = &cm->cm_frame->header;
1857 	ld_info = cm->cm_private;
1858 
1859 	if (hdr->cmd_status != MFI_STAT_OK) {
1860 		free(ld_info, M_MFIBUF);
1861 		mfi_release_command(cm);
1862 		return;
1863 	}
1864 	mfi_release_command(cm);
1865 
1866 	mtx_unlock(&sc->mfi_io_lock);
1867 	mtx_lock(&Giant);
1868 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1869 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1870 		free(ld_info, M_MFIBUF);
1871 		mtx_unlock(&Giant);
1872 		mtx_lock(&sc->mfi_io_lock);
1873 		return;
1874 	}
1875 
1876 	device_set_ivars(child, ld_info);
1877 	device_set_desc(child, "MFI Logical Disk");
1878 	bus_generic_attach(sc->mfi_dev);
1879 	mtx_unlock(&Giant);
1880 	mtx_lock(&sc->mfi_io_lock);
1881 }
1882 
1883 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1884 {
1885 	struct mfi_command *cm;
1886 	struct mfi_dcmd_frame *dcmd = NULL;
1887 	struct mfi_pd_info *pd_info = NULL;
1888 	int error;
1889 
1890 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1891 
1892 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1893 		(void **)&pd_info, sizeof(*pd_info));
1894 	if (error) {
1895 		device_printf(sc->mfi_dev,
1896 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1897 		    error);
1898 		if (pd_info)
1899 			free(pd_info, M_MFIBUF);
1900 		return (error);
1901 	}
1902 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1903 	dcmd = &cm->cm_frame->dcmd;
1904 	dcmd->mbox[0] = id;
1905 	dcmd->header.scsi_status = 0;
1906 	dcmd->header.pad0 = 0;
1907 	if (mfi_mapcmd(sc, cm) != 0) {
1908 		device_printf(sc->mfi_dev,
1909 		    "Failed to get physical drive info %d\n", id);
1910 		free(pd_info, M_MFIBUF);
1911 		return (0);
1912 	}
1913 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1914 	    BUS_DMASYNC_POSTREAD);
1915 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1916 	mfi_add_sys_pd_complete(cm);
1917 	return (0);
1918 }
1919 
1920 static void
1921 mfi_add_sys_pd_complete(struct mfi_command *cm)
1922 {
1923 	struct mfi_frame_header *hdr;
1924 	struct mfi_pd_info *pd_info;
1925 	struct mfi_softc *sc;
1926 	device_t child;
1927 
1928 	sc = cm->cm_sc;
1929 	hdr = &cm->cm_frame->header;
1930 	pd_info = cm->cm_private;
1931 
1932 	if (hdr->cmd_status != MFI_STAT_OK) {
1933 		free(pd_info, M_MFIBUF);
1934 		mfi_release_command(cm);
1935 		return;
1936 	}
1937 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1938 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1939 		    pd_info->ref.v.device_id);
1940 		free(pd_info, M_MFIBUF);
1941 		mfi_release_command(cm);
1942 		return;
1943 	}
1944 	mfi_release_command(cm);
1945 
1946 	mtx_unlock(&sc->mfi_io_lock);
1947 	mtx_lock(&Giant);
1948 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1949 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1950 		free(pd_info, M_MFIBUF);
1951 		mtx_unlock(&Giant);
1952 		mtx_lock(&sc->mfi_io_lock);
1953 		return;
1954 	}
1955 
1956 	device_set_ivars(child, pd_info);
1957 	device_set_desc(child, "MFI System PD");
1958 	bus_generic_attach(sc->mfi_dev);
1959 	mtx_unlock(&Giant);
1960 	mtx_lock(&sc->mfi_io_lock);
1961 }
1962 
1963 static struct mfi_command *
1964 mfi_bio_command(struct mfi_softc *sc)
1965 {
1966 	struct bio *bio;
1967 	struct mfi_command *cm = NULL;
1968 
1969 	/* Reserve two commands to avoid starving ioctl handling */
1970 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
1971 		return (NULL);
1972 	}
1973 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1974 		return (NULL);
1975 	}
1976 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
1977 		cm = mfi_build_ldio(sc, bio);
1978 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
1979 		cm = mfi_build_syspdio(sc, bio);
1980 	}
1981 	if (cm == NULL)
1982 		mfi_enqueue_bio(sc, bio);
1983 	return (cm);
1984 }

1985 static struct mfi_command *
1986 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1987 {
1988 	struct mfi_command *cm;
1989 	struct mfi_pass_frame *pass;
1990 	int flags = 0, blkcount = 0;
1991 	uint32_t context = 0;
1992 
1993 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1994 	    return (NULL);
1995 
1996 	/* Zero out the MFI frame */
1997 	context = cm->cm_frame->header.context;
1998 	bzero(cm->cm_frame, sizeof(union mfi_frame));
1999 	cm->cm_frame->header.context = context;
2000 	pass = &cm->cm_frame->pass;
2001 	bzero(pass->cdb, 16);
2002 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2003 	switch (bio->bio_cmd & 0x03) {
2004 	case BIO_READ:
2005 #define SCSI_READ 0x28
2006 		pass->cdb[0] = SCSI_READ;
2007 		flags = MFI_CMD_DATAIN;
2008 		break;
2009 	case BIO_WRITE:
2010 #define SCSI_WRITE 0x2a
2011 		pass->cdb[0] = SCSI_WRITE;
2012 		flags = MFI_CMD_DATAOUT;
2013 		break;
2014 	default:
2015 		panic("Invalid bio command");
2016 	}
2017 
2018 	/* Cheat with the sector length to avoid a non-constant division */
2019 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2020 	/* Fill the LBA and Transfer length in CDB */
2021 	pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
2022 	pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
2023 	pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
2024 	pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
2025 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2026 	pass->cdb[8] = (blkcount & 0x00ff);
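	/*
	 * The result is a 10-byte READ(10)/WRITE(10) CDB: byte 0 holds
	 * the opcode, bytes 2-5 the 32-bit LBA (big-endian) and bytes
	 * 7-8 the 16-bit transfer length in blocks.
	 */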
2027 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2028 	pass->header.timeout = 0;
2029 	pass->header.flags = 0;
2030 	pass->header.scsi_status = 0;
2031 	pass->header.sense_len = MFI_SENSE_LEN;
2032 	pass->header.data_len = bio->bio_bcount;
2033 	pass->header.cdb_len = 10;
2034 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2035 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2036 	cm->cm_complete = mfi_bio_complete;
2037 	cm->cm_private = bio;
2038 	cm->cm_data = bio->bio_data;
2039 	cm->cm_len = bio->bio_bcount;
2040 	cm->cm_sg = &pass->sgl;
2041 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2042 	cm->cm_flags = flags;
2043 	return (cm);
2044 }
2045 
2046 static struct mfi_command *
2047 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2048 {
2049 	struct mfi_io_frame *io;
2050 	struct mfi_command *cm;
2051 	int flags, blkcount;
2052 	uint32_t context = 0;
2053 
2054 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2055 	    return (NULL);
2056 
2057 	/* Zero out the MFI frame */
2058 	context = cm->cm_frame->header.context;
2059 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2060 	cm->cm_frame->header.context = context;
2061 	io = &cm->cm_frame->io;
2062 	switch (bio->bio_cmd & 0x03) {
2063 	case BIO_READ:
2064 		io->header.cmd = MFI_CMD_LD_READ;
2065 		flags = MFI_CMD_DATAIN;
2066 		break;
2067 	case BIO_WRITE:
2068 		io->header.cmd = MFI_CMD_LD_WRITE;
2069 		flags = MFI_CMD_DATAOUT;
2070 		break;
2071 	default:
2072 		panic("Invalid bio command");
2073 	}
2074 
2075 	/* Cheat with the sector length to avoid a non-constant division */
2076 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2077 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2078 	io->header.timeout = 0;
2079 	io->header.flags = 0;
2080 	io->header.scsi_status = 0;
2081 	io->header.sense_len = MFI_SENSE_LEN;
2082 	io->header.data_len = blkcount;
2083 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2084 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2085 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2086 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2087 	cm->cm_complete = mfi_bio_complete;
2088 	cm->cm_private = bio;
2089 	cm->cm_data = bio->bio_data;
2090 	cm->cm_len = bio->bio_bcount;
2091 	cm->cm_sg = &io->sgl;
2092 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2093 	cm->cm_flags = flags;
2094 	return (cm);
2095 }
2096 
2097 static void
2098 mfi_bio_complete(struct mfi_command *cm)
2099 {
2100 	struct bio *bio;
2101 	struct mfi_frame_header *hdr;
2102 	struct mfi_softc *sc;
2103 
2104 	bio = cm->cm_private;
2105 	hdr = &cm->cm_frame->header;
2106 	sc = cm->cm_sc;
2107 
2108 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2109 		bio->bio_flags |= BIO_ERROR;
2110 		bio->bio_error = EIO;
2111 		device_printf(sc->mfi_dev, "I/O error, status %d "
2112 		    "scsi_status %d\n", hdr->cmd_status, hdr->scsi_status);
2113 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2114 	} else if (cm->cm_error != 0) {
2115 		bio->bio_flags |= BIO_ERROR;
2116 	}
2117 
2118 	mfi_release_command(cm);
2119 	mfi_disk_complete(bio);
2120 }
2121 
2122 void
2123 mfi_startio(struct mfi_softc *sc)
2124 {
2125 	struct mfi_command *cm;
2126 	struct ccb_hdr *ccbh;
2127 
2128 	for (;;) {
2129 		/* Don't bother if we're short on resources */
2130 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2131 			break;
2132 
2133 		/* Try a command that has already been prepared */
2134 		cm = mfi_dequeue_ready(sc);
2135 
2136 		if (cm == NULL) {
2137 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2138 				cm = sc->mfi_cam_start(ccbh);
2139 		}
2140 
2141 		/* Nope, so look for work on the bioq */
2142 		if (cm == NULL)
2143 			cm = mfi_bio_command(sc);
2144 
2145 		/* No work available, so exit */
2146 		if (cm == NULL)
2147 			break;
2148 
2149 		/* Send the command to the controller */
2150 		if (mfi_mapcmd(sc, cm) != 0) {
2151 			mfi_requeue_ready(cm);
2152 			break;
2153 		}
2154 	}
2155 }
2156 
2157 int
2158 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2159 {
2160 	int error, polled;
2161 
2162 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2163 
2164 	if ((cm->cm_data != NULL) &&
	    (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2165 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2166 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2167 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
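		/*
		 * Unless MFI_CMD_POLLED forced BUS_DMA_NOWAIT above, the
		 * load may be deferred; in that case mfi_data_cb() will
		 * run later and issue the frame itself, so freeze the
		 * queue and report success for now.
		 */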
2168 		if (error == EINPROGRESS) {
2169 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2170 			return (0);
2171 		}
2172 	} else {
2173 		if (sc->MFA_enabled)
2174 			error = mfi_tbolt_send_frame(sc, cm);
2175 		else
2176 			error = mfi_send_frame(sc, cm);
2177 	}
2178 
2179 	return (error);
2180 }
2181 
2182 static void
2183 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2184 {
2185 	struct mfi_frame_header *hdr;
2186 	struct mfi_command *cm;
2187 	union mfi_sgl *sgl;
2188 	struct mfi_softc *sc;
2189 	int i, j, first, dir;
2190 	int sge_size;
2191 
2192 	cm = (struct mfi_command *)arg;
2193 	sc = cm->cm_sc;
2194 	hdr = &cm->cm_frame->header;
2195 	sgl = cm->cm_sg;
2196 
2197 	if (error) {
2198 		printf("error %d in callback\n", error);
2199 		cm->cm_error = error;
2200 		mfi_complete(sc, cm);
2201 		return;
2202 	}
2203 	/*
2204 	 * Use an IEEE SGL only for I/Os on a SKINNY controller.
2205 	 * For other commands on a SKINNY controller use either
2206 	 * sg32 or sg64 based on sizeof(bus_addr_t).
2207 	 * Also calculate the total frame size based on the type
2208 	 * of SGL used.
	 */
2209 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2210 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2211 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2212 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2213 		for (i = 0; i < nsegs; i++) {
2214 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2215 			sgl->sg_skinny[i].len = segs[i].ds_len;
2216 			sgl->sg_skinny[i].flag = 0;
2217 		}
2218 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2219 		sge_size = sizeof(struct mfi_sg_skinny);
2220 		hdr->sg_count = nsegs;
2221 	} else {
2222 		j = 0;
2223 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2224 			first = cm->cm_stp_len;
2225 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2226 				sgl->sg32[j].addr = segs[0].ds_addr;
2227 				sgl->sg32[j++].len = first;
2228 			} else {
2229 				sgl->sg64[j].addr = segs[0].ds_addr;
2230 				sgl->sg64[j++].len = first;
2231 			}
2232 		} else
2233 			first = 0;
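		/*
		 * The first cm_stp_len bytes of an STP command were
		 * covered by the entry built above, so offset the first
		 * data segment by 'first' in the loops below; it drops
		 * to zero after one iteration.
		 */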
2234 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2235 			for (i = 0; i < nsegs; i++) {
2236 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2237 				sgl->sg32[j++].len = segs[i].ds_len - first;
2238 				first = 0;
2239 			}
2240 		} else {
2241 			for (i = 0; i < nsegs; i++) {
2242 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2243 				sgl->sg64[j++].len = segs[i].ds_len - first;
2244 				first = 0;
2245 			}
2246 			hdr->flags |= MFI_FRAME_SGL64;
2247 		}
2248 		hdr->sg_count = j;
2249 		sge_size = sc->mfi_sge_size;
2250 	}
2251 
2252 	dir = 0;
2253 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2254 		dir |= BUS_DMASYNC_PREREAD;
2255 		hdr->flags |= MFI_FRAME_DIR_READ;
2256 	}
2257 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2258 		dir |= BUS_DMASYNC_PREWRITE;
2259 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2260 	}
2261 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2262 	cm->cm_flags |= MFI_CMD_MAPPED;
2263 
2264 	/*
2265 	 * Instead of calculating the total number of frames in the
2266 	 * compound frame, it's already assumed that there will be at
2267 	 * least 1 frame, so don't compensate for the modulo of the
2268 	 * following division.
2269 	 */
2270 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2271 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2272 
2273 	if (sc->MFA_enabled)
2274 		mfi_tbolt_send_frame(sc, cm);
2275 	else
2276 		mfi_send_frame(sc, cm);
2277 
2278 	return;
2279 }
2280 
2281 static int
2282 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2283 {
2284 	struct mfi_frame_header *hdr;
2285 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2286 
2287 	hdr = &cm->cm_frame->header;
2288 
2289 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2290 		cm->cm_timestamp = time_uptime;
2291 		mfi_enqueue_busy(cm);
2292 	} else {
2293 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2294 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2295 	}
2296 
2297 	/*
2298 	 * The bus address of the command is aligned on a 64 byte boundary,
2299 	 * leaving the least 6 bits as zero.  For whatever reason, the
2300 	 * hardware wants the address shifted right by three, leaving just
2301 	 * 3 zero bits.  These three bits are then used as a prefetching
2302 	 * hint for the hardware to predict how many frames need to be
2303 	 * fetched across the bus.  If a command has more than 8 frames
2304 	 * then the 3 bits are set to 0x7 and the firmware uses other
2305 	 * information in the command to determine the total amount to fetch.
2306 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2307 	 * is enough for both 32bit and 64bit systems.
2308 	 */
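	/*
	 * For example (illustrative only), a frame at bus address
	 * 0x12340040 with three extra frames would be posted as
	 * (0x12340040 >> 3) | 3; the exact register write is left to
	 * the per-controller mfi_issue_cmd method.
	 */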
2309 	if (cm->cm_extra_frames > 7)
2310 		cm->cm_extra_frames = 7;
2311 
2312 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2313 
2314 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2315 		return (0);
2316 
2317 	/* This is a polled command, so busy-wait for it to complete. */
2318 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2319 		DELAY(1000);
2320 		tm -= 1;
2321 		if (tm <= 0)
2322 			break;
2323 	}
2324 
2325 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2326 		device_printf(sc->mfi_dev, "Frame %p timed out "
2327 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2328 		return (ETIMEDOUT);
2329 	}
2330 
2331 	return (0);
2332 }
2333 
2334 
2335 void
2336 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2337 {
2338 	int dir;
2339 
2340 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2341 		dir = 0;
2342 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2343 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2344 			dir |= BUS_DMASYNC_POSTREAD;
2345 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2346 			dir |= BUS_DMASYNC_POSTWRITE;
2347 
2348 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2349 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2350 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2351 	}
2352 
2353 	cm->cm_flags |= MFI_CMD_COMPLETED;
2354 
2355 	if (cm->cm_complete != NULL)
2356 		cm->cm_complete(cm);
2357 	else
2358 		wakeup(cm);
2359 }
2360 
2361 static int
2362 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2363 {
2364 	struct mfi_command *cm;
2365 	struct mfi_abort_frame *abort;
2366 	int i = 0;
2367 	uint32_t context = 0;
2368 
2369 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2370 
2371 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2372 		return (EBUSY);
2373 	}
2374 
2375 	/* Zero out the MFI frame */
2376 	context = cm->cm_frame->header.context;
2377 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2378 	cm->cm_frame->header.context = context;
2379 
2380 	abort = &cm->cm_frame->abort;
2381 	abort->header.cmd = MFI_CMD_ABORT;
2382 	abort->header.flags = 0;
2383 	abort->header.scsi_status = 0;
2384 	abort->abort_context = cm_abort->cm_frame->header.context;
2385 	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2386 	abort->abort_mfi_addr_hi =
2387 	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2388 	cm->cm_data = NULL;
2389 	cm->cm_flags = MFI_CMD_POLLED;
2390 
2391 	if (sc->mfi_aen_cm)
2392 		sc->cm_aen_abort = 1;
2393 	if (sc->mfi_map_sync_cm)
2394 		sc->cm_map_abort = 1;
2395 	mfi_mapcmd(sc, cm);
2396 	mfi_release_command(cm);
2397 
2398 	while (i < 5 && sc->mfi_aen_cm != NULL) {
2399 		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2400 		    5 * hz);
2401 		i++;
2402 	}
	i = 0;
2403 	while (i < 5 && sc->mfi_map_sync_cm != NULL) {
2404 		msleep(&sc->mfi_map_sync_cm, &sc->mfi_io_lock, 0, "mfiabort",
2405 		    5 * hz);
2406 		i++;
2407 	}
2408 
2409 	return (0);
2410 }
2411 
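/*
 * The two routines below are the crash-dump write paths used by the
 * mfid and mfisyspd disk children.  They typically run after a panic,
 * with interrupts unavailable, so both use MFI_CMD_POLLED and
 * busy-wait for completion.
 */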
2412 int
2413 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2414      int len)
2415 {
2416 	struct mfi_command *cm;
2417 	struct mfi_io_frame *io;
2418 	int error;
2419 	uint32_t context = 0;
2420 
2421 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2422 		return (EBUSY);
2423 
2424 	/* Zero out the MFI frame */
2425 	context = cm->cm_frame->header.context;
2426 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2427 	cm->cm_frame->header.context = context;
2428 
2429 	io = &cm->cm_frame->io;
2430 	io->header.cmd = MFI_CMD_LD_WRITE;
2431 	io->header.target_id = id;
2432 	io->header.timeout = 0;
2433 	io->header.flags = 0;
2434 	io->header.scsi_status = 0;
2435 	io->header.sense_len = MFI_SENSE_LEN;
2436 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2437 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2438 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2439 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2440 	io->lba_lo = lba & 0xffffffff;
2441 	cm->cm_data = virt;
2442 	cm->cm_len = len;
2443 	cm->cm_sg = &io->sgl;
2444 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2445 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2446 
2447 	error = mfi_mapcmd(sc, cm);
2448 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2449 	    BUS_DMASYNC_POSTWRITE);
2450 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2451 	mfi_release_command(cm);
2452 
2453 	return (error);
2454 }
2455 
2456 int
2457 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2458     int len)
2459 {
2460 	struct mfi_command *cm;
2461 	struct mfi_pass_frame *pass;
2462 	int error;
2463 	int blkcount = 0;
2464 
2465 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2466 		return (EBUSY);
2467 
2468 	pass = &cm->cm_frame->pass;
2469 	bzero(pass->cdb, 16);
2470 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2471 	pass->cdb[0] = SCSI_WRITE;
2472 	pass->cdb[2] = (lba & 0xff000000) >> 24;
2473 	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2474 	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2475 	pass->cdb[5] = (lba & 0x000000ff);
2476 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2477 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2478 	pass->cdb[8] = (blkcount & 0x00ff);
2479 	pass->header.target_id = id;
2480 	pass->header.timeout = 0;
2481 	pass->header.flags = 0;
2482 	pass->header.scsi_status = 0;
2483 	pass->header.sense_len = MFI_SENSE_LEN;
2484 	pass->header.data_len = len;
2485 	pass->header.cdb_len = 10;
2486 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2487 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2488 	cm->cm_data = virt;
2489 	cm->cm_len = len;
2490 	cm->cm_sg = &pass->sgl;
2491 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2492 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2493 
2494 	error = mfi_mapcmd(sc, cm);
2495 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2496 	    BUS_DMASYNC_POSTWRITE);
2497 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2498 	mfi_release_command(cm);
2499 
2500 	return (error);
2501 }
2502 
2503 static int
2504 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2505 {
2506 	struct mfi_softc *sc;
2507 	int error;
2508 
2509 	sc = dev->si_drv1;
2510 
2511 	mtx_lock(&sc->mfi_io_lock);
2512 	if (sc->mfi_detaching)
2513 		error = ENXIO;
2514 	else {
2515 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2516 		error = 0;
2517 	}
2518 	mtx_unlock(&sc->mfi_io_lock);
2519 
2520 	return (error);
2521 }
2522 
2523 static int
2524 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2525 {
2526 	struct mfi_softc *sc;
2527 	struct mfi_aen *mfi_aen_entry, *tmp;
2528 
2529 	sc = dev->si_drv1;
2530 
2531 	mtx_lock(&sc->mfi_io_lock);
2532 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2533 
2534 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2535 		if (mfi_aen_entry->p == curproc) {
2536 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2537 			    aen_link);
2538 			free(mfi_aen_entry, M_MFIBUF);
2539 		}
2540 	}
2541 	mtx_unlock(&sc->mfi_io_lock);
2542 	return (0);
2543 }
2544 
2545 static int
2546 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2547 {
2548 
2549 	switch (opcode) {
2550 	case MFI_DCMD_LD_DELETE:
2551 	case MFI_DCMD_CFG_ADD:
2552 	case MFI_DCMD_CFG_CLEAR:
2553 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2554 		sx_xlock(&sc->mfi_config_lock);
2555 		return (1);
2556 	default:
2557 		return (0);
2558 	}
2559 }
2560 
2561 static void
2562 mfi_config_unlock(struct mfi_softc *sc, int locked)
2563 {
2564 
2565 	if (locked)
2566 		sx_xunlock(&sc->mfi_config_lock);
2567 }
2568 
2569 /*
2570  * Perform pre-issue checks on commands from userland and possibly veto
2571  * them.
2572  */
2573 static int
2574 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2575 {
2576 	struct mfi_disk *ld, *ld2;
2577 	int error;
2578 	struct mfi_system_pd *syspd = NULL;
2579 	uint16_t syspd_id;
2580 	uint16_t *mbox;
2581 
2582 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2583 	error = 0;
2584 	switch (cm->cm_frame->dcmd.opcode) {
2585 	case MFI_DCMD_LD_DELETE:
2586 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2587 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2588 				break;
2589 		}
2590 		if (ld == NULL)
2591 			error = ENOENT;
2592 		else
2593 			error = mfi_disk_disable(ld);
2594 		break;
2595 	case MFI_DCMD_CFG_CLEAR:
2596 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2597 			error = mfi_disk_disable(ld);
2598 			if (error)
2599 				break;
2600 		}
2601 		if (error) {
2602 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2603 				if (ld2 == ld)
2604 					break;
2605 				mfi_disk_enable(ld2);
2606 			}
2607 		}
2608 		break;
2609 	case MFI_DCMD_PD_STATE_SET:
2610 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2611 		syspd_id = mbox[0];
2612 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2613 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2614 				if (syspd->pd_id == syspd_id)
2615 					break;
2616 			}
2617 		} else
2618 			break;
2620 		if (syspd)
2621 			error = mfi_syspd_disable(syspd);
2622 		break;
2623 	default:
2624 		break;
2625 	}
2626 	return (error);
2627 }
2628 
2629 /* Perform post-issue checks on commands from userland. */
2630 static void
2631 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2632 {
2633 	struct mfi_disk *ld, *ldn;
2634 	struct mfi_system_pd *syspd = NULL;
2635 	uint16_t syspd_id;
2636 	uint16_t *mbox;
2637 
2638 	switch (cm->cm_frame->dcmd.opcode) {
2639 	case MFI_DCMD_LD_DELETE:
2640 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2641 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2642 				break;
2643 		}
2644 		KASSERT(ld != NULL, ("volume disappeared"));
2645 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2646 			mtx_unlock(&sc->mfi_io_lock);
2647 			mtx_lock(&Giant);
2648 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2649 			mtx_unlock(&Giant);
2650 			mtx_lock(&sc->mfi_io_lock);
2651 		} else
2652 			mfi_disk_enable(ld);
2653 		break;
2654 	case MFI_DCMD_CFG_CLEAR:
2655 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2656 			mtx_unlock(&sc->mfi_io_lock);
2657 			mtx_lock(&Giant);
2658 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2659 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2660 			}
2661 			mtx_unlock(&Giant);
2662 			mtx_lock(&sc->mfi_io_lock);
2663 		} else {
2664 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2665 				mfi_disk_enable(ld);
2666 		}
2667 		break;
2668 	case MFI_DCMD_CFG_ADD:
2669 		mfi_ldprobe(sc);
2670 		break;
2671 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2672 		mfi_ldprobe(sc);
2673 		break;
2674 	case MFI_DCMD_PD_STATE_SET:
2675 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2676 		syspd_id = mbox[0];
2677 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2678 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2679 				if (syspd->pd_id == syspd_id)
2680 					break;
2681 			}
2682 		} else
2683 			break;
2685 		/* If the transition fails then enable the syspd again */
2686 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2687 			mfi_syspd_enable(syspd);
2688 		break;
2689 	}
2690 }
2691 
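/*
 * Decide whether a userland config command involves an SSC (SSD cache)
 * logical drive.  For those the LD pre/post hooks are skipped, since
 * no mfid child is attached for SSCD volumes (see mfi_add_ld() above).
 */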
2692 static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2693 {
2694 	struct mfi_config_data *conf_data =
	    (struct mfi_config_data *)cm->cm_data;
2695 	struct mfi_command *ld_cm = NULL;
2696 	struct mfi_ld_info *ld_info = NULL;
2697 	int error = 0;
2698 
2699 	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2700 	    (conf_data->ld[0].params.isSSCD == 1)) {
2701 		error = 1;
2702 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2703 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2704 		    (void **)&ld_info, sizeof(*ld_info));
2705 		if (error) {
2706 			device_printf(sc->mfi_dev, "Failed to allocate "
2707 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2708 			if (ld_info)
2709 				free(ld_info, M_MFIBUF);
2710 			return (0);
2711 		}
2712 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2713 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2714 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2715 		if (mfi_wait_command(sc, ld_cm) != 0) {
2716 			device_printf(sc->mfi_dev,
			    "Failed to get logical drive info\n");
2717 			mfi_release_command(ld_cm);
2718 			free(ld_info, M_MFIBUF);
2719 			return (0);
2720 		}
2721 
2722 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2723 			free(ld_info, M_MFIBUF);
2724 			mfi_release_command(ld_cm);
2725 			return (0);
2726 		} else
2727 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2729 
2730 		if (ld_info->ld_config.params.isSSCD == 1)
2731 			error = 1;
2732 
2733 		mfi_release_command(ld_cm);
2734 		free(ld_info, M_MFIBUF);
2735 	}
2736 	return (error);
2738 }
2739 
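/*
 * Set up kernel bounce buffers for an MFI_CMD_STP pass-through ioctl:
 * allocate one DMA-able buffer per user S/G entry, copy the user data
 * in, and point both the megasas_sge array in the frame and the STP
 * SGL at the kernel copies.  The buffers are torn down in mfi_ioctl()
 * once the command completes.
 */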
2740 static int
2741 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2742 {
2743 	uint8_t i;
2744 	struct mfi_ioc_packet *ioc;
2745 	struct megasas_sge *kern_sge;
2746 	int sge_size, error;
2747 
2748 	ioc = (struct mfi_ioc_packet *)arg;
2749 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2750 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2751 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2752 
2753 	if (sizeof(bus_addr_t) == 8) {
2754 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2755 		cm->cm_extra_frames = 2;
2756 		sge_size = sizeof(struct mfi_sg64);
2757 	} else {
2758 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2759 		sge_size = sizeof(struct mfi_sg32);
2760 	}
2761 
2762 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2763 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2764 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2765 			1, 0,			/* algnmnt, boundary */
2766 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2767 			BUS_SPACE_MAXADDR,	/* highaddr */
2768 			NULL, NULL,		/* filter, filterarg */
2769 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2770 			2,			/* nsegments */
2771 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2772 			BUS_DMA_ALLOCNOW,	/* flags */
2773 			NULL, NULL,		/* lockfunc, lockarg */
2774 			&sc->mfi_kbuff_arr_dmat[i])) {
2775 			device_printf(sc->mfi_dev,
2776 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2777 			return (ENOMEM);
2778 		}
2779 
2780 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2781 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2782 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2783 			device_printf(sc->mfi_dev,
2784 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2785 			return (ENOMEM);
2786 		}
2787 
2788 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2789 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2790 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2791 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2792 
2793 		if (!sc->kbuff_arr[i]) {
2794 			device_printf(sc->mfi_dev,
2795 			    "Could not allocate memory for kbuff_arr info\n");
2796 			return -1;
2797 			return (ENOMEM);
2798 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2799 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2800 
2801 		if (sizeof(bus_addr_t) == 8) {
2802 			cm->cm_frame->stp.sgl.sg64[i].addr =
2803 			    kern_sge[i].phys_addr;
2804 			cm->cm_frame->stp.sgl.sg64[i].len =
2805 			    ioc->mfi_sgl[i].iov_len;
2806 		} else {
2807 			cm->cm_frame->stp.sgl.sg32[i].addr =
2808 			    kern_sge[i].phys_addr;
2809 			cm->cm_frame->stp.sgl.sg32[i].len =
2810 			    ioc->mfi_sgl[i].iov_len;
2811 		}
2812 
2813 		error = copyin(ioc->mfi_sgl[i].iov_base,
2814 		    sc->kbuff_arr[i],
2815 		    ioc->mfi_sgl[i].iov_len);
2816 		if (error != 0) {
2817 			device_printf(sc->mfi_dev, "Copy in failed\n");
2818 			return error;
2819 			return (error);
2820 	}
2821 
2822 	cm->cm_flags |= MFI_CMD_MAPPED;
2823 	return (0);
2824 }
2825 
2826 static int
2827 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2828 {
2829 	struct mfi_command *cm;
2830 	struct mfi_dcmd_frame *dcmd;
2831 	void *ioc_buf = NULL;
2832 	uint32_t context;
2833 	int error = 0, locked;
2834 
2836 	if (ioc->buf_size > 0) {
2837 		if (ioc->buf_size > 1024 * 1024)
2838 			return (ENOMEM);
2839 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2840 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2841 		if (error) {
2842 			device_printf(sc->mfi_dev, "failed to copyin\n");
2843 			free(ioc_buf, M_MFIBUF);
2844 			return (error);
2845 		}
2846 	}
2847 
2848 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2849 
2850 	mtx_lock(&sc->mfi_io_lock);
2851 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2852 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2853 
2854 	/* Save context for later */
2855 	context = cm->cm_frame->header.context;
2856 
2857 	dcmd = &cm->cm_frame->dcmd;
2858 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2859 
2860 	cm->cm_sg = &dcmd->sgl;
2861 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2862 	cm->cm_data = ioc_buf;
2863 	cm->cm_len = ioc->buf_size;
2864 
2865 	/* restore context */
2866 	cm->cm_frame->header.context = context;
2867 
2868 	/* Cheat since we don't know if we're writing or reading */
2869 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2870 
2871 	error = mfi_check_command_pre(sc, cm);
2872 	if (error)
2873 		goto out;
2874 
2875 	error = mfi_wait_command(sc, cm);
2876 	if (error) {
2877 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2878 		goto out;
2879 	}
2880 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2881 	mfi_check_command_post(sc, cm);
2882 out:
2883 	mfi_release_command(cm);
2884 	mtx_unlock(&sc->mfi_io_lock);
2885 	mfi_config_unlock(sc, locked);
2886 	if (ioc->buf_size > 0)
2887 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2888 	if (ioc_buf)
2889 		free(ioc_buf, M_MFIBUF);
2890 	return (error);
2891 }
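/*
 * For reference, a minimal userland sketch of driving MFIIO_PASSTHRU
 * above (not compiled into the driver; assumes the usual userland
 * headers plus the mfi ioctl/register headers, and uses
 * MFI_DCMD_CTRL_GETINFO as an example opcode).
 */
#if 0
static int
mfi_getinfo_example(void)
{
	struct mfi_ioc_passthru iop;
	struct mfi_ctrl_info info;
	int fd;

	fd = open("/dev/mfi0", O_RDWR);
	if (fd < 0)
		return (-1);
	bzero(&iop, sizeof(iop));
	iop.ioc_frame.header.cmd = MFI_CMD_DCMD;
	iop.ioc_frame.header.data_len = sizeof(info);
	iop.ioc_frame.opcode = MFI_DCMD_CTRL_GETINFO;
	iop.buf = (uint8_t *)&info;
	iop.buf_size = sizeof(info);
	if (ioctl(fd, MFIIO_PASSTHRU, &iop) < 0) {
		close(fd);
		return (-1);
	}
	printf("firmware status 0x%x\n", iop.ioc_frame.header.cmd_status);
	close(fd);
	return (0);
}
#endif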
2892 
2893 #define	PTRIN(p)		((void *)(uintptr_t)(p))
2894 
2895 static int
2896 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2897 {
2898 	struct mfi_softc *sc;
2899 	union mfi_statrequest *ms;
2900 	struct mfi_ioc_packet *ioc;
2901 #ifdef COMPAT_FREEBSD32
2902 	struct mfi_ioc_packet32 *ioc32;
2903 #endif
2904 	struct mfi_ioc_aen *aen;
2905 	struct mfi_command *cm = NULL;
2906 	uint32_t context = 0;
2907 	union mfi_sense_ptr sense_ptr;
2908 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2909 	size_t len;
2910 	int i, res;
2911 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2912 #ifdef COMPAT_FREEBSD32
2913 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2914 	struct mfi_ioc_passthru iop_swab;
2915 #endif
2916 	int error, locked;
2917 	union mfi_sgl *sgl;
2918 	sc = dev->si_drv1;
2919 	error = 0;
2920 
2921 	if (sc->adpreset)
2922 		return (EBUSY);
2923 
2924 	if (sc->hw_crit_error)
2925 		return (EBUSY);
2926 
2927 	if (sc->issuepend_done == 0)
2928 		return (EBUSY);
2929 
2930 	switch (cmd) {
2931 	case MFIIO_STATS:
2932 		ms = (union mfi_statrequest *)arg;
2933 		switch (ms->ms_item) {
2934 		case MFIQ_FREE:
2935 		case MFIQ_BIO:
2936 		case MFIQ_READY:
2937 		case MFIQ_BUSY:
2938 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2939 			    sizeof(struct mfi_qstat));
2940 			break;
2941 		default:
2942 			error = ENOIOCTL;
2943 			break;
2944 		}
2945 		break;
2946 	case MFIIO_QUERY_DISK:
2947 	{
2948 		struct mfi_query_disk *qd;
2949 		struct mfi_disk *ld;
2950 
2951 		qd = (struct mfi_query_disk *)arg;
2952 		mtx_lock(&sc->mfi_io_lock);
2953 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2954 			if (ld->ld_id == qd->array_id)
2955 				break;
2956 		}
2957 		if (ld == NULL) {
2958 			qd->present = 0;
2959 			mtx_unlock(&sc->mfi_io_lock);
2960 			return (0);
2961 		}
2962 		qd->present = 1;
2963 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2964 			qd->open = 1;
2965 		bzero(qd->devname, SPECNAMELEN + 1);
2966 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2967 		mtx_unlock(&sc->mfi_io_lock);
2968 		break;
2969 	}
2970 	case MFI_CMD:
2971 #ifdef COMPAT_FREEBSD32
2972 	case MFI_CMD32:
2973 #endif
2974 		{
2975 		devclass_t devclass;
2976 		int adapter;
2977 
2978 		ioc = (struct mfi_ioc_packet *)arg;
2979 		adapter = ioc->mfi_adapter_no;
2980 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2981 			devclass = devclass_find("mfi");
2982 			sc = devclass_get_softc(devclass, adapter);
2983 		}
2984 		mtx_lock(&sc->mfi_io_lock);
2985 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2986 			mtx_unlock(&sc->mfi_io_lock);
2987 			return (EBUSY);
2988 		}
2989 		mtx_unlock(&sc->mfi_io_lock);
2990 		locked = 0;
2991 
2992 		/*
2993 		 * save off original context since copying from user
2994 		 * will clobber some data
2995 		 */
2996 		context = cm->cm_frame->header.context;
2997 		cm->cm_frame->header.context = cm->cm_index;
2998 
2999 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3000 		    2 * MEGAMFI_FRAME_SIZE);
3001 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3002 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3003 		cm->cm_frame->header.scsi_status = 0;
3004 		cm->cm_frame->header.pad0 = 0;
3005 		if (ioc->mfi_sge_count) {
3006 			cm->cm_sg =
3007 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3008 		}
3009 		sgl = cm->cm_sg;
3010 		cm->cm_flags = 0;
3011 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3012 			cm->cm_flags |= MFI_CMD_DATAIN;
3013 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3014 			cm->cm_flags |= MFI_CMD_DATAOUT;
3015 		/* Legacy app shim */
3016 		if (cm->cm_flags == 0)
3017 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3018 		cm->cm_len = cm->cm_frame->header.data_len;
3019 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3020 #ifdef COMPAT_FREEBSD32
3021 			if (cmd == MFI_CMD) {
3022 #endif
3023 				/* Native */
3024 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3025 #ifdef COMPAT_FREEBSD32
3026 			} else {
3027 				/* 32bit on 64bit */
3028 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3029 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3030 			}
3031 #endif
3032 			cm->cm_len += cm->cm_stp_len;
3033 		}
3034 		if (cm->cm_len &&
3035 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3036 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3037 			    M_WAITOK | M_ZERO);
3038 			if (cm->cm_data == NULL) {
3039 				device_printf(sc->mfi_dev, "Malloc failed\n");
3040 				goto out;
3041 			}
3042 		} else {
3043 			cm->cm_data = NULL;
3044 		}
3045 
3046 		/* restore header context */
3047 		cm->cm_frame->header.context = context;
3048 
3049 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3050 			res = mfi_stp_cmd(sc, cm, arg);
3051 			if (res != 0)
3052 				goto out;
3053 		} else {
3054 			temp = data;
3055 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3056 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3057 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3058 #ifdef COMPAT_FREEBSD32
3059 					if (cmd == MFI_CMD) {
3060 #endif
3061 						/* Native */
3062 						addr = ioc->mfi_sgl[i].iov_base;
3063 						len = ioc->mfi_sgl[i].iov_len;
3064 #ifdef COMPAT_FREEBSD32
3065 					} else {
3066 						/* 32bit on 64bit */
3067 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3068 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3069 						len = ioc32->mfi_sgl[i].iov_len;
3070 					}
3071 #endif
3072 					error = copyin(addr, temp, len);
3073 					if (error != 0) {
3074 						device_printf(sc->mfi_dev,
3075 						    "Copy in failed\n");
3076 						goto out;
3077 					}
3078 					temp = &temp[len];
3079 				}
3080 			}
3081 		}
3082 
3083 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3084 			locked = mfi_config_lock(sc,
3085 			     cm->cm_frame->dcmd.opcode);
3086 
3087 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3088 			cm->cm_frame->pass.sense_addr_lo =
3089 			    (uint32_t)cm->cm_sense_busaddr;
3090 			cm->cm_frame->pass.sense_addr_hi =
3091 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3092 		}
3093 		mtx_lock(&sc->mfi_io_lock);
3094 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3095 		if (!skip_pre_post) {
3096 			error = mfi_check_command_pre(sc, cm);
3097 			if (error) {
3098 				mtx_unlock(&sc->mfi_io_lock);
3099 				goto out;
3100 			}
3101 		}
3102 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3103 			device_printf(sc->mfi_dev,
3104 			    "Controller command failed to complete\n");
3105 			mtx_unlock(&sc->mfi_io_lock);
3106 			goto out;
3107 		}
3108 		if (!skip_pre_post) {
3109 			mfi_check_command_post(sc, cm);
3110 		}
3111 		mtx_unlock(&sc->mfi_io_lock);
3112 
3113 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3114 			temp = data;
3115 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3116 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3117 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3118 #ifdef COMPAT_FREEBSD32
3119 					if (cmd == MFI_CMD) {
3120 #endif
3121 						/* Native */
3122 						addr = ioc->mfi_sgl[i].iov_base;
3123 						len = ioc->mfi_sgl[i].iov_len;
3124 #ifdef COMPAT_FREEBSD32
3125 					} else {
3126 						/* 32bit on 64bit */
3127 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3128 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3129 						len = ioc32->mfi_sgl[i].iov_len;
3130 					}
3131 #endif
3132 					error = copyout(temp, addr, len);
3133 					if (error != 0) {
3134 						device_printf(sc->mfi_dev,
3135 						    "Copy out failed\n");
3136 						goto out;
3137 					}
3138 					temp = &temp[len];
3139 				}
3140 			}
3141 		}
3142 
3143 		if (ioc->mfi_sense_len) {
3144 			/* get user-space sense ptr then copy out sense */
3145 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3146 			    &sense_ptr.sense_ptr_data[0],
3147 			    sizeof(sense_ptr.sense_ptr_data));
3148 #ifdef COMPAT_FREEBSD32
3149 			if (cmd != MFI_CMD) {
3150 				/*
3151 				 * not 64bit native so zero out any address
3152 				 * over 32bit */
3153 				sense_ptr.addr.high = 0;
3154 			}
3155 #endif
3156 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3157 			    ioc->mfi_sense_len);
3158 			if (error != 0) {
3159 				device_printf(sc->mfi_dev,
3160 				    "Copy out failed\n");
3161 				goto out;
3162 			}
3163 		}
3164 
3165 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3166 out:
3167 		mfi_config_unlock(sc, locked);
3168 		if (data)
3169 			free(data, M_MFIBUF);
3170 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3171 			for (i = 0; i < 2; i++) {
3172 				if (sc->kbuff_arr[i]) {
3173 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3174 						bus_dmamap_unload(
3175 						    sc->mfi_kbuff_arr_dmat[i],
3176 						    sc->mfi_kbuff_arr_dmamap[i]
3177 						    );
3178 					if (sc->kbuff_arr[i] != NULL)
3179 						bus_dmamem_free(
3180 						    sc->mfi_kbuff_arr_dmat[i],
3181 						    sc->kbuff_arr[i],
3182 						    sc->mfi_kbuff_arr_dmamap[i]
3183 						    );
3184 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3185 						bus_dma_tag_destroy(
3186 						    sc->mfi_kbuff_arr_dmat[i]);
3187 				}
3188 			}
3189 		}
3190 		if (cm) {
3191 			mtx_lock(&sc->mfi_io_lock);
3192 			mfi_release_command(cm);
3193 			mtx_unlock(&sc->mfi_io_lock);
3194 		}
3195 
3196 		break;
3197 		}
3198 	case MFI_SET_AEN:
3199 		aen = (struct mfi_ioc_aen *)arg;
3200 		error = mfi_aen_register(sc, aen->aen_seq_num,
3201 		    aen->aen_class_locale);
3202 
3203 		break;
3204 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3205 		{
3206 			devclass_t devclass;
3207 			struct mfi_linux_ioc_packet l_ioc;
3208 			int adapter;
3209 
3210 			devclass = devclass_find("mfi");
3211 			if (devclass == NULL)
3212 				return (ENOENT);
3213 
3214 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3215 			if (error)
3216 				return (error);
3217 			adapter = l_ioc.lioc_adapter_no;
3218 			sc = devclass_get_softc(devclass, adapter);
3219 			if (sc == NULL)
3220 				return (ENOENT);
3221 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3222 			    cmd, arg, flag, td));
3224 		}
3225 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3226 		{
3227 			devclass_t devclass;
3228 			struct mfi_linux_ioc_aen l_aen;
3229 			int adapter;
3230 
3231 			devclass = devclass_find("mfi");
3232 			if (devclass == NULL)
3233 				return (ENOENT);
3234 
3235 			error = copyin(arg, &l_aen, sizeof(l_aen));
3236 			if (error)
3237 				return (error);
3238 			adapter = l_aen.laen_adapter_no;
3239 			sc = devclass_get_softc(devclass, adapter);
3240 			if (sc == NULL)
3241 				return (ENOENT);
3242 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3243 			    cmd, arg, flag, td));
3245 		}
3246 #ifdef COMPAT_FREEBSD32
3247 	case MFIIO_PASSTHRU32:
3248 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3249 			error = ENOTTY;
3250 			break;
3251 		}
3252 		iop_swab.ioc_frame	= iop32->ioc_frame;
3253 		iop_swab.buf_size	= iop32->buf_size;
3254 		iop_swab.buf		= PTRIN(iop32->buf);
3255 		iop			= &iop_swab;
3256 		/* FALLTHROUGH */
3257 #endif
3258 	case MFIIO_PASSTHRU:
3259 		error = mfi_user_command(sc, iop);
3260 #ifdef COMPAT_FREEBSD32
3261 		if (cmd == MFIIO_PASSTHRU32)
3262 			iop32->ioc_frame = iop_swab.ioc_frame;
3263 #endif
3264 		break;
3265 	default:
3266 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3267 		error = ENOTTY;
3268 		break;
3269 	}
3270 
3271 	return (error);
3272 }
3273 
3274 static int
3275 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3276 {
3277 	struct mfi_softc *sc;
3278 	struct mfi_linux_ioc_packet l_ioc;
3279 	struct mfi_linux_ioc_aen l_aen;
3280 	struct mfi_command *cm = NULL;
3281 	struct mfi_aen *mfi_aen_entry;
3282 	union mfi_sense_ptr sense_ptr;
3283 	uint32_t context = 0;
3284 	uint8_t *data = NULL, *temp;
3285 	int i;
3286 	int error, locked;
3287 
3288 	sc = dev->si_drv1;
3289 	error = 0;
3290 	switch (cmd) {
3291 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3292 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3293 		if (error != 0)
3294 			return (error);
3295 
3296 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3297 			return (EINVAL);
3298 		}
3299 
3300 		mtx_lock(&sc->mfi_io_lock);
3301 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3302 			mtx_unlock(&sc->mfi_io_lock);
3303 			return (EBUSY);
3304 		}
3305 		mtx_unlock(&sc->mfi_io_lock);
3306 		locked = 0;
3307 
3308 		/*
3309 		 * save off original context since copying from user
3310 		 * will clobber some data
3311 		 */
3312 		context = cm->cm_frame->header.context;
3313 
3314 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3315 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3316 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3317 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3318 		cm->cm_frame->header.scsi_status = 0;
3319 		cm->cm_frame->header.pad0 = 0;
3320 		if (l_ioc.lioc_sge_count)
3321 			cm->cm_sg =
3322 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3323 		cm->cm_flags = 0;
3324 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3325 			cm->cm_flags |= MFI_CMD_DATAIN;
3326 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3327 			cm->cm_flags |= MFI_CMD_DATAOUT;
3328 		cm->cm_len = cm->cm_frame->header.data_len;
3329 		if (cm->cm_len &&
3330 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3331 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3332 			    M_WAITOK | M_ZERO);
3333 			if (cm->cm_data == NULL) {
3334 				device_printf(sc->mfi_dev, "Malloc failed\n");
3335 				goto out;
3336 			}
3337 		} else {
3338 			cm->cm_data = 0;
3339 			cm->cm_data = NULL;
3340 
3341 		/* restore header context */
3342 		cm->cm_frame->header.context = context;
3343 
3344 		temp = data;
3345 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3346 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3347 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3348 				       temp,
3349 				       l_ioc.lioc_sgl[i].iov_len);
3350 				if (error != 0) {
3351 					device_printf(sc->mfi_dev,
3352 					    "Copy in failed\n");
3353 					goto out;
3354 				}
3355 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3356 			}
3357 		}
3358 
3359 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3360 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3361 
3362 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3363 			cm->cm_frame->pass.sense_addr_lo =
3364 			    (uint32_t)cm->cm_sense_busaddr;
3365 			cm->cm_frame->pass.sense_addr_hi =
3366 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3367 		}
3368 
3369 		mtx_lock(&sc->mfi_io_lock);
3370 		error = mfi_check_command_pre(sc, cm);
3371 		if (error) {
3372 			mtx_unlock(&sc->mfi_io_lock);
3373 			goto out;
3374 		}
3375 
3376 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3377 			device_printf(sc->mfi_dev,
3378 			    "Controller command failed to complete\n");
3379 			mtx_unlock(&sc->mfi_io_lock);
3380 			goto out;
3381 		}
3382 
3383 		mfi_check_command_post(sc, cm);
3384 		mtx_unlock(&sc->mfi_io_lock);
3385 
3386 		temp = data;
3387 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3388 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3389 				error = copyout(temp,
3390 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3391 					l_ioc.lioc_sgl[i].iov_len);
3392 				if (error != 0) {
3393 					device_printf(sc->mfi_dev,
3394 					    "Copy out failed\n");
3395 					goto out;
3396 				}
3397 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3398 			}
3399 		}
3400 
3401 		if (l_ioc.lioc_sense_len) {
3402 			/* get user-space sense ptr then copy out sense */
3403 			bcopy(&((struct mfi_linux_ioc_packet *)arg)
3404 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3405 			    &sense_ptr.sense_ptr_data[0],
3406 			    sizeof(sense_ptr.sense_ptr_data));
3407 #ifdef __amd64__
3408 			/*
3409 			 * only 32bit Linux support so zero out any
3410 			 * address over 32bit
3411 			 */
3412 			sense_ptr.addr.high = 0;
3413 #endif
3414 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3415 			    l_ioc.lioc_sense_len);
3416 			if (error != 0) {
3417 				device_printf(sc->mfi_dev,
3418 				    "Copy out failed\n");
3419 				goto out;
3420 			}
3421 		}
3422 
3423 		error = copyout(&cm->cm_frame->header.cmd_status,
3424 			&((struct mfi_linux_ioc_packet*)arg)
3425 			->lioc_frame.hdr.cmd_status,
3426 			1);
3427 		if (error != 0) {
3428 			device_printf(sc->mfi_dev,
3429 				      "Copy out failed\n");
3430 			goto out;
3431 		}
3432 
3433 out:
3434 		mfi_config_unlock(sc, locked);
3435 		if (data)
3436 			free(data, M_MFIBUF);
3437 		if (cm) {
3438 			mtx_lock(&sc->mfi_io_lock);
3439 			mfi_release_command(cm);
3440 			mtx_unlock(&sc->mfi_io_lock);
3441 		}
3442 
3443 		return (error);
3444 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3445 		error = copyin(arg, &l_aen, sizeof(l_aen));
3446 		if (error != 0)
3447 			return (error);
3448 		printf("Registering AEN for pid %d\n", curproc->p_pid);
3449 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3450 		    M_WAITOK);
3451 		mtx_lock(&sc->mfi_io_lock);
3452 		if (mfi_aen_entry != NULL) {
3453 			mfi_aen_entry->p = curproc;
3454 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3455 			    aen_link);
3456 		}
3457 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3458 		    l_aen.laen_class_locale);
3459 
3460 		if (error != 0) {
3461 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3462 			    aen_link);
3463 			free(mfi_aen_entry, M_MFIBUF);
3464 		}
3465 		mtx_unlock(&sc->mfi_io_lock);
3466 
3467 		return (error);
3468 	default:
3469 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3470 		error = ENOENT;
3471 		break;
3472 	}
3473 
3474 	return (error);
3475 }
3476 
3477 static int
3478 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3479 {
3480 	struct mfi_softc *sc;
3481 	int revents = 0;
3482 
3483 	sc = dev->si_drv1;
3484 
3485 	if (poll_events & (POLLIN | POLLRDNORM)) {
3486 		if (sc->mfi_aen_triggered != 0) {
3487 			revents |= poll_events & (POLLIN | POLLRDNORM);
3488 			sc->mfi_aen_triggered = 0;
3489 		}
3490 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3491 			revents |= POLLERR;
3492 		}
3493 	}
3494 
3495 	if (revents == 0) {
3496 		if (poll_events & (POLLIN | POLLRDNORM)) {
3497 			sc->mfi_poll_waiting = 1;
3498 			selrecord(td, &sc->mfi_select);
3499 		}
3500 	}
3501 
3502 	return (revents);
3503 }
3504 
3505 static void
3506 mfi_dump_all(void)
3507 {
3508 	struct mfi_softc *sc;
3509 	struct mfi_command *cm;
3510 	devclass_t dc;
3511 	time_t deadline;
3512 	int timedout;
3513 	int i;
3514 
3515 	dc = devclass_find("mfi");
3516 	if (dc == NULL) {
3517 		printf("No mfi dev class\n");
3518 		return;
3519 	}
3520 
3521 	for (i = 0; ; i++) {
3522 		sc = devclass_get_softc(dc, i);
3523 		if (sc == NULL)
3524 			break;
3525 		device_printf(sc->mfi_dev, "Dumping\n\n");
3526 		timedout = 0;
3527 		deadline = time_uptime - MFI_CMD_TIMEOUT;
3528 		mtx_lock(&sc->mfi_io_lock);
3529 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3530 			if (cm->cm_timestamp < deadline) {
3531 				device_printf(sc->mfi_dev,
3532 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3533 				    cm, (int)(time_uptime - cm->cm_timestamp));
3534 				MFI_PRINT_CMD(cm);
3535 				timedout++;
3536 			}
3537 		}
3538 
3539 #if 0
3540 		if (timedout)
3541 			MFI_DUMP_CMDS(SC);
3542 #endif
3543 
3544 		mtx_unlock(&sc->mfi_io_lock);
3545 	}
3546 
3547 	return;
3548 }
3549 
3550 static void
3551 mfi_timeout(void *data)
3552 {
3553 	struct mfi_softc *sc = (struct mfi_softc *)data;
3554 	struct mfi_command *cm;
3555 	time_t deadline;
3556 	int timedout = 0;
3557 
3558 	deadline = time_uptime - MFI_CMD_TIMEOUT;
3559 	if (sc->adpreset == 0) {
3560 		if (!mfi_tbolt_reset(sc)) {
3561 			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3562 			return;
3563 		}
3564 	}
3565 	mtx_lock(&sc->mfi_io_lock);
3566 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3567 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3568 			continue;
3569 		if (cm->cm_timestamp < deadline) {
3570 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3571 				cm->cm_timestamp = time_uptime;
3572 			} else {
3573 				device_printf(sc->mfi_dev,
3574 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3575 				    cm, (int)(time_uptime - cm->cm_timestamp));
3577 				MFI_PRINT_CMD(cm);
3578 				MFI_VALIDATE_CMD(sc, cm);
3579 				timedout++;
3580 			}
3581 		}
3582 	}
3583 
3584 #if 0
3585 	if (timedout)
3586 		MFI_DUMP_CMDS(SC);
3587 #endif
3588 
3589 	mtx_unlock(&sc->mfi_io_lock);
3590 
3591 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3592 	    mfi_timeout, sc);
3593 
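	/* Compiled out, but keeps mfi_dump_all() referenced for debugging. */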
3594 	if (0)
3595 		mfi_dump_all();
3596 	return;
3597 }
3598