/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
	    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
	    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
	    0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
	    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
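
/*
 * Each TUNABLE_INT above can also be preset from loader.conf(5).  The
 * values below are illustrative only, not recommendations:
 *
 *	hw.mfi.max_cmds="64"
 *	hw.mfi.event_locale="0xffff"
 */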

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

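/*
 * Register-level access differs between the older XScale-based (1064R)
 * controllers and the PPC-style (1078/GEN2/SKINNY) ones, so the softc
 * carries function pointers (mfi_enable_intr, mfi_read_fw_status,
 * mfi_check_clear_intr, mfi_issue_cmd) that mfi_attach() points at one
 * of the variants below.
 */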
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return 1;
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return 1;
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

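/*
 * Posting a command is a single register write to the inbound queue port.
 * On XScale the frame's bus address is shifted right by 3 and the extra
 * frame count occupies the low bits; on PPC/SKINNY the count is shifted
 * left by 1 and bit 0 is always set, with SKINNY using the 64-bit
 * IQPL/IQPH pair (the high word is written as zero here).
 */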
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

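/*
 * Drive the firmware to the READY state before the driver touches it.
 * The loop reads the state from the firmware status register, writes the
 * appropriate doorbell (IDB/SKINNY_IDB) nudge for states that need one,
 * and then polls in 100ms steps for up to max_wait seconds.  A state that
 * does not change within its window is treated as stuck, except during
 * DEVICE_SCAN where forward progress is judged by the raw register value.
 */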
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FW_INIT_2:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

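/*
 * bus_dmamap_load() callback for the driver's static, physically
 * contiguous allocations: each of those maps is created with nsegments
 * of 1, so the single segment's bus address is simply copied out through
 * the opaque argument.
 */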
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

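	/*
	 * Note the recurring three-step busdma pattern used for every static
	 * allocation in this function: bus_dma_tag_create() describes the
	 * constraints (alignment, address range, one contiguous segment),
	 * bus_dmamem_alloc() obtains the memory, and bus_dmamap_load() with
	 * mfi_addr_cb records the bus address the controller will be given.
	 */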
	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x\n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a separate DMA mapping for the MPI2 IOC Init
		 * descriptor, distinct from what was allocated for the
		 * request and reply descriptors, to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
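	/*
	 * Worked example (illustrative numbers, not a guarantee): with 4KB
	 * pages and a 128KB MFI_MAXPHYS, mfi_max_sge is at most 33; with
	 * 8-byte 32-bit SG entries that gives
	 * frames = (8 * 33 - 1) / 64 + 2 = 6, i.e. each command slot
	 * reserves 6 * 64 = 384 bytes of the frame pool.
	 */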
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return error;
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (sc->mfi_cdev != NULL) {
		if (unit == 0)
			make_dev_alias(sc->mfi_cdev,
			    "megaraid_sas_ioctl_node");
		sc->mfi_cdev->si_drv1 = sc;
	}
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_sync_map_info(sc);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds = %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) != 0)
			break;
		mtx_lock(&sc->mfi_io_lock);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		sc->mfi_total_cmds++;
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

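/*
 * Set up a DCMD: dequeue a free command, preserve the frame's context
 * word (the interrupt handler uses it to map a reply back to its
 * mfi_command), and optionally allocate a data buffer of bufsize bytes
 * for the caller if *bufp is NULL.  The caller still has to fill in the
 * mailbox and set the direction flags before mapping the command.
 */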
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

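/*
 * Tell the firmware where the legacy (non-ThunderBolt) reply queue
 * lives: an MFI_CMD_INIT frame carries a mfi_init_qinfo describing the
 * reply ring (mfi_max_fw_cmds + 1 entries) and the producer/consumer
 * indices inside the DMA-able mfi_hwcomms structure, and is issued
 * polled before interrupts are enabled.
 */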
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	free(log_state, M_MFIBUF);

	return 0;
}

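/*
 * Queue a command and sleep until it completes.  The caller must hold
 * the io lock; the sleep channel is the command itself, so whoever
 * completes the command is expected to wakeup(9) on it once
 * MFI_CMD_COMPLETED has been set.
 */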
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

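/*
 * Tear down everything mfi_attach() created, in roughly the reverse
 * order.  Every pointer is checked before it is released, so this is
 * safe to call on a partially attached softc.
 */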
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

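/*
 * Legacy interrupt handler.  The firmware is the producer (hw_pi) and
 * the host is the consumer (hw_ci) of the reply ring in mfi_hwcomms;
 * each entry is the context word of a completed frame, which indexes
 * straight into mfi_commands.  The ring holds mfi_max_fw_cmds + 1
 * entries, hence the wrap point below.
 */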
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->mfi_map_sync_cm != NULL)
		mfi_abort(sc, sc->mfi_map_sync_cm);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PDs */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPDs that are no longer present */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is instead the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}
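
/*
 * Both formatting helpers return pointers to static buffers and are not
 * reentrant; that appears tolerable here because mfi_decode_evt() is
 * their only consumer and events are drained one at a time by the event
 * task.
 */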

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPDs and delete
				 * invalid SYSPDs
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one logged after shutdown; avoid acting on
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * the volume may already be gone, so a
			 * KASSERT(ld != NULL, ("volume disappeared")) would
			 * be wrong here; tolerate a missing LD instead.
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPDs */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

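/*
 * Register (or re-register) the asynchronous event notification command.
 * If an AEN is already outstanding and its class/locale already cover
 * the new request, there is nothing to do; otherwise the two filters are
 * merged, the old command is aborted, and a fresh MFI_DCMD_CTRL_EVENT_WAIT
 * is queued with mfi_aen_complete as its completion handler.
 */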
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale) ^
		    current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mtx_lock(&sc->mfi_io_lock);
			mfi_abort(sc, sc->mfi_aen_cm);
			mtx_unlock(&sc->mfi_io_lock);
		}
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	mtx_unlock(&sc->mfi_io_lock);
	if (error)
		goto out;

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mtx_lock(&sc->mfi_io_lock);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		mtx_unlock(&sc->mfi_io_lock);
		mfi_aen_setup(sc, seq);
		mtx_lock(&sc->mfi_io_lock);
	}
}

#define MAX_EVENTS 15

1708 static int
1709 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1710 {
1711 	struct mfi_command *cm;
1712 	struct mfi_dcmd_frame *dcmd;
1713 	struct mfi_evt_list *el;
1714 	union mfi_evt class_locale;
1715 	int error, i, seq, size;
1716 
1717 	class_locale.members.reserved = 0;
1718 	class_locale.members.locale = mfi_event_locale;
1719 	class_locale.members.evt_class  = mfi_event_class;
1720 
1721 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1722 		* (MAX_EVENTS - 1);
1723 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1724 	if (el == NULL)
1725 		return (ENOMEM);
1726 
1727 	for (seq = start_seq;;) {
1728 		mtx_lock(&sc->mfi_io_lock);
1729 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1730 			free(el, M_MFIBUF);
1731 			mtx_unlock(&sc->mfi_io_lock);
1732 			return (EBUSY);
1733 		}
1734 		mtx_unlock(&sc->mfi_io_lock);
1735 
1736 		dcmd = &cm->cm_frame->dcmd;
1737 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1738 		dcmd->header.cmd = MFI_CMD_DCMD;
1739 		dcmd->header.timeout = 0;
1740 		dcmd->header.data_len = size;
1741 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1742 		((uint32_t *)&dcmd->mbox)[0] = seq;
1743 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1744 		cm->cm_sg = &dcmd->sgl;
1745 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1746 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1747 		cm->cm_data = el;
1748 		cm->cm_len = size;
1749 
1750 		mtx_lock(&sc->mfi_io_lock);
1751 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1752 			device_printf(sc->mfi_dev,
1753 			    "Failed to get controller entries\n");
1754 			mfi_release_command(cm);
1755 			mtx_unlock(&sc->mfi_io_lock);
1756 			break;
1757 		}
1758 
1759 		mtx_unlock(&sc->mfi_io_lock);
1760 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1761 		    BUS_DMASYNC_POSTREAD);
1762 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1763 
1764 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1765 			mtx_lock(&sc->mfi_io_lock);
1766 			mfi_release_command(cm);
1767 			mtx_unlock(&sc->mfi_io_lock);
1768 			break;
1769 		}
1770 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1771 			device_printf(sc->mfi_dev,
1772 			    "Error %d fetching controller entries\n",
1773 			    dcmd->header.cmd_status);
1774 			mtx_lock(&sc->mfi_io_lock);
1775 			mfi_release_command(cm);
1776 			mtx_unlock(&sc->mfi_io_lock);
1777 			break;
1778 		}
1779 		mtx_lock(&sc->mfi_io_lock);
1780 		mfi_release_command(cm);
1781 		mtx_unlock(&sc->mfi_io_lock);
1782 
1783 		for (i = 0; i < el->count; i++) {
1784 			/*
1785 			 * If this event is newer than 'stop_seq' then
1786 			 * break out of the loop.  Note that the log
1787 			 * is a circular buffer so we have to handle
1788 			 * the case that our stop point is earlier in
1789 			 * the buffer than our start point.
1790 			 */
1791 			if (el->event[i].seq >= stop_seq) {
1792 				if (start_seq <= stop_seq)
1793 					break;
1794 				else if (el->event[i].seq < start_seq)
1795 					break;
1796 			}
1797 			mtx_lock(&sc->mfi_io_lock);
1798 			mfi_queue_evt(sc, &el->event[i]);
1799 			mtx_unlock(&sc->mfi_io_lock);
1800 		}
1801 		seq = el->event[el->count - 1].seq + 1;
1802 	}
1803 
1804 	free(el, M_MFIBUF);
1805 	return (0);
1806 }
1807 
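/*
 * Editor's sketch (not driver code): the stop test in mfi_parse_entries()
 * handles the wrap of the circular event log.  With no wrap
 * (start <= stop) the valid window is [start, stop); after a wrap
 * (start > stop) it is [start, MAX) plus [0, stop).  A hypothetical
 * predicate for "seq is past the stop point":
 */
#if 0
static int
evt_past_stop(uint32_t seq, uint32_t start, uint32_t stop)
{
	if (seq < stop)
		return (0);	/* still inside the window */
	if (start <= stop)
		return (1);	/* no wrap: anything >= stop is past */
	return (seq < start);	/* wrapped: [stop, start) is past */
}
#endif
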
1808 static int
1809 mfi_add_ld(struct mfi_softc *sc, int id)
1810 {
1811 	struct mfi_command *cm;
1812 	struct mfi_dcmd_frame *dcmd = NULL;
1813 	struct mfi_ld_info *ld_info = NULL;
1814 	int error;
1815 
1816 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1817 
1818 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1819 	    (void **)&ld_info, sizeof(*ld_info));
1820 	if (error) {
1821 		device_printf(sc->mfi_dev,
1822 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1823 		if (ld_info)
1824 			free(ld_info, M_MFIBUF);
1825 		return (error);
1826 	}
1827 	cm->cm_flags = MFI_CMD_DATAIN;
1828 	dcmd = &cm->cm_frame->dcmd;
1829 	dcmd->mbox[0] = id;
1830 	if (mfi_wait_command(sc, cm) != 0) {
1831 		device_printf(sc->mfi_dev,
1832 		    "Failed to get logical drive: %d\n", id);
1833 		free(ld_info, M_MFIBUF);
1834 		return (0);
1835 	}
1836 	if (ld_info->ld_config.params.isSSCD != 1)
1837 		mfi_add_ld_complete(cm);
1838 	else {
1839 		mfi_release_command(cm);
1840 		if (ld_info)		/* SSCD drives ld_info free here */
1841 			free(ld_info, M_MFIBUF);
1842 	}
1843 	return (0);
1844 }
1845 
1846 static void
1847 mfi_add_ld_complete(struct mfi_command *cm)
1848 {
1849 	struct mfi_frame_header *hdr;
1850 	struct mfi_ld_info *ld_info;
1851 	struct mfi_softc *sc;
1852 	device_t child;
1853 
1854 	sc = cm->cm_sc;
1855 	hdr = &cm->cm_frame->header;
1856 	ld_info = cm->cm_private;
1857 
1858 	if (hdr->cmd_status != MFI_STAT_OK) {
1859 		free(ld_info, M_MFIBUF);
1860 		mfi_release_command(cm);
1861 		return;
1862 	}
1863 	mfi_release_command(cm);
1864 
1865 	mtx_unlock(&sc->mfi_io_lock);
1866 	mtx_lock(&Giant);
1867 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1868 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1869 		free(ld_info, M_MFIBUF);
1870 		mtx_unlock(&Giant);
1871 		mtx_lock(&sc->mfi_io_lock);
1872 		return;
1873 	}
1874 
1875 	device_set_ivars(child, ld_info);
1876 	device_set_desc(child, "MFI Logical Disk");
1877 	bus_generic_attach(sc->mfi_dev);
1878 	mtx_unlock(&Giant);
1879 	mtx_lock(&sc->mfi_io_lock);
1880 }
1881 
1882 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1883 {
1884 	struct mfi_command *cm;
1885 	struct mfi_dcmd_frame *dcmd = NULL;
1886 	struct mfi_pd_info *pd_info = NULL;
1887 	int error;
1888 
1889 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1890 
1891 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1892 		(void **)&pd_info, sizeof(*pd_info));
1893 	if (error) {
1894 		device_printf(sc->mfi_dev,
1895 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1896 		    error);
1897 		if (pd_info)
1898 			free(pd_info, M_MFIBUF);
1899 		return (error);
1900 	}
1901 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1902 	dcmd = &cm->cm_frame->dcmd;
1903 	dcmd->mbox[0] = id;
1904 	dcmd->header.scsi_status = 0;
1905 	dcmd->header.pad0 = 0;
1906 	if (mfi_mapcmd(sc, cm) != 0) {
1907 		device_printf(sc->mfi_dev,
1908 		    "Failed to get physical drive info %d\n", id);
1909 		free(pd_info, M_MFIBUF);
1910 		return (0);
1911 	}
1912 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1913 	    BUS_DMASYNC_POSTREAD);
1914 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1915 	mfi_add_sys_pd_complete(cm);
1916 	return (0);
1917 }
1918 
1919 static void
1920 mfi_add_sys_pd_complete(struct mfi_command *cm)
1921 {
1922 	struct mfi_frame_header *hdr;
1923 	struct mfi_pd_info *pd_info;
1924 	struct mfi_softc *sc;
1925 	device_t child;
1926 
1927 	sc = cm->cm_sc;
1928 	hdr = &cm->cm_frame->header;
1929 	pd_info = cm->cm_private;
1930 
1931 	if (hdr->cmd_status != MFI_STAT_OK) {
1932 		free(pd_info, M_MFIBUF);
1933 		mfi_release_command(cm);
1934 		return;
1935 	}
1936 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1937 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1938 		    pd_info->ref.v.device_id);
1939 		free(pd_info, M_MFIBUF);
1940 		mfi_release_command(cm);
1941 		return;
1942 	}
1943 	mfi_release_command(cm);
1944 
1945 	mtx_unlock(&sc->mfi_io_lock);
1946 	mtx_lock(&Giant);
1947 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1948 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1949 		free(pd_info, M_MFIBUF);
1950 		mtx_unlock(&Giant);
1951 		mtx_lock(&sc->mfi_io_lock);
1952 		return;
1953 	}
1954 
1955 	device_set_ivars(child, pd_info);
1956 	device_set_desc(child, "MFI System PD");
1957 	bus_generic_attach(sc->mfi_dev);
1958 	mtx_unlock(&Giant);
1959 	mtx_lock(&sc->mfi_io_lock);
1960 }
1961 
1962 static struct mfi_command *
1963 mfi_bio_command(struct mfi_softc *sc)
1964 {
1965 	struct bio *bio;
1966 	struct mfi_command *cm = NULL;
1967 
1968 	/* Reserve two commands to avoid starvation of ioctls. */
1969 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
1970 		return (NULL);
1971 	}
1972 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1973 		return (NULL);
1974 	}
1975 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
1976 		cm = mfi_build_ldio(sc, bio);
1977 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
1978 		cm = mfi_build_syspdio(sc, bio);
1979 	}
1980 	if (cm == NULL)
1981 		mfi_enqueue_bio(sc, bio);
1982 	return (cm);
1983 }

1984 static struct mfi_command *
1985 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1986 {
1987 	struct mfi_command *cm;
1988 	struct mfi_pass_frame *pass;
1989 	int flags = 0, blkcount = 0;
1990 	uint32_t context = 0;
1991 
1992 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1993 		return (NULL);
1994 
1995 	/* Zero out the MFI frame */
1996 	context = cm->cm_frame->header.context;
1997 	bzero(cm->cm_frame, sizeof(union mfi_frame));
1998 	cm->cm_frame->header.context = context;
1999 	pass = &cm->cm_frame->pass;
2000 	bzero(pass->cdb, 16);
2001 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2002 	switch (bio->bio_cmd & 0x03) {
2003 	case BIO_READ:
2004 #define SCSI_READ 0x28
2005 		pass->cdb[0] = SCSI_READ;
2006 		flags = MFI_CMD_DATAIN;
2007 		break;
2008 	case BIO_WRITE:
2009 #define SCSI_WRITE 0x2a
2010 		pass->cdb[0] = SCSI_WRITE;
2011 		flags = MFI_CMD_DATAOUT;
2012 		break;
2013 	default:
2014 		panic("Invalid bio command");
2015 	}
2016 
2017 	/* Cheat with the sector length to avoid a non-constant division */
2018 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2019 	/* Fill the LBA and Transfer length in CDB */
2020 	pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
2021 	pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
2022 	pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
2023 	pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
2024 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2025 	pass->cdb[8] = (blkcount & 0x00ff);
2026 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2027 	pass->header.timeout = 0;
2028 	pass->header.flags = 0;
2029 	pass->header.scsi_status = 0;
2030 	pass->header.sense_len = MFI_SENSE_LEN;
2031 	pass->header.data_len = bio->bio_bcount;
2032 	pass->header.cdb_len = 10;
2033 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2034 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2035 	cm->cm_complete = mfi_bio_complete;
2036 	cm->cm_private = bio;
2037 	cm->cm_data = bio->bio_data;
2038 	cm->cm_len = bio->bio_bcount;
2039 	cm->cm_sg = &pass->sgl;
2040 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2041 	cm->cm_flags = flags;
2042 	return (cm);
2043 }
2044 
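/*
 * Editor's sketch (not driver code): the READ(10)/WRITE(10) CDB filled in
 * above packs a 32-bit LBA big-endian into bytes 2..5 and a 16-bit block
 * count into bytes 7..8, where the count is a round-up division of the
 * byte count by the sector size.  Hypothetical standalone form:
 */
#if 0
static void
cdb10_fill(uint8_t cdb[10], uint32_t lba, uint32_t bcount, uint32_t secsz)
{
	uint16_t blks = (bcount + secsz - 1) / secsz;	/* round up */

	cdb[2] = (lba >> 24) & 0xff;
	cdb[3] = (lba >> 16) & 0xff;
	cdb[4] = (lba >> 8) & 0xff;
	cdb[5] = lba & 0xff;
	cdb[7] = (blks >> 8) & 0xff;
	cdb[8] = blks & 0xff;
}
#endif
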
2045 static struct mfi_command *
2046 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2047 {
2048 	struct mfi_io_frame *io;
2049 	struct mfi_command *cm;
2050 	int flags, blkcount;
2051 	uint32_t context = 0;
2052 
2053 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2054 		return (NULL);
2055 
2056 	/* Zero out the MFI frame */
2057 	context = cm->cm_frame->header.context;
2058 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2059 	cm->cm_frame->header.context = context;
2060 	io = &cm->cm_frame->io;
2061 	switch (bio->bio_cmd & 0x03) {
2062 	case BIO_READ:
2063 		io->header.cmd = MFI_CMD_LD_READ;
2064 		flags = MFI_CMD_DATAIN;
2065 		break;
2066 	case BIO_WRITE:
2067 		io->header.cmd = MFI_CMD_LD_WRITE;
2068 		flags = MFI_CMD_DATAOUT;
2069 		break;
2070 	default:
2071 		panic("Invalid bio command");
2072 	}
2073 
2074 	/* Cheat with the sector length to avoid a non-constant division */
2075 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2076 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2077 	io->header.timeout = 0;
2078 	io->header.flags = 0;
2079 	io->header.scsi_status = 0;
2080 	io->header.sense_len = MFI_SENSE_LEN;
2081 	io->header.data_len = blkcount;
2082 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2083 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2084 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2085 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2086 	cm->cm_complete = mfi_bio_complete;
2087 	cm->cm_private = bio;
2088 	cm->cm_data = bio->bio_data;
2089 	cm->cm_len = bio->bio_bcount;
2090 	cm->cm_sg = &io->sgl;
2091 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2092 	cm->cm_flags = flags;
2093 	return (cm);
2094 }
2095 
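/*
 * Editor's sketch (not driver code): lba_hi/lba_lo and
 * sense_addr_hi/sense_addr_lo above are the usual decomposition of a
 * 64-bit value into two 32-bit frame fields:
 */
#if 0
static void
split64(uint64_t v, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(v >> 32);
	*lo = (uint32_t)(v & 0xffffffff);
}
#endif
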
2096 static void
2097 mfi_bio_complete(struct mfi_command *cm)
2098 {
2099 	struct bio *bio;
2100 	struct mfi_frame_header *hdr;
2101 	struct mfi_softc *sc;
2102 
2103 	bio = cm->cm_private;
2104 	hdr = &cm->cm_frame->header;
2105 	sc = cm->cm_sc;
2106 
2107 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2108 		bio->bio_flags |= BIO_ERROR;
2109 		bio->bio_error = EIO;
2110 		device_printf(sc->mfi_dev, "I/O error, status=%d "
2111 		    "scsi_status=%d\n", hdr->cmd_status, hdr->scsi_status);
2112 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2113 	} else if (cm->cm_error != 0) {
2114 		bio->bio_flags |= BIO_ERROR;
2115 	}
2116 
2117 	mfi_release_command(cm);
2118 	mfi_disk_complete(bio);
2119 }
2120 
2121 void
2122 mfi_startio(struct mfi_softc *sc)
2123 {
2124 	struct mfi_command *cm;
2125 	struct ccb_hdr *ccbh;
2126 
2127 	for (;;) {
2128 		/* Don't bother if we're short on resources */
2129 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2130 			break;
2131 
2132 		/* Try a command that has already been prepared */
2133 		cm = mfi_dequeue_ready(sc);
2134 
2135 		if (cm == NULL) {
2136 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2137 				cm = sc->mfi_cam_start(ccbh);
2138 		}
2139 
2140 		/* Nope, so look for work on the bioq */
2141 		if (cm == NULL)
2142 			cm = mfi_bio_command(sc);
2143 
2144 		/* No work available, so exit */
2145 		if (cm == NULL)
2146 			break;
2147 
2148 		/* Send the command to the controller */
2149 		if (mfi_mapcmd(sc, cm) != 0) {
2150 			mfi_requeue_ready(cm);
2151 			break;
2152 		}
2153 	}
2154 }
2155 
2156 int
2157 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2158 {
2159 	int error, polled;
2160 
2161 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2162 
2163 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2164 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2165 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2166 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2167 		if (error == EINPROGRESS) {
2168 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2169 			return (0);
2170 		}
2171 	} else {
2172 		if (sc->MFA_enabled)
2173 			error = mfi_tbolt_send_frame(sc, cm);
2174 		else
2175 			error = mfi_send_frame(sc, cm);
2176 	}
2177 
2178 	return (error);
2179 }
2180 
2181 static void
2182 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2183 {
2184 	struct mfi_frame_header *hdr;
2185 	struct mfi_command *cm;
2186 	union mfi_sgl *sgl;
2187 	struct mfi_softc *sc;
2188 	int i, j, first, dir;
2189 	int sge_size;
2190 
2191 	cm = (struct mfi_command *)arg;
2192 	sc = cm->cm_sc;
2193 	hdr = &cm->cm_frame->header;
2194 	sgl = cm->cm_sg;
2195 
2196 	if (error) {
2197 		printf("error %d in callback\n", error);
2198 		cm->cm_error = error;
2199 		mfi_complete(sc, cm);
2200 		return;
2201 	}
2202 	/* Use IEEE sgl only for IO's on a SKINNY controller
2203 	 * For other commands on a SKINNY controller use either
2204 	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2205 	 * Also calculate the total frame size based on the type
2206 	 * of SGL used.
2207 	 */
2208 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2209 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2210 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2211 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2212 		for (i = 0; i < nsegs; i++) {
2213 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2214 			sgl->sg_skinny[i].len = segs[i].ds_len;
2215 			sgl->sg_skinny[i].flag = 0;
2216 		}
2217 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2218 		sge_size = sizeof(struct mfi_sg_skinny);
2219 		hdr->sg_count = nsegs;
2220 	} else {
2221 		j = 0;
2222 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2223 			first = cm->cm_stp_len;
2224 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2225 				sgl->sg32[j].addr = segs[0].ds_addr;
2226 				sgl->sg32[j++].len = first;
2227 			} else {
2228 				sgl->sg64[j].addr = segs[0].ds_addr;
2229 				sgl->sg64[j++].len = first;
2230 			}
2231 		} else
2232 			first = 0;
2233 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2234 			for (i = 0; i < nsegs; i++) {
2235 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2236 				sgl->sg32[j++].len = segs[i].ds_len - first;
2237 				first = 0;
2238 			}
2239 		} else {
2240 			for (i = 0; i < nsegs; i++) {
2241 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2242 				sgl->sg64[j++].len = segs[i].ds_len - first;
2243 				first = 0;
2244 			}
2245 			hdr->flags |= MFI_FRAME_SGL64;
2246 		}
2247 		hdr->sg_count = j;
2248 		sge_size = sc->mfi_sge_size;
2249 	}
2250 
2251 	dir = 0;
2252 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2253 		dir |= BUS_DMASYNC_PREREAD;
2254 		hdr->flags |= MFI_FRAME_DIR_READ;
2255 	}
2256 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2257 		dir |= BUS_DMASYNC_PREWRITE;
2258 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2259 	}
2260 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2261 	cm->cm_flags |= MFI_CMD_MAPPED;
2262 
2263 	/*
2264 	 * Instead of calculating the total number of frames in the
2265 	 * compound frame, it's already assumed that there will be at
2266 	 * least 1 frame, so don't compensate for the modulo of the
2267 	 * following division.
2268 	 */
2269 	cm->cm_total_frame_size += (sge_size * nsegs);
2270 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2271 
2272 	if (sc->MFA_enabled)
2273 		mfi_tbolt_send_frame(sc, cm);
2274 	else
2275 		mfi_send_frame(sc, cm);
2276 
2277 	return;
2278 }
2279 
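/*
 * Editor's sketch (not driver code): after the S/G format is picked above,
 * the compound frame grows by one SGE per DMA segment, and the extra-frame
 * count is (total - 1) / MFI_FRAME_SIZE, relying on the first frame always
 * existing.  With hypothetical sizes (64-byte frames, 8-byte SGEs):
 */
#if 0
static int
extra_frames(int base_size, int sge_size, int nsegs, int frame_size)
{
	int total = base_size + sge_size * nsegs;

	/* at least one frame is assumed, so no rounding compensation */
	return ((total - 1) / frame_size);
}
/* extra_frames(40, 8, 17, 64) == 2: a 176-byte compound frame */
#endif
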
2280 static int
2281 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2282 {
2283 	struct mfi_frame_header *hdr;
2284 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2285 
2286 	hdr = &cm->cm_frame->header;
2287 
2288 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2289 		cm->cm_timestamp = time_uptime;
2290 		mfi_enqueue_busy(cm);
2291 	} else {
2292 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2293 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2294 	}
2295 
2296 	/*
2297 	 * The bus address of the command is aligned on a 64 byte boundary,
2298 	 * leaving the least 6 bits as zero.  For whatever reason, the
2299 	 * hardware wants the address shifted right by three, leaving just
2300 	 * 3 zero bits.  These three bits are then used as a prefetching
2301 	 * hint for the hardware to predict how many frames need to be
2302 	 * fetched across the bus.  If a command has more than 8 frames
2303 	 * then the 3 bits are set to 0x7 and the firmware uses other
2304 	 * information in the command to determine the total amount to fetch.
2305 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2306 	 * is enough for both 32bit and 64bit systems.
2307 	 */
2308 	if (cm->cm_extra_frames > 7)
2309 		cm->cm_extra_frames = 7;
2310 
2311 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2312 
2313 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2314 		return (0);
2315 
2316 	/* This is a polled command, so busy-wait for it to complete. */
2317 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2318 		DELAY(1000);
2319 		tm -= 1;
2320 		if (tm <= 0)
2321 			break;
2322 	}
2323 
2324 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2325 		device_printf(sc->mfi_dev, "Frame %p timed out "
2326 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2327 		return (ETIMEDOUT);
2328 	}
2329 
2330 	return (0);
2331 }
2332 
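/*
 * Editor's sketch (not driver code): per the comment in mfi_send_frame()
 * above, a 64-byte-aligned frame address has six zero low bits; shifting
 * right by three leaves three, which carry the frame-count prefetch hint
 * capped at 7.  How the value reaches the hardware is up to
 * sc->mfi_issue_cmd; the arithmetic alone would be:
 */
#if 0
static uint32_t
issue_word(uint32_t frame_busaddr, int extra_frames)
{
	if (extra_frames > 7)
		extra_frames = 7;	/* 3-bit field saturates at 7 */
	return ((frame_busaddr >> 3) | (uint32_t)extra_frames);
}
#endif
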
2333 
2334 void
2335 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2336 {
2337 	int dir;
2338 
2339 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2340 		dir = 0;
2341 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2342 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2343 			dir |= BUS_DMASYNC_POSTREAD;
2344 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2345 			dir |= BUS_DMASYNC_POSTWRITE;
2346 
2347 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2348 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2349 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2350 	}
2351 
2352 	cm->cm_flags |= MFI_CMD_COMPLETED;
2353 
2354 	if (cm->cm_complete != NULL)
2355 		cm->cm_complete(cm);
2356 	else
2357 		wakeup(cm);
2358 }
2359 
2360 static int
2361 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2362 {
2363 	struct mfi_command *cm;
2364 	struct mfi_abort_frame *abort;
2365 	int i = 0;
2366 	uint32_t context = 0;
2367 
2368 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2369 
2370 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2371 		return (EBUSY);
2372 	}
2373 
2374 	/* Zero out the MFI frame */
2375 	context = cm->cm_frame->header.context;
2376 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2377 	cm->cm_frame->header.context = context;
2378 
2379 	abort = &cm->cm_frame->abort;
2380 	abort->header.cmd = MFI_CMD_ABORT;
2381 	abort->header.flags = 0;
2382 	abort->header.scsi_status = 0;
2383 	abort->abort_context = cm_abort->cm_frame->header.context;
2384 	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2385 	abort->abort_mfi_addr_hi =
2386 	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2387 	cm->cm_data = NULL;
2388 	cm->cm_flags = MFI_CMD_POLLED;
2389 
2390 	if (sc->mfi_aen_cm)
2391 		sc->cm_aen_abort = 1;
2392 	if (sc->mfi_map_sync_cm)
2393 		sc->cm_map_abort = 1;
2394 	mfi_mapcmd(sc, cm);
2395 	mfi_release_command(cm);
2396 
2397 	while (i < 5 && sc->mfi_aen_cm != NULL) {
2398 		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2399 		    5 * hz);
2400 		i++;
2401 	}
2402 	while (i < 5 && sc->mfi_map_sync_cm != NULL) {
2403 		msleep(&sc->mfi_map_sync_cm, &sc->mfi_io_lock, 0, "mfiabort",
2404 		    5 * hz);
2405 		i++;
2406 	}
2407 
2408 	return (0);
2409 }
2410 
2411 int
2412 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2413      int len)
2414 {
2415 	struct mfi_command *cm;
2416 	struct mfi_io_frame *io;
2417 	int error;
2418 	uint32_t context = 0;
2419 
2420 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2421 		return (EBUSY);
2422 
2423 	/* Zero out the MFI frame */
2424 	context = cm->cm_frame->header.context;
2425 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2426 	cm->cm_frame->header.context = context;
2427 
2428 	io = &cm->cm_frame->io;
2429 	io->header.cmd = MFI_CMD_LD_WRITE;
2430 	io->header.target_id = id;
2431 	io->header.timeout = 0;
2432 	io->header.flags = 0;
2433 	io->header.scsi_status = 0;
2434 	io->header.sense_len = MFI_SENSE_LEN;
2435 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2436 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2437 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2438 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2439 	io->lba_lo = lba & 0xffffffff;
2440 	cm->cm_data = virt;
2441 	cm->cm_len = len;
2442 	cm->cm_sg = &io->sgl;
2443 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2444 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2445 
2446 	error = mfi_mapcmd(sc, cm);
2447 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2448 	    BUS_DMASYNC_POSTWRITE);
2449 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2450 	mfi_release_command(cm);
2451 
2452 	return (error);
2453 }
2454 
2455 int
2456 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2457     int len)
2458 {
2459 	struct mfi_command *cm;
2460 	struct mfi_pass_frame *pass;
2461 	int error;
2462 	int blkcount = 0;
2463 
2464 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2465 		return (EBUSY);
2466 
2467 	pass = &cm->cm_frame->pass;
2468 	bzero(pass->cdb, 16);
2469 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2470 	pass->cdb[0] = SCSI_WRITE;
2471 	pass->cdb[2] = (lba & 0xff000000) >> 24;
2472 	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2473 	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2474 	pass->cdb[5] = (lba & 0x000000ff);
2475 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2476 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2477 	pass->cdb[8] = (blkcount & 0x00ff);
2478 	pass->header.target_id = id;
2479 	pass->header.timeout = 0;
2480 	pass->header.flags = 0;
2481 	pass->header.scsi_status = 0;
2482 	pass->header.sense_len = MFI_SENSE_LEN;
2483 	pass->header.data_len = len;
2484 	pass->header.cdb_len = 10;
2485 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2486 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2487 	cm->cm_data = virt;
2488 	cm->cm_len = len;
2489 	cm->cm_sg = &pass->sgl;
2490 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2491 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2492 
2493 	error = mfi_mapcmd(sc, cm);
2494 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2495 	    BUS_DMASYNC_POSTWRITE);
2496 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2497 	mfi_release_command(cm);
2498 
2499 	return (error);
2500 }
2501 
2502 static int
2503 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2504 {
2505 	struct mfi_softc *sc;
2506 	int error;
2507 
2508 	sc = dev->si_drv1;
2509 
2510 	mtx_lock(&sc->mfi_io_lock);
2511 	if (sc->mfi_detaching)
2512 		error = ENXIO;
2513 	else {
2514 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2515 		error = 0;
2516 	}
2517 	mtx_unlock(&sc->mfi_io_lock);
2518 
2519 	return (error);
2520 }
2521 
2522 static int
2523 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2524 {
2525 	struct mfi_softc *sc;
2526 	struct mfi_aen *mfi_aen_entry, *tmp;
2527 
2528 	sc = dev->si_drv1;
2529 
2530 	mtx_lock(&sc->mfi_io_lock);
2531 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2532 
2533 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2534 		if (mfi_aen_entry->p == curproc) {
2535 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2536 			    aen_link);
2537 			free(mfi_aen_entry, M_MFIBUF);
2538 		}
2539 	}
2540 	mtx_unlock(&sc->mfi_io_lock);
2541 	return (0);
2542 }
2543 
2544 static int
2545 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2546 {
2547 
2548 	switch (opcode) {
2549 	case MFI_DCMD_LD_DELETE:
2550 	case MFI_DCMD_CFG_ADD:
2551 	case MFI_DCMD_CFG_CLEAR:
2552 		sx_xlock(&sc->mfi_config_lock);
2553 		return (1);
2554 	default:
2555 		return (0);
2556 	}
2557 }
2558 
2559 static void
2560 mfi_config_unlock(struct mfi_softc *sc, int locked)
2561 {
2562 
2563 	if (locked)
2564 		sx_xunlock(&sc->mfi_config_lock);
2565 }
2566 
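/*
 * Editor's note: mfi_config_lock()/mfi_config_unlock() above are a
 * conditional lock pair.  The caller stores the return value and passes
 * it back, so only the config-changing opcodes pay for the sx lock:
 *
 *	locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
 *	... issue the command and wait for completion ...
 *	mfi_config_unlock(sc, locked);
 */
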
2567 /*
2568  * Perform pre-issue checks on commands from userland and possibly veto
2569  * them.
2570  */
2571 static int
2572 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2573 {
2574 	struct mfi_disk *ld, *ld2;
2575 	int error;
2576 	struct mfi_system_pd *syspd = NULL;
2577 	uint16_t syspd_id;
2578 	uint16_t *mbox;
2579 
2580 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2581 	error = 0;
2582 	switch (cm->cm_frame->dcmd.opcode) {
2583 	case MFI_DCMD_LD_DELETE:
2584 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2585 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2586 				break;
2587 		}
2588 		if (ld == NULL)
2589 			error = ENOENT;
2590 		else
2591 			error = mfi_disk_disable(ld);
2592 		break;
2593 	case MFI_DCMD_CFG_CLEAR:
2594 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2595 			error = mfi_disk_disable(ld);
2596 			if (error)
2597 				break;
2598 		}
2599 		if (error) {
2600 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2601 				if (ld2 == ld)
2602 					break;
2603 				mfi_disk_enable(ld2);
2604 			}
2605 		}
2606 		break;
2607 	case MFI_DCMD_PD_STATE_SET:
2608 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2609 		syspd_id = mbox[0];
2610 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2611 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2612 				if (syspd->pd_id == syspd_id)
2613 					break;
2614 			}
2615 		} else
2616 			break;
2618 		if (syspd)
2619 			error = mfi_syspd_disable(syspd);
2620 		break;
2621 	default:
2622 		break;
2623 	}
2624 	return (error);
2625 }
2626 
2627 /* Perform post-issue checks on commands from userland. */
2628 static void
2629 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2630 {
2631 	struct mfi_disk *ld, *ldn;
2632 	struct mfi_system_pd *syspd = NULL;
2633 	uint16_t syspd_id;
2634 	uint16_t *mbox;
2635 
2636 	switch (cm->cm_frame->dcmd.opcode) {
2637 	case MFI_DCMD_LD_DELETE:
2638 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2639 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2640 				break;
2641 		}
2642 		KASSERT(ld != NULL, ("volume disappeared"));
2643 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2644 			mtx_unlock(&sc->mfi_io_lock);
2645 			mtx_lock(&Giant);
2646 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2647 			mtx_unlock(&Giant);
2648 			mtx_lock(&sc->mfi_io_lock);
2649 		} else
2650 			mfi_disk_enable(ld);
2651 		break;
2652 	case MFI_DCMD_CFG_CLEAR:
2653 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2654 			mtx_unlock(&sc->mfi_io_lock);
2655 			mtx_lock(&Giant);
2656 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2657 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2658 			}
2659 			mtx_unlock(&Giant);
2660 			mtx_lock(&sc->mfi_io_lock);
2661 		} else {
2662 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2663 				mfi_disk_enable(ld);
2664 		}
2665 		break;
2666 	case MFI_DCMD_CFG_ADD:
2667 		mfi_ldprobe(sc);
2668 		break;
2669 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2670 		mfi_ldprobe(sc);
2671 		break;
2672 	case MFI_DCMD_PD_STATE_SET:
2673 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2674 		syspd_id = mbox[0];
2675 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2676 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2677 				if (syspd->pd_id == syspd_id)
2678 					break;
2679 			}
2680 		} else
2681 			break;
2683 		/* If the transition fails then enable the syspd again */
2684 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2685 			mfi_syspd_enable(syspd);
2686 		break;
2687 	}
2688 }
2689 
2690 static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2691 {
2692 	struct mfi_config_data *conf_data =
	    (struct mfi_config_data *)cm->cm_data;
2693 	struct mfi_command *ld_cm = NULL;
2694 	struct mfi_ld_info *ld_info = NULL;
2695 	int error = 0;
2696 
2697 	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2698 	    (conf_data->ld[0].params.isSSCD == 1)) {
2699 		error = 1;
2700 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2701 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2702 		    (void **)&ld_info, sizeof(*ld_info));
2703 		if (error) {
2704 			device_printf(sc->mfi_dev, "Failed to allocate "
2705 			    "for MFI_DCMD_LD_GET_INFO %d\n", error);
2706 			if (ld_info)
2707 				free(ld_info, M_MFIBUF);
2708 			return (0);
2709 		}
2710 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2711 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2712 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2713 		if (mfi_wait_command(sc, ld_cm) != 0) {
2714 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2715 			mfi_release_command(ld_cm);
2716 			free(ld_info, M_MFIBUF);
2717 			return (0);
2718 		}
2719 
2720 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2721 			free(ld_info, M_MFIBUF);
2722 			mfi_release_command(ld_cm);
2723 			return (0);
2724 		} else
2726 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2727 
2728 		if (ld_info->ld_config.params.isSSCD == 1)
2729 			error = 1;
2730 
2731 		mfi_release_command(ld_cm);
2732 		free(ld_info, M_MFIBUF);
2733 
2734 	}
2735 	return (error);
2736 }
2737 
2738 static int
2739 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2740 {
2741 	uint8_t i;
2742 	struct mfi_ioc_packet *ioc;
2743 	int sge_size, error;
2744 	struct megasas_sge *kern_sge;
2745 
2746 	ioc = (struct mfi_ioc_packet *)arg;
2747 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2748 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2749 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2750 
2751 	if (sizeof(bus_addr_t) == 8) {
2752 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2753 		cm->cm_extra_frames = 2;
2754 		sge_size = sizeof(struct mfi_sg64);
2755 	} else {
2756 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2757 		sge_size = sizeof(struct mfi_sg32);
2758 	}
2759 
2760 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2761 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2762 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2763 			1, 0,			/* algnmnt, boundary */
2764 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2765 			BUS_SPACE_MAXADDR,	/* highaddr */
2766 			NULL, NULL,		/* filter, filterarg */
2767 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2768 			2,			/* nsegments */
2769 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2770 			BUS_DMA_ALLOCNOW,	/* flags */
2771 			NULL, NULL,		/* lockfunc, lockarg */
2772 			&sc->mfi_kbuff_arr_dmat[i])) {
2773 			device_printf(sc->mfi_dev,
2774 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2775 			return (ENOMEM);
2776 		}
2777 
2778 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2779 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2780 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2781 			device_printf(sc->mfi_dev,
2782 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2783 			return (ENOMEM);
2784 		}
2785 
2786 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2787 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2788 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2789 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2790 
2791 		if (!sc->kbuff_arr[i]) {
2792 			device_printf(sc->mfi_dev,
2793 			    "Could not allocate memory for kbuff_arr info\n");
2794 			return (-1);
2795 		}
2796 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2797 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2798 
2799 		if (sizeof(bus_addr_t) == 8) {
2800 			cm->cm_frame->stp.sgl.sg64[i].addr =
2801 			    kern_sge[i].phys_addr;
2802 			cm->cm_frame->stp.sgl.sg64[i].len =
2803 			    ioc->mfi_sgl[i].iov_len;
2804 		} else {
2805 			cm->cm_frame->stp.sgl.sg32[i].addr =
2806 			    kern_sge[i].phys_addr;
2807 			cm->cm_frame->stp.sgl.sg32[i].len =
2808 			    ioc->mfi_sgl[i].iov_len;
2809 		}
2810 
2811 		error = copyin(ioc->mfi_sgl[i].iov_base,
2812 		    sc->kbuff_arr[i],
2813 		    ioc->mfi_sgl[i].iov_len);
2814 		if (error != 0) {
2815 			device_printf(sc->mfi_dev, "Copy in failed\n");
2816 			return (error);
2817 		}
2818 	}
2819 
2820 	cm->cm_flags |= MFI_CMD_MAPPED;
2821 	return (0);
2822 }
2823 
2824 static int
2825 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2826 {
2827 	struct mfi_command *cm;
2828 	struct mfi_dcmd_frame *dcmd;
2829 	void *ioc_buf = NULL;
2830 	uint32_t context;
2831 	int error = 0, locked;
2832 
2834 	if (ioc->buf_size > 0) {
2835 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2836 		if (ioc_buf == NULL) {
2837 			return (ENOMEM);
2838 		}
2839 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2840 		if (error) {
2841 			device_printf(sc->mfi_dev, "failed to copyin\n");
2842 			free(ioc_buf, M_MFIBUF);
2843 			return (error);
2844 		}
2845 	}
2846 
2847 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2848 
2849 	mtx_lock(&sc->mfi_io_lock);
2850 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2851 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2852 
2853 	/* Save context for later */
2854 	context = cm->cm_frame->header.context;
2855 
2856 	dcmd = &cm->cm_frame->dcmd;
2857 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2858 
2859 	cm->cm_sg = &dcmd->sgl;
2860 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2861 	cm->cm_data = ioc_buf;
2862 	cm->cm_len = ioc->buf_size;
2863 
2864 	/* restore context */
2865 	cm->cm_frame->header.context = context;
2866 
2867 	/* Cheat since we don't know if we're writing or reading */
2868 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2869 
2870 	error = mfi_check_command_pre(sc, cm);
2871 	if (error)
2872 		goto out;
2873 
2874 	error = mfi_wait_command(sc, cm);
2875 	if (error) {
2876 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2877 		goto out;
2878 	}
2879 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2880 	mfi_check_command_post(sc, cm);
2881 out:
2882 	mfi_release_command(cm);
2883 	mtx_unlock(&sc->mfi_io_lock);
2884 	mfi_config_unlock(sc, locked);
2885 	if (ioc->buf_size > 0)
2886 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2887 	if (ioc_buf)
2888 		free(ioc_buf, M_MFIBUF);
2889 	return (error);
2890 }
2891 
2892 #define	PTRIN(p)		((void *)(uintptr_t)(p))
2893 
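/*
 * Editor's note: PTRIN() widens a 32-bit pointer value from a compat32
 * ioctl into a native kernel pointer; casting through uintptr_t avoids
 * sign-extension surprises.  Hypothetical use with a 32-bit iov_base
 * field:
 *
 *	uint32_t ubase32 = ioc32->mfi_sgl[i].iov_base;
 *	void *ubase = PTRIN(ubase32);
 */
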
2894 static int
2895 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2896 {
2897 	struct mfi_softc *sc;
2898 	union mfi_statrequest *ms;
2899 	struct mfi_ioc_packet *ioc;
2900 #ifdef COMPAT_FREEBSD32
2901 	struct mfi_ioc_packet32 *ioc32;
2902 #endif
2903 	struct mfi_ioc_aen *aen;
2904 	struct mfi_command *cm = NULL;
2905 	uint32_t context = 0;
2906 	union mfi_sense_ptr sense_ptr;
2907 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2908 	size_t len;
2909 	int i, res;
2910 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2911 #ifdef COMPAT_FREEBSD32
2912 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2913 	struct mfi_ioc_passthru iop_swab;
2914 #endif
2915 	int error, locked;
2916 	union mfi_sgl *sgl;
2917 	sc = dev->si_drv1;
2918 	error = 0;
2919 
2920 	if (sc->adpreset)
2921 		return (EBUSY);
2922 
2923 	if (sc->hw_crit_error)
2924 		return (EBUSY);
2925 
2926 	if (sc->issuepend_done == 0)
2927 		return (EBUSY);
2928 
2929 	switch (cmd) {
2930 	case MFIIO_STATS:
2931 		ms = (union mfi_statrequest *)arg;
2932 		switch (ms->ms_item) {
2933 		case MFIQ_FREE:
2934 		case MFIQ_BIO:
2935 		case MFIQ_READY:
2936 		case MFIQ_BUSY:
2937 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2938 			    sizeof(struct mfi_qstat));
2939 			break;
2940 		default:
2941 			error = ENOIOCTL;
2942 			break;
2943 		}
2944 		break;
2945 	case MFIIO_QUERY_DISK:
2946 	{
2947 		struct mfi_query_disk *qd;
2948 		struct mfi_disk *ld;
2949 
2950 		qd = (struct mfi_query_disk *)arg;
2951 		mtx_lock(&sc->mfi_io_lock);
2952 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2953 			if (ld->ld_id == qd->array_id)
2954 				break;
2955 		}
2956 		if (ld == NULL) {
2957 			qd->present = 0;
2958 			mtx_unlock(&sc->mfi_io_lock);
2959 			return (0);
2960 		}
2961 		qd->present = 1;
2962 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2963 			qd->open = 1;
2964 		bzero(qd->devname, SPECNAMELEN + 1);
2965 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2966 		mtx_unlock(&sc->mfi_io_lock);
2967 		break;
2968 	}
2969 	case MFI_CMD:
2970 #ifdef COMPAT_FREEBSD32
2971 	case MFI_CMD32:
2972 #endif
2973 		{
2974 		devclass_t devclass;
2975 		int adapter;
2976 
2977 		ioc = (struct mfi_ioc_packet *)arg;
2978 		adapter = ioc->mfi_adapter_no;
2979 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2980 			devclass = devclass_find("mfi");
2981 			sc = devclass_get_softc(devclass, adapter);
2982 		}
2983 		mtx_lock(&sc->mfi_io_lock);
2984 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2985 			mtx_unlock(&sc->mfi_io_lock);
2986 			return (EBUSY);
2987 		}
2988 		mtx_unlock(&sc->mfi_io_lock);
2989 		locked = 0;
2990 
2991 		/*
2992 		 * save off original context since copying from user
2993 		 * will clobber some data
2994 		 */
2995 		context = cm->cm_frame->header.context;
2996 		cm->cm_frame->header.context = cm->cm_index;
2997 
2998 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2999 		    2 * MEGAMFI_FRAME_SIZE);
3000 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3001 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3002 		cm->cm_frame->header.scsi_status = 0;
3003 		cm->cm_frame->header.pad0 = 0;
3004 		if (ioc->mfi_sge_count) {
3005 			cm->cm_sg =
3006 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3007 		}
3008 		sgl = cm->cm_sg;
3009 		cm->cm_flags = 0;
3010 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3011 			cm->cm_flags |= MFI_CMD_DATAIN;
3012 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3013 			cm->cm_flags |= MFI_CMD_DATAOUT;
3014 		/* Legacy app shim */
3015 		if (cm->cm_flags == 0)
3016 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3017 		cm->cm_len = cm->cm_frame->header.data_len;
3018 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3019 #ifdef COMPAT_FREEBSD32
3020 			if (cmd == MFI_CMD) {
3021 #endif
3022 				/* Native */
3023 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3024 #ifdef COMPAT_FREEBSD32
3025 			} else {
3026 				/* 32bit on 64bit */
3027 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3028 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3029 			}
3030 #endif
3031 			cm->cm_len += cm->cm_stp_len;
3032 		}
3033 		if (cm->cm_len &&
3034 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3035 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3036 			    M_WAITOK | M_ZERO);
3037 			if (cm->cm_data == NULL) {
3038 				device_printf(sc->mfi_dev, "Malloc failed\n");
3039 				goto out;
3040 			}
3041 		} else {
3042 			cm->cm_data = 0;
3043 		}
3044 
3045 		/* restore header context */
3046 		cm->cm_frame->header.context = context;
3047 
3048 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3049 			res = mfi_stp_cmd(sc, cm, arg);
3050 			if (res != 0)
3051 				goto out;
3052 		} else {
3053 			temp = data;
3054 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3055 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3056 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3057 #ifdef COMPAT_FREEBSD32
3058 					if (cmd == MFI_CMD) {
3059 #endif
3060 						/* Native */
3061 						addr = ioc->mfi_sgl[i].iov_base;
3062 						len = ioc->mfi_sgl[i].iov_len;
3063 #ifdef COMPAT_FREEBSD32
3064 					} else {
3065 						/* 32bit on 64bit */
3066 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3067 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3068 						len = ioc32->mfi_sgl[i].iov_len;
3069 					}
3070 #endif
3071 					error = copyin(addr, temp, len);
3072 					if (error != 0) {
3073 						device_printf(sc->mfi_dev,
3074 						    "Copy in failed\n");
3075 						goto out;
3076 					}
3077 					temp = &temp[len];
3078 				}
3079 			}
3080 		}
3081 
3082 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3083 			locked = mfi_config_lock(sc,
3084 			     cm->cm_frame->dcmd.opcode);
3085 
3086 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3087 			cm->cm_frame->pass.sense_addr_lo =
3088 			    (uint32_t)cm->cm_sense_busaddr;
3089 			cm->cm_frame->pass.sense_addr_hi =
3090 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3091 		}
3092 		mtx_lock(&sc->mfi_io_lock);
3093 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3094 		if (!skip_pre_post) {
3095 			error = mfi_check_command_pre(sc, cm);
3096 			if (error) {
3097 				mtx_unlock(&sc->mfi_io_lock);
3098 				goto out;
3099 			}
3100 		}
3101 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3102 			device_printf(sc->mfi_dev,
3103 			    "Controller poll failed\n");
3104 			mtx_unlock(&sc->mfi_io_lock);
3105 			goto out;
3106 		}
3107 		if (!skip_pre_post) {
3108 			mfi_check_command_post(sc, cm);
3109 		}
3110 		mtx_unlock(&sc->mfi_io_lock);
3111 
3112 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3113 			temp = data;
3114 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3115 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3116 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3117 #ifdef COMPAT_FREEBSD32
3118 					if (cmd == MFI_CMD) {
3119 #endif
3120 						/* Native */
3121 						addr = ioc->mfi_sgl[i].iov_base;
3122 						len = ioc->mfi_sgl[i].iov_len;
3123 #ifdef COMPAT_FREEBSD32
3124 					} else {
3125 						/* 32bit on 64bit */
3126 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3127 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3128 						len = ioc32->mfi_sgl[i].iov_len;
3129 					}
3130 #endif
3131 					error = copyout(temp, addr, len);
3132 					if (error != 0) {
3133 						device_printf(sc->mfi_dev,
3134 						    "Copy out failed\n");
3135 						goto out;
3136 					}
3137 					temp = &temp[len];
3138 				}
3139 			}
3140 		}
3141 
3142 		if (ioc->mfi_sense_len) {
3143 			/* get user-space sense ptr then copy out sense */
3144 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3145 			    &sense_ptr.sense_ptr_data[0],
3146 			    sizeof(sense_ptr.sense_ptr_data));
3147 #ifdef COMPAT_FREEBSD32
3148 			if (cmd != MFI_CMD) {
3149 				/*
3150 				 * not 64bit native so zero out any address
3151 				 * over 32bit */
3152 				sense_ptr.addr.high = 0;
3153 			}
3154 #endif
3155 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3156 			    ioc->mfi_sense_len);
3157 			if (error != 0) {
3158 				device_printf(sc->mfi_dev,
3159 				    "Copy out failed\n");
3160 				goto out;
3161 			}
3162 		}
3163 
3164 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3165 out:
3166 		mfi_config_unlock(sc, locked);
3167 		if (data)
3168 			free(data, M_MFIBUF);
3169 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3170 			for (i = 0; i < 2; i++) {
3171 				if (sc->kbuff_arr[i]) {
3172 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3173 						bus_dmamap_unload(
3174 						    sc->mfi_kbuff_arr_dmat[i],
3175 						    sc->mfi_kbuff_arr_dmamap[i]
3176 						    );
3177 					if (sc->kbuff_arr[i] != NULL)
3178 						bus_dmamem_free(
3179 						    sc->mfi_kbuff_arr_dmat[i],
3180 						    sc->kbuff_arr[i],
3181 						    sc->mfi_kbuff_arr_dmamap[i]
3182 						    );
3183 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3184 						bus_dma_tag_destroy(
3185 						    sc->mfi_kbuff_arr_dmat[i]);
3186 				}
3187 			}
3188 		}
3189 		if (cm) {
3190 			mtx_lock(&sc->mfi_io_lock);
3191 			mfi_release_command(cm);
3192 			mtx_unlock(&sc->mfi_io_lock);
3193 		}
3194 
3195 		break;
3196 		}
3197 	case MFI_SET_AEN:
3198 		aen = (struct mfi_ioc_aen *)arg;
3199 		error = mfi_aen_register(sc, aen->aen_seq_num,
3200 		    aen->aen_class_locale);
3201 
3202 		break;
3203 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3204 		{
3205 			devclass_t devclass;
3206 			struct mfi_linux_ioc_packet l_ioc;
3207 			int adapter;
3208 
3209 			devclass = devclass_find("mfi");
3210 			if (devclass == NULL)
3211 				return (ENOENT);
3212 
3213 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3214 			if (error)
3215 				return (error);
3216 			adapter = l_ioc.lioc_adapter_no;
3217 			sc = devclass_get_softc(devclass, adapter);
3218 			if (sc == NULL)
3219 				return (ENOENT);
3220 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3221 			    cmd, arg, flag, td));
3223 		}
3224 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3225 		{
3226 			devclass_t devclass;
3227 			struct mfi_linux_ioc_aen l_aen;
3228 			int adapter;
3229 
3230 			devclass = devclass_find("mfi");
3231 			if (devclass == NULL)
3232 				return (ENOENT);
3233 
3234 			error = copyin(arg, &l_aen, sizeof(l_aen));
3235 			if (error)
3236 				return (error);
3237 			adapter = l_aen.laen_adapter_no;
3238 			sc = devclass_get_softc(devclass, adapter);
3239 			if (sc == NULL)
3240 				return (ENOENT);
3241 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3242 			    cmd, arg, flag, td));
3244 		}
3245 #ifdef COMPAT_FREEBSD32
3246 	case MFIIO_PASSTHRU32:
3247 		iop_swab.ioc_frame	= iop32->ioc_frame;
3248 		iop_swab.buf_size	= iop32->buf_size;
3249 		iop_swab.buf		= PTRIN(iop32->buf);
3250 		iop			= &iop_swab;
3251 		/* FALLTHROUGH */
3252 #endif
3253 	case MFIIO_PASSTHRU:
3254 		error = mfi_user_command(sc, iop);
3255 #ifdef COMPAT_FREEBSD32
3256 		if (cmd == MFIIO_PASSTHRU32)
3257 			iop32->ioc_frame = iop_swab.ioc_frame;
3258 #endif
3259 		break;
3260 	default:
3261 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3262 		error = ENOENT;
3263 		break;
3264 	}
3265 
3266 	return (error);
3267 }
3268 
3269 static int
3270 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3271 {
3272 	struct mfi_softc *sc;
3273 	struct mfi_linux_ioc_packet l_ioc;
3274 	struct mfi_linux_ioc_aen l_aen;
3275 	struct mfi_command *cm = NULL;
3276 	struct mfi_aen *mfi_aen_entry;
3277 	union mfi_sense_ptr sense_ptr;
3278 	uint32_t context = 0;
3279 	uint8_t *data = NULL, *temp;
3280 	int i;
3281 	int error, locked;
3282 
3283 	sc = dev->si_drv1;
3284 	error = 0;
3285 	switch (cmd) {
3286 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3287 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3288 		if (error != 0)
3289 			return (error);
3290 
3291 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3292 			return (EINVAL);
3293 		}
3294 
3295 		mtx_lock(&sc->mfi_io_lock);
3296 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3297 			mtx_unlock(&sc->mfi_io_lock);
3298 			return (EBUSY);
3299 		}
3300 		mtx_unlock(&sc->mfi_io_lock);
3301 		locked = 0;
3302 
3303 		/*
3304 		 * save off original context since copying from user
3305 		 * will clobber some data
3306 		 */
3307 		context = cm->cm_frame->header.context;
3308 
3309 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3310 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3311 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3312 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3313 		cm->cm_frame->header.scsi_status = 0;
3314 		cm->cm_frame->header.pad0 = 0;
3315 		if (l_ioc.lioc_sge_count)
3316 			cm->cm_sg =
3317 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3318 		cm->cm_flags = 0;
3319 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3320 			cm->cm_flags |= MFI_CMD_DATAIN;
3321 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3322 			cm->cm_flags |= MFI_CMD_DATAOUT;
3323 		cm->cm_len = cm->cm_frame->header.data_len;
3324 		if (cm->cm_len &&
3325 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3326 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3327 			    M_WAITOK | M_ZERO);
3328 			if (cm->cm_data == NULL) {
3329 				device_printf(sc->mfi_dev, "Malloc failed\n");
3330 				goto out;
3331 			}
3332 		} else {
3333 			cm->cm_data = 0;
3334 		}
3335 
3336 		/* restore header context */
3337 		cm->cm_frame->header.context = context;
3338 
3339 		temp = data;
3340 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3341 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3342 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3343 				       temp,
3344 				       l_ioc.lioc_sgl[i].iov_len);
3345 				if (error != 0) {
3346 					device_printf(sc->mfi_dev,
3347 					    "Copy in failed\n");
3348 					goto out;
3349 				}
3350 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3351 			}
3352 		}
3353 
3354 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3355 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3356 
3357 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3358 			cm->cm_frame->pass.sense_addr_lo =
3359 			    (uint32_t)cm->cm_sense_busaddr;
3360 			cm->cm_frame->pass.sense_addr_hi =
3361 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3362 		}
3363 
3364 		mtx_lock(&sc->mfi_io_lock);
3365 		error = mfi_check_command_pre(sc, cm);
3366 		if (error) {
3367 			mtx_unlock(&sc->mfi_io_lock);
3368 			goto out;
3369 		}
3370 
3371 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3372 			device_printf(sc->mfi_dev,
3373 			    "Controller poll failed\n");
3374 			mtx_unlock(&sc->mfi_io_lock);
3375 			goto out;
3376 		}
3377 
3378 		mfi_check_command_post(sc, cm);
3379 		mtx_unlock(&sc->mfi_io_lock);
3380 
3381 		temp = data;
3382 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3383 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3384 				error = copyout(temp,
3385 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3386 					l_ioc.lioc_sgl[i].iov_len);
3387 				if (error != 0) {
3388 					device_printf(sc->mfi_dev,
3389 					    "Copy out failed\n");
3390 					goto out;
3391 				}
3392 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3393 			}
3394 		}
3395 
3396 		if (l_ioc.lioc_sense_len) {
3397 			/* get user-space sense ptr then copy out sense */
3398 			bcopy(&((struct mfi_linux_ioc_packet *)arg)
3399 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3400 			    &sense_ptr.sense_ptr_data[0],
3401 			    sizeof(sense_ptr.sense_ptr_data));
3402 #ifdef __amd64__
3403 			/*
3404 			 * only 32bit Linux support so zero out any
3405 			 * address over 32bit
3406 			 */
3407 			sense_ptr.addr.high = 0;
3408 #endif
3409 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3410 			    l_ioc.lioc_sense_len);
3411 			if (error != 0) {
3412 				device_printf(sc->mfi_dev,
3413 				    "Copy out failed\n");
3414 				goto out;
3415 			}
3416 		}
3417 
3418 		error = copyout(&cm->cm_frame->header.cmd_status,
3419 			&((struct mfi_linux_ioc_packet *)arg)
3420 			->lioc_frame.hdr.cmd_status,
3421 			1);
3422 		if (error != 0) {
3423 			device_printf(sc->mfi_dev,
3424 				      "Copy out failed\n");
3425 			goto out;
3426 		}
3427 
3428 out:
3429 		mfi_config_unlock(sc, locked);
3430 		if (data)
3431 			free(data, M_MFIBUF);
3432 		if (cm) {
3433 			mtx_lock(&sc->mfi_io_lock);
3434 			mfi_release_command(cm);
3435 			mtx_unlock(&sc->mfi_io_lock);
3436 		}
3437 
3438 		return (error);
3439 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3440 		error = copyin(arg, &l_aen, sizeof(l_aen));
3441 		if (error != 0)
3442 			return (error);
3443 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3444 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3445 		    M_WAITOK);
3446 		mtx_lock(&sc->mfi_io_lock);
3447 		if (mfi_aen_entry != NULL) {
3448 			mfi_aen_entry->p = curproc;
3449 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3450 			    aen_link);
3451 		}
3452 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3453 		    l_aen.laen_class_locale);
3454 
3455 		if (error != 0) {
3456 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3457 			    aen_link);
3458 			free(mfi_aen_entry, M_MFIBUF);
3459 		}
3460 		mtx_unlock(&sc->mfi_io_lock);
3461 
3462 		return (error);
3463 	default:
3464 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3465 		error = ENOENT;
3466 		break;
3467 	}
3468 
3469 	return (error);
3470 }
3471 
3472 static int
3473 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3474 {
3475 	struct mfi_softc *sc;
3476 	int revents = 0;
3477 
3478 	sc = dev->si_drv1;
3479 
3480 	if (poll_events & (POLLIN | POLLRDNORM)) {
3481 		if (sc->mfi_aen_triggered != 0) {
3482 			revents |= poll_events & (POLLIN | POLLRDNORM);
3483 			sc->mfi_aen_triggered = 0;
3484 		}
3485 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3486 			revents |= POLLERR;
3487 		}
3488 	}
3489 
3490 	if (revents == 0) {
3491 		if (poll_events & (POLLIN | POLLRDNORM)) {
3492 			sc->mfi_poll_waiting = 1;
3493 			selrecord(td, &sc->mfi_select);
3494 		}
3495 	}
3496 
3497 	return (revents);
3498 }
3499 
3500 static void
3501 mfi_dump_all(void)
3502 {
3503 	struct mfi_softc *sc;
3504 	struct mfi_command *cm;
3505 	devclass_t dc;
3506 	time_t deadline;
3507 	int timedout;
3508 	int i;
3509 
3510 	dc = devclass_find("mfi");
3511 	if (dc == NULL) {
3512 		printf("No mfi dev class\n");
3513 		return;
3514 	}
3515 
3516 	for (i = 0; ; i++) {
3517 		sc = devclass_get_softc(dc, i);
3518 		if (sc == NULL)
3519 			break;
3520 		device_printf(sc->mfi_dev, "Dumping\n\n");
3521 		timedout = 0;
3522 		deadline = time_uptime - MFI_CMD_TIMEOUT;
3523 		mtx_lock(&sc->mfi_io_lock);
3524 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3525 			if (cm->cm_timestamp < deadline) {
3526 				device_printf(sc->mfi_dev,
3527 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3528 				    cm, (int)(time_uptime - cm->cm_timestamp));
3529 				MFI_PRINT_CMD(cm);
3530 				timedout++;
3531 			}
3532 		}
3533 
3534 #if 0
3535 		if (timedout)
3536 			MFI_DUMP_CMDS(SC);
3537 #endif
3538 
3539 		mtx_unlock(&sc->mfi_io_lock);
3540 	}
3541 
3542 	return;
3543 }
3544 
3545 static void
3546 mfi_timeout(void *data)
3547 {
3548 	struct mfi_softc *sc = (struct mfi_softc *)data;
3549 	struct mfi_command *cm;
3550 	time_t deadline;
3551 	int timedout = 0;
3552 
3553 	deadline = time_uptime - MFI_CMD_TIMEOUT;
3554 	if (sc->adpreset == 0) {
3555 		if (!mfi_tbolt_reset(sc)) {
3556 			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3557 			return;
3558 		}
3559 	}
3560 	mtx_lock(&sc->mfi_io_lock);
3561 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3562 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3563 			continue;
3564 		if (cm->cm_timestamp < deadline) {
3565 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3566 				cm->cm_timestamp = time_uptime;
3567 			} else {
3568 				device_printf(sc->mfi_dev,
3569 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3570 				     cm, (int)(time_uptime - cm->cm_timestamp)
3571 				     );
3572 				MFI_PRINT_CMD(cm);
3573 				MFI_VALIDATE_CMD(sc, cm);
3574 				timedout++;
3575 			}
3576 		}
3577 	}
3578 
3579 #if 0
3580 	if (timedout)
3581 		MFI_DUMP_CMDS(SC);
3582 #endif
3583 
3584 	mtx_unlock(&sc->mfi_io_lock);
3585 
3586 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3587 	    mfi_timeout, sc);
3588 
3589 	if (0)
3590 		mfi_dump_all();
3591 	return;
3592 }
3593
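/*
 * Editor's sketch (not driver code): mfi_timeout() above flags a command
 * when its submission timestamp predates "time_uptime - MFI_CMD_TIMEOUT".
 * The test in isolation:
 */
#if 0
static int
cmd_timed_out(time_t now, time_t stamp, int timeout_secs)
{
	/* true once the command has been outstanding > timeout_secs */
	return (stamp < now - timeout_secs);
}
#endif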