/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc,
		    struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc,
		    struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc,
		    struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
    0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
    0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

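/*
 * Per-chipset hardware access methods.  The xscale variants drive the
 * oldest (1064R) controllers, the "ppc" variants drive the 1078, GEN2,
 * and Skinny families, and mfi_attach() selects the appropriate set
 * based on the controller flags.
 */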
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

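/*
 * Post a command to the controller's inbound queue port.  The frame's
 * bus address and its frame count are packed into a single doorbell
 * write; frames are 64-byte aligned (see the frame DMA tag in
 * mfi_attach()), which appears to be what frees the low address bits to
 * carry the count and, on the newer parts, a "valid" bit.
 */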
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

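/*
 * Poll the firmware status register until the firmware reaches the
 * READY state, nudging it out of intermediate states (handshake,
 * operational, boot message pending) with the appropriate doorbell
 * write.  Each state is given MFI_RESET_WAIT_TIME seconds to make
 * progress; a controller stuck in one state is reported as faulted.
 */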
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

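/*
 * busdma callback that records the physical address of a single-segment
 * mapping; used for the contiguous allocations made during attach.
 */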
static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* For ThunderBolt controllers, get the required contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x\n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from what has been
		 * allocated for the request and reply descriptors to
		 * avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
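	/*
	 * Worked example of the sizing above: the "+ 2" yields one 64-byte
	 * header frame plus ceil(mfi_max_sge * mfi_sge_size / MFI_FRAME_SIZE)
	 * frames for the S/G list, since (x - 1) / n + 1 == ceil(x / n) for
	 * x > 0.
	 */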
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return error;
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_sync_map_info(sc);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds = %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

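/*
 * Reserve a free command and dress it up as a DCMD frame for the given
 * opcode.  If the caller passes a buffer pointer whose target is NULL, a
 * data buffer of 'bufsize' bytes is allocated on its behalf and returned
 * through *bufp.  Called with the I/O lock held; the caller owns the
 * command until it releases it.
 */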
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

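/*
 * Tell legacy (pre-ThunderBolt) firmware where the reply queue and the
 * producer/consumer indices live in host memory, using a polled
 * MFI_CMD_INIT frame.
 */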
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

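/*
 * Fetch the controller info page and use it to size I/O.  If the DCMD
 * fails, fall back to a conservative transfer size derived from the
 * scatter/gather limit.
 */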
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	free(log_state, M_MFIBUF);

	return (0);
}

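/*
 * Queue a command and sleep until it completes.  Called with the I/O
 * lock held.  The zero-opcode DCMDs that MegaCli generates are completed
 * immediately without touching the hardware.
 */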
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

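/*
 * Tear down everything mfi_attach() created, in roughly the reverse
 * order; each resource is checked individually, so this is also safe to
 * call from a partially failed attach.
 */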
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			if (sc->mfi_cmd_pool_tbolt != NULL) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
		}
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

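/*
 * Deferred from attach via the config intrhook: once interrupts are
 * enabled, probe for logical volumes (and, on Skinny controllers,
 * system PDs) so the disks attach during boot-time device discovery.
 */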
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

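/*
 * Legacy interrupt handler.  The firmware posts completed command
 * contexts into a circular reply queue in host memory; walk it from the
 * consumer index to the producer index, completing each command, then
 * re-check the producer index after a flushing register read in case
 * more completions arrived while the queue was being drained.
 */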
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

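/*
 * Send the controller a shutdown DCMD, first aborting the long-running
 * AEN and map-sync commands so the polled shutdown frame can complete.
 */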
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

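/*
 * Reconcile the driver's system PD list with the firmware's view:
 * attach children for newly exposed JBOD disks and detach the ones the
 * firmware no longer reports.
 */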
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

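/*
 * Ask the firmware for the current logical disk list and attach an mfid
 * child for any target that is not already present or pending.
 */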
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

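/*
 * Log a decoded event and react to the hot-plug related ones: host bus
 * scan requests and PD insertion/removal trigger a system PD re-probe,
 * and a logical disk going offline detaches its mfid child.
 */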
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver replays all events logged since
		 * the last shutdown; the sequence number check above keeps
		 * those old events from being acted on here.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

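/*
 * Event details are decoded from a taskqueue rather than in the
 * completion path: mfi_queue_evt() copies the detail into a queue
 * element under the I/O lock, and mfi_handle_evt() later drains the
 * queue in a context where it is safe to sleep and re-probe devices.
 */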
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

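/*
 * Arm the asynchronous event notification command.  If an AEN is already
 * outstanding whose class/locale filter covers the request, leave it
 * alone; otherwise merge the locales, keep the less severe class, and
 * abort the old command so it can be re-issued with the merged filter.
 */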
1622 static int
1623 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1624 {
1625 	struct mfi_command *cm;
1626 	struct mfi_dcmd_frame *dcmd;
1627 	union mfi_evt current_aen, prior_aen;
1628 	struct mfi_evt_detail *ed = NULL;
1629 	int error = 0;
1630 
1631 	current_aen.word = locale;
1632 	if (sc->mfi_aen_cm != NULL) {
1633 		prior_aen.word =
1634 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1635 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1636 		    !((prior_aen.members.locale & current_aen.members.locale)
1637 		    ^current_aen.members.locale)) {
1638 			return (0);
1639 		} else {
1640 			prior_aen.members.locale |= current_aen.members.locale;
1641 			if (prior_aen.members.evt_class
1642 			    < current_aen.members.evt_class)
1643 				current_aen.members.evt_class =
1644 				    prior_aen.members.evt_class;
1645 			mfi_abort(sc, &sc->mfi_aen_cm);
1646 		}
1647 	}
1648 
1649 	mtx_lock(&sc->mfi_io_lock);
1650 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1651 	    (void **)&ed, sizeof(*ed));
1652 	mtx_unlock(&sc->mfi_io_lock);
1653 	if (error) {
1654 		goto out;
1655 	}
1656 
1657 	dcmd = &cm->cm_frame->dcmd;
1658 	((uint32_t *)&dcmd->mbox)[0] = seq;
1659 	((uint32_t *)&dcmd->mbox)[1] = locale;
1660 	cm->cm_flags = MFI_CMD_DATAIN;
1661 	cm->cm_complete = mfi_aen_complete;
1662 
1663 	sc->last_seq_num = seq;
1664 	sc->mfi_aen_cm = cm;
1665 
1666 	mtx_lock(&sc->mfi_io_lock);
1667 	mfi_enqueue_ready(cm);
1668 	mfi_startio(sc);
1669 	mtx_unlock(&sc->mfi_io_lock);
1670 
1671 out:
1672 	return (error);
1673 }
1674 
1675 static void
1676 mfi_aen_complete(struct mfi_command *cm)
1677 {
1678 	struct mfi_frame_header *hdr;
1679 	struct mfi_softc *sc;
1680 	struct mfi_evt_detail *detail;
1681 	struct mfi_aen *mfi_aen_entry, *tmp;
1682 	int seq = 0, aborted = 0;
1683 
1684 	sc = cm->cm_sc;
1685 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1686 
1687 	hdr = &cm->cm_frame->header;
1688 
1689 	if (sc->mfi_aen_cm == NULL)
1690 		return;
1691 
1692 	if (sc->cm_aen_abort ||
1693 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1694 		sc->cm_aen_abort = 0;
1695 		aborted = 1;
1696 	} else {
1697 		sc->mfi_aen_triggered = 1;
1698 		if (sc->mfi_poll_waiting) {
1699 			sc->mfi_poll_waiting = 0;
1700 			selwakeup(&sc->mfi_select);
1701 		}
1702 		detail = cm->cm_data;
1703 		mfi_queue_evt(sc, detail);
1704 		seq = detail->seq + 1;
1705 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1706 		    tmp) {
1707 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1708 			    aen_link);
1709 			PROC_LOCK(mfi_aen_entry->p);
1710 			kern_psignal(mfi_aen_entry->p, SIGIO);
1711 			PROC_UNLOCK(mfi_aen_entry->p);
1712 			free(mfi_aen_entry, M_MFIBUF);
1713 		}
1714 	}
1715 
1716 	free(cm->cm_data, M_MFIBUF);
1717 	sc->mfi_aen_cm = NULL;
1718 	wakeup(&sc->mfi_aen_cm);
1719 	mfi_release_command(cm);
1720 
1721 	/* set it up again so the driver can catch more events */
1722 	if (!aborted) {
1723 		mtx_unlock(&sc->mfi_io_lock);
1724 		mfi_aen_setup(sc, seq);
1725 		mtx_lock(&sc->mfi_io_lock);
1726 	}
1727 }
1728 
1729 #define MAX_EVENTS 15
1730 
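/*
 * Replay the controller's event log from 'start_seq' up to (but not
 * including) 'stop_seq', fetching at most MAX_EVENTS entries per DCMD
 * and queueing each one for decoding.  The log is a circular buffer,
 * so the stop point may have a lower sequence number than the start.
 */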
1731 static int
1732 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1733 {
1734 	struct mfi_command *cm;
1735 	struct mfi_dcmd_frame *dcmd;
1736 	struct mfi_evt_list *el;
1737 	union mfi_evt class_locale;
1738 	int error, i, seq, size;
1739 
1740 	class_locale.members.reserved = 0;
1741 	class_locale.members.locale = mfi_event_locale;
1742 	class_locale.members.evt_class  = mfi_event_class;
1743 
1744 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1745 		* (MAX_EVENTS - 1);
1746 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1747 	if (el == NULL)
1748 		return (ENOMEM);
1749 
1750 	for (seq = start_seq;;) {
1751 		mtx_lock(&sc->mfi_io_lock);
1752 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1753 			free(el, M_MFIBUF);
1754 			mtx_unlock(&sc->mfi_io_lock);
1755 			return (EBUSY);
1756 		}
1757 		mtx_unlock(&sc->mfi_io_lock);
1758 
1759 		dcmd = &cm->cm_frame->dcmd;
1760 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1761 		dcmd->header.cmd = MFI_CMD_DCMD;
1762 		dcmd->header.timeout = 0;
1763 		dcmd->header.data_len = size;
1764 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1765 		((uint32_t *)&dcmd->mbox)[0] = seq;
1766 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1767 		cm->cm_sg = &dcmd->sgl;
1768 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1769 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1770 		cm->cm_data = el;
1771 		cm->cm_len = size;
1772 
1773 		mtx_lock(&sc->mfi_io_lock);
1774 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1775 			device_printf(sc->mfi_dev,
1776 			    "Failed to get controller entries\n");
1777 			mfi_release_command(cm);
1778 			mtx_unlock(&sc->mfi_io_lock);
1779 			break;
1780 		}
1781 
1782 		mtx_unlock(&sc->mfi_io_lock);
1783 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1784 		    BUS_DMASYNC_POSTREAD);
1785 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1786 
1787 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1788 			mtx_lock(&sc->mfi_io_lock);
1789 			mfi_release_command(cm);
1790 			mtx_unlock(&sc->mfi_io_lock);
1791 			break;
1792 		}
1793 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1794 			device_printf(sc->mfi_dev,
1795 			    "Error %d fetching controller entries\n",
1796 			    dcmd->header.cmd_status);
1797 			mtx_lock(&sc->mfi_io_lock);
1798 			mfi_release_command(cm);
1799 			mtx_unlock(&sc->mfi_io_lock);
1800 			break;
1801 		}
1802 		mtx_lock(&sc->mfi_io_lock);
1803 		mfi_release_command(cm);
1804 		mtx_unlock(&sc->mfi_io_lock);
1805 
1806 		for (i = 0; i < el->count; i++) {
1807 			/*
1808 			 * If this event is newer than 'stop_seq' then
1809 			 * break out of the loop.  Note that the log
1810 			 * is a circular buffer so we have to handle
1811 			 * the case that our stop point is earlier in
1812 			 * the buffer than our start point.
1813 			 */
1814 			if (el->event[i].seq >= stop_seq) {
1815 				if (start_seq <= stop_seq)
1816 					break;
1817 				else if (el->event[i].seq < start_seq)
1818 					break;
1819 			}
1820 			mtx_lock(&sc->mfi_io_lock);
1821 			mfi_queue_evt(sc, &el->event[i]);
1822 			mtx_unlock(&sc->mfi_io_lock);
1823 		}
1824 		seq = el->event[el->count - 1].seq + 1;
1825 	}
1826 
1827 	free(el, M_MFIBUF);
1828 	return (0);
1829 }
1830 
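/*
 * Query the controller for logical drive 'id' and, unless it is an
 * SSCD (solid-state cache) volume, attach an mfid child device for it.
 */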
1831 static int
1832 mfi_add_ld(struct mfi_softc *sc, int id)
1833 {
1834 	struct mfi_command *cm;
1835 	struct mfi_dcmd_frame *dcmd = NULL;
1836 	struct mfi_ld_info *ld_info = NULL;
1837 	struct mfi_disk_pending *ld_pend;
1838 	int error;
1839 
1840 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1841 
1842 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1843 	if (ld_pend != NULL) {
1844 		ld_pend->ld_id = id;
1845 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1846 	}
1847 
1848 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1849 	    (void **)&ld_info, sizeof(*ld_info));
1850 	if (error) {
1851 		device_printf(sc->mfi_dev,
		    "Failed to allocate MFI_DCMD_LD_GET_INFO: %d\n", error);
1853 		if (ld_info)
1854 			free(ld_info, M_MFIBUF);
1855 		return (error);
1856 	}
1857 	cm->cm_flags = MFI_CMD_DATAIN;
1858 	dcmd = &cm->cm_frame->dcmd;
1859 	dcmd->mbox[0] = id;
1860 	if (mfi_wait_command(sc, cm) != 0) {
1861 		device_printf(sc->mfi_dev,
1862 		    "Failed to get logical drive: %d\n", id);
1863 		free(ld_info, M_MFIBUF);
1864 		return (0);
1865 	}
1866 	if (ld_info->ld_config.params.isSSCD != 1)
1867 		mfi_add_ld_complete(cm);
1868 	else {
1869 		mfi_release_command(cm);
		if (ld_info)		/* for SSCD volumes, free ld_info here */
1871 			free(ld_info, M_MFIBUF);
1872 	}
1873 	return (0);
1874 }
1875 
1876 static void
1877 mfi_add_ld_complete(struct mfi_command *cm)
1878 {
1879 	struct mfi_frame_header *hdr;
1880 	struct mfi_ld_info *ld_info;
1881 	struct mfi_softc *sc;
1882 	device_t child;
1883 
1884 	sc = cm->cm_sc;
1885 	hdr = &cm->cm_frame->header;
1886 	ld_info = cm->cm_private;
1887 
1888 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1889 		free(ld_info, M_MFIBUF);
1890 		wakeup(&sc->mfi_map_sync_cm);
1891 		mfi_release_command(cm);
1892 		return;
1893 	}
1894 	wakeup(&sc->mfi_map_sync_cm);
1895 	mfi_release_command(cm);
1896 
1897 	mtx_unlock(&sc->mfi_io_lock);
1898 	mtx_lock(&Giant);
1899 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1900 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1901 		free(ld_info, M_MFIBUF);
1902 		mtx_unlock(&Giant);
1903 		mtx_lock(&sc->mfi_io_lock);
1904 		return;
1905 	}
1906 
1907 	device_set_ivars(child, ld_info);
1908 	device_set_desc(child, "MFI Logical Disk");
1909 	bus_generic_attach(sc->mfi_dev);
1910 	mtx_unlock(&Giant);
1911 	mtx_lock(&sc->mfi_io_lock);
1912 }
1913 
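/*
 * Query the controller for physical drive 'id' and attach an mfisyspd
 * child device for it if the drive is in the SYSTEM (JBOD) state.
 */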
static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1915 {
1916 	struct mfi_command *cm;
1917 	struct mfi_dcmd_frame *dcmd = NULL;
1918 	struct mfi_pd_info *pd_info = NULL;
1919 	struct mfi_system_pending *syspd_pend;
1920 	int error;
1921 
1922 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1923 
1924 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1925 	if (syspd_pend != NULL) {
1926 		syspd_pend->pd_id = id;
1927 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1928 	}
1929 
1930 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1931 		(void **)&pd_info, sizeof(*pd_info));
1932 	if (error) {
1933 		device_printf(sc->mfi_dev,
		    "Failed to allocate MFI_DCMD_PD_GET_INFO: %d\n",
1935 		    error);
1936 		if (pd_info)
1937 			free(pd_info, M_MFIBUF);
1938 		return (error);
1939 	}
1940 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1941 	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
1943 	dcmd->header.scsi_status = 0;
1944 	dcmd->header.pad0 = 0;
1945 	if (mfi_mapcmd(sc, cm) != 0) {
1946 		device_printf(sc->mfi_dev,
1947 		    "Failed to get physical drive info %d\n", id);
1948 		free(pd_info, M_MFIBUF);
1949 		return (0);
1950 	}
1951 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1952 	    BUS_DMASYNC_POSTREAD);
1953 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1954 	mfi_add_sys_pd_complete(cm);
1955 	return (0);
1956 }
1957 
1958 static void
1959 mfi_add_sys_pd_complete(struct mfi_command *cm)
1960 {
1961 	struct mfi_frame_header *hdr;
1962 	struct mfi_pd_info *pd_info;
1963 	struct mfi_softc *sc;
1964 	device_t child;
1965 
1966 	sc = cm->cm_sc;
1967 	hdr = &cm->cm_frame->header;
1968 	pd_info = cm->cm_private;
1969 
1970 	if (hdr->cmd_status != MFI_STAT_OK) {
1971 		free(pd_info, M_MFIBUF);
1972 		mfi_release_command(cm);
1973 		return;
1974 	}
1975 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1976 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1977 		    pd_info->ref.v.device_id);
1978 		free(pd_info, M_MFIBUF);
1979 		mfi_release_command(cm);
1980 		return;
1981 	}
1982 	mfi_release_command(cm);
1983 
1984 	mtx_unlock(&sc->mfi_io_lock);
1985 	mtx_lock(&Giant);
1986 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1987 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1988 		free(pd_info, M_MFIBUF);
1989 		mtx_unlock(&Giant);
1990 		mtx_lock(&sc->mfi_io_lock);
1991 		return;
1992 	}
1993 
1994 	device_set_ivars(child, pd_info);
1995 	device_set_desc(child, "MFI System PD");
1996 	bus_generic_attach(sc->mfi_dev);
1997 	mtx_unlock(&Giant);
1998 	mtx_lock(&sc->mfi_io_lock);
1999 }
2000 
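/*
 * Pull the next bio off the deferred queue and translate it into an
 * MFI command; the bio is put back if no command could be built.
 */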
2001 static struct mfi_command *
2002 mfi_bio_command(struct mfi_softc *sc)
2003 {
2004 	struct bio *bio;
2005 	struct mfi_command *cm = NULL;
2006 
	/* Reserve two commands to avoid starving ioctls */
2008 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2009 		return (NULL);
2010 	}
2011 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2012 		return (NULL);
2013 	}
2014 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2015 		cm = mfi_build_ldio(sc, bio);
	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2017 		cm = mfi_build_syspdio(sc, bio);
2018 	}
	if (cm == NULL)
		mfi_enqueue_bio(sc, bio);
	return (cm);
2022 }
2023 
2024 /*
2025  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2026  */
2027 
2028 int
2029 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2030 {
2031 	int cdb_len;
2032 
	if (((lba & 0x1fffff) == lba) &&
	    ((block_count & 0xff) == block_count) &&
	    (byte2 == 0)) {
2036 		/* We can fit in a 6 byte cdb */
2037 		struct scsi_rw_6 *scsi_cmd;
2038 
2039 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2040 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2041 		scsi_ulto3b(lba, scsi_cmd->addr);
2042 		scsi_cmd->length = block_count & 0xff;
2043 		scsi_cmd->control = 0;
2044 		cdb_len = sizeof(*scsi_cmd);
2045 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2046 		/* Need a 10 byte CDB */
2047 		struct scsi_rw_10 *scsi_cmd;
2048 
2049 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2050 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2051 		scsi_cmd->byte2 = byte2;
2052 		scsi_ulto4b(lba, scsi_cmd->addr);
2053 		scsi_cmd->reserved = 0;
2054 		scsi_ulto2b(block_count, scsi_cmd->length);
2055 		scsi_cmd->control = 0;
2056 		cdb_len = sizeof(*scsi_cmd);
2057 	} else if (((block_count & 0xffffffff) == block_count) &&
2058 	    ((lba & 0xffffffff) == lba)) {
2059 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2060 		struct scsi_rw_12 *scsi_cmd;
2061 
2062 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2063 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2064 		scsi_cmd->byte2 = byte2;
2065 		scsi_ulto4b(lba, scsi_cmd->addr);
2066 		scsi_cmd->reserved = 0;
2067 		scsi_ulto4b(block_count, scsi_cmd->length);
2068 		scsi_cmd->control = 0;
2069 		cdb_len = sizeof(*scsi_cmd);
2070 	} else {
2071 		/*
2072 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2073 		 * than 2^32
2074 		 */
2075 		struct scsi_rw_16 *scsi_cmd;
2076 
2077 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2078 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2079 		scsi_cmd->byte2 = byte2;
2080 		scsi_u64to8b(lba, scsi_cmd->addr);
2081 		scsi_cmd->reserved = 0;
2082 		scsi_ulto4b(block_count, scsi_cmd->length);
2083 		scsi_cmd->control = 0;
2084 		cdb_len = sizeof(*scsi_cmd);
2085 	}
2086 
	return (cdb_len);
2088 }
2089 
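/*
 * Translate a bio aimed at a system PD into a SCSI pass-through frame,
 * building a read/write CDB sized to fit the LBA and block count.
 */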
2090 static struct mfi_command *
2091 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2092 {
2093 	struct mfi_command *cm;
2094 	struct mfi_pass_frame *pass;
2095 	uint32_t context = 0;
2096 	int flags = 0, blkcount = 0, readop;
2097 	uint8_t cdb_len;
2098 
2099 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2100 	    return (NULL);
2101 
2102 	/* Zero out the MFI frame */
2103 	context = cm->cm_frame->header.context;
2104 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2105 	cm->cm_frame->header.context = context;
2106 	pass = &cm->cm_frame->pass;
2107 	bzero(pass->cdb, 16);
2108 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2109 	switch (bio->bio_cmd & 0x03) {
2110 	case BIO_READ:
2111 		flags = MFI_CMD_DATAIN;
2112 		readop = 1;
2113 		break;
2114 	case BIO_WRITE:
2115 		flags = MFI_CMD_DATAOUT;
2116 		readop = 0;
2117 		break;
2118 	default:
2119 		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x", bio->bio_cmd);
2121 	}
2122 
2123 	/* Cheat with the sector length to avoid a non-constant division */
2124 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2125 	/* Fill the LBA and Transfer length in CDB */
2126 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2127 	    pass->cdb);
2128 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2129 	pass->header.lun_id = 0;
2130 	pass->header.timeout = 0;
2131 	pass->header.flags = 0;
2132 	pass->header.scsi_status = 0;
2133 	pass->header.sense_len = MFI_SENSE_LEN;
2134 	pass->header.data_len = bio->bio_bcount;
2135 	pass->header.cdb_len = cdb_len;
2136 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2137 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2138 	cm->cm_complete = mfi_bio_complete;
2139 	cm->cm_private = bio;
2140 	cm->cm_data = bio->bio_data;
2141 	cm->cm_len = bio->bio_bcount;
2142 	cm->cm_sg = &pass->sgl;
2143 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2144 	cm->cm_flags = flags;
2145 	return (cm);
2146 }
2147 
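/*
 * Translate a bio aimed at a logical disk into a native LD read/write
 * frame; the LBA is carried in the frame itself rather than in a CDB.
 */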
2148 static struct mfi_command *
2149 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2150 {
2151 	struct mfi_io_frame *io;
2152 	struct mfi_command *cm;
2153 	int flags;
2154 	uint32_t blkcount;
2155 	uint32_t context = 0;
2156 
2157 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2158 	    return (NULL);
2159 
2160 	/* Zero out the MFI frame */
2161 	context = cm->cm_frame->header.context;
2162 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2163 	cm->cm_frame->header.context = context;
2164 	io = &cm->cm_frame->io;
2165 	switch (bio->bio_cmd & 0x03) {
2166 	case BIO_READ:
2167 		io->header.cmd = MFI_CMD_LD_READ;
2168 		flags = MFI_CMD_DATAIN;
2169 		break;
2170 	case BIO_WRITE:
2171 		io->header.cmd = MFI_CMD_LD_WRITE;
2172 		flags = MFI_CMD_DATAOUT;
2173 		break;
2174 	default:
2175 		/* TODO: what about BIO_DELETE??? */
		panic("Unsupported bio command %x", bio->bio_cmd);
2177 	}
2178 
2179 	/* Cheat with the sector length to avoid a non-constant division */
2180 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2181 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2182 	io->header.timeout = 0;
2183 	io->header.flags = 0;
2184 	io->header.scsi_status = 0;
2185 	io->header.sense_len = MFI_SENSE_LEN;
2186 	io->header.data_len = blkcount;
2187 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2188 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2189 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2190 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2191 	cm->cm_complete = mfi_bio_complete;
2192 	cm->cm_private = bio;
2193 	cm->cm_data = bio->bio_data;
2194 	cm->cm_len = bio->bio_bcount;
2195 	cm->cm_sg = &io->sgl;
2196 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2197 	cm->cm_flags = flags;
2198 	return (cm);
2199 }
2200 
2201 static void
2202 mfi_bio_complete(struct mfi_command *cm)
2203 {
2204 	struct bio *bio;
2205 	struct mfi_frame_header *hdr;
2206 	struct mfi_softc *sc;
2207 
2208 	bio = cm->cm_private;
2209 	hdr = &cm->cm_frame->header;
2210 	sc = cm->cm_sc;
2211 
2212 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2213 		bio->bio_flags |= BIO_ERROR;
2214 		bio->bio_error = EIO;
		device_printf(sc->mfi_dev, "I/O error, cmd_status=%d "
		    "scsi_status=%d\n", hdr->cmd_status, hdr->scsi_status);
2217 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2218 	} else if (cm->cm_error != 0) {
2219 		bio->bio_flags |= BIO_ERROR;
2220 	}
2221 
2222 	mfi_release_command(cm);
2223 	mfi_disk_complete(bio);
2224 }
2225 
2226 void
2227 mfi_startio(struct mfi_softc *sc)
2228 {
2229 	struct mfi_command *cm;
2230 	struct ccb_hdr *ccbh;
2231 
2232 	for (;;) {
2233 		/* Don't bother if we're short on resources */
2234 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2235 			break;
2236 
2237 		/* Try a command that has already been prepared */
2238 		cm = mfi_dequeue_ready(sc);
2239 
2240 		if (cm == NULL) {
2241 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2242 				cm = sc->mfi_cam_start(ccbh);
2243 		}
2244 
2245 		/* Nope, so look for work on the bioq */
2246 		if (cm == NULL)
2247 			cm = mfi_bio_command(sc);
2248 
2249 		/* No work available, so exit */
2250 		if (cm == NULL)
2251 			break;
2252 
2253 		/* Send the command to the controller */
2254 		if (mfi_mapcmd(sc, cm) != 0) {
2255 			mfi_requeue_ready(cm);
2256 			break;
2257 		}
2258 	}
2259 }
2260 
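/*
 * Map a command's data buffer for DMA and hand the frame to the
 * controller.  If the busdma load cannot complete immediately, the
 * queue is frozen and the frame is sent later from the load callback.
 */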
2261 int
2262 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2263 {
2264 	int error, polled;
2265 
2266 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2267 
	if ((cm->cm_data != NULL) &&
	    (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2269 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2270 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2271 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2272 		if (error == EINPROGRESS) {
2273 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2274 			return (0);
2275 		}
2276 	} else {
2277 		if (sc->MFA_enabled)
2278 			error = mfi_tbolt_send_frame(sc, cm);
2279 		else
2280 			error = mfi_send_frame(sc, cm);
2281 	}
2282 
2283 	return (error);
2284 }
2285 
2286 static void
2287 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2288 {
2289 	struct mfi_frame_header *hdr;
2290 	struct mfi_command *cm;
2291 	union mfi_sgl *sgl;
2292 	struct mfi_softc *sc;
2293 	int i, j, first, dir;
2294 	int sge_size;
2295 
2296 	cm = (struct mfi_command *)arg;
2297 	sc = cm->cm_sc;
2298 	hdr = &cm->cm_frame->header;
2299 	sgl = cm->cm_sg;
2300 
2301 	if (error) {
2302 		printf("error %d in callback\n", error);
2303 		cm->cm_error = error;
2304 		mfi_complete(sc, cm);
2305 		return;
2306 	}
	/*
	 * Use the IEEE SGL only for I/Os on a SKINNY controller.
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64, based on sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
2313 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2314 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2315 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2316 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2317 		for (i = 0; i < nsegs; i++) {
2318 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2319 			sgl->sg_skinny[i].len = segs[i].ds_len;
2320 			sgl->sg_skinny[i].flag = 0;
2321 		}
2322 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2323 		sge_size = sizeof(struct mfi_sg_skinny);
2324 		hdr->sg_count = nsegs;
2325 	} else {
2326 		j = 0;
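		/*
		 * For MFI_CMD_STP the first cm_stp_len bytes of the first
		 * segment get their own SG entry; the remaining data
		 * entries are adjusted to skip over them.
		 */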
2327 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2328 			first = cm->cm_stp_len;
2329 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2330 				sgl->sg32[j].addr = segs[0].ds_addr;
2331 				sgl->sg32[j++].len = first;
2332 			} else {
2333 				sgl->sg64[j].addr = segs[0].ds_addr;
2334 				sgl->sg64[j++].len = first;
2335 			}
2336 		} else
2337 			first = 0;
2338 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2339 			for (i = 0; i < nsegs; i++) {
2340 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2341 				sgl->sg32[j++].len = segs[i].ds_len - first;
2342 				first = 0;
2343 			}
2344 		} else {
2345 			for (i = 0; i < nsegs; i++) {
2346 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2347 				sgl->sg64[j++].len = segs[i].ds_len - first;
2348 				first = 0;
2349 			}
2350 			hdr->flags |= MFI_FRAME_SGL64;
2351 		}
2352 		hdr->sg_count = j;
2353 		sge_size = sc->mfi_sge_size;
2354 	}
2355 
2356 	dir = 0;
2357 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2358 		dir |= BUS_DMASYNC_PREREAD;
2359 		hdr->flags |= MFI_FRAME_DIR_READ;
2360 	}
2361 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2362 		dir |= BUS_DMASYNC_PREWRITE;
2363 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2364 	}
2365 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2366 	cm->cm_flags |= MFI_CMD_MAPPED;
2367 
2368 	/*
2369 	 * Instead of calculating the total number of frames in the
2370 	 * compound frame, it's already assumed that there will be at
2371 	 * least 1 frame, so don't compensate for the modulo of the
2372 	 * following division.
2373 	 */
2374 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2375 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2376 
	if (sc->MFA_enabled)
		mfi_tbolt_send_frame(sc, cm);
	else
		mfi_send_frame(sc, cm);
2381 
2382 	return;
2383 }
2384 
2385 static int
2386 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2387 {
2388 	struct mfi_frame_header *hdr;
2389 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2390 
2391 	hdr = &cm->cm_frame->header;
2392 
2393 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2394 		cm->cm_timestamp = time_uptime;
2395 		mfi_enqueue_busy(cm);
2396 	} else {
2397 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2398 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2399 	}
2400 
2401 	/*
2402 	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the low 6 bits zero.  For whatever reason, the
2404 	 * hardware wants the address shifted right by three, leaving just
2405 	 * 3 zero bits.  These three bits are then used as a prefetching
2406 	 * hint for the hardware to predict how many frames need to be
2407 	 * fetched across the bus.  If a command has more than 8 frames
2408 	 * then the 3 bits are set to 0x7 and the firmware uses other
2409 	 * information in the command to determine the total amount to fetch.
2410 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2411 	 * is enough for both 32bit and 64bit systems.
2412 	 */
2413 	if (cm->cm_extra_frames > 7)
2414 		cm->cm_extra_frames = 7;
2415 
2416 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2417 
2418 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2419 		return (0);
2420 
2421 	/* This is a polled command, so busy-wait for it to complete. */
2422 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2423 		DELAY(1000);
2424 		tm -= 1;
2425 		if (tm <= 0)
2426 			break;
2427 	}
2428 
2429 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2430 		device_printf(sc->mfi_dev, "Frame %p timed out "
2431 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2432 		return (ETIMEDOUT);
2433 	}
2434 
2435 	return (0);
2436 }
2437 
2439 void
2440 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2441 {
2442 	int dir;
2443 
2444 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2445 		dir = 0;
2446 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2447 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2448 			dir |= BUS_DMASYNC_POSTREAD;
2449 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2450 			dir |= BUS_DMASYNC_POSTWRITE;
2451 
2452 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2453 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2454 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2455 	}
2456 
2457 	cm->cm_flags |= MFI_CMD_COMPLETED;
2458 
2459 	if (cm->cm_complete != NULL)
2460 		cm->cm_complete(cm);
2461 	else
2462 		wakeup(cm);
2463 }
2464 
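/*
 * Issue a polled ABORT frame for the command pointed to by 'cm_abort',
 * then wait up to 25 seconds for the victim command to complete.  If
 * it never does, force its completion handler to run.
 */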
2465 static int
2466 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2467 {
2468 	struct mfi_command *cm;
2469 	struct mfi_abort_frame *abort;
2470 	int i = 0;
2471 	uint32_t context = 0;
2472 
2473 	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}
2477 
2478 	/* Zero out the MFI frame */
2479 	context = cm->cm_frame->header.context;
2480 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2481 	cm->cm_frame->header.context = context;
2482 
2483 	abort = &cm->cm_frame->abort;
2484 	abort->header.cmd = MFI_CMD_ABORT;
2485 	abort->header.flags = 0;
2486 	abort->header.scsi_status = 0;
2487 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2488 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2489 	abort->abort_mfi_addr_hi =
2490 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2491 	cm->cm_data = NULL;
2492 	cm->cm_flags = MFI_CMD_POLLED;
2493 
2494 	mfi_mapcmd(sc, cm);
2495 	mfi_release_command(cm);
2496 
2497 	mtx_unlock(&sc->mfi_io_lock);
2498 	while (i < 5 && *cm_abort != NULL) {
		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2501 		i++;
2502 	}
2503 	if (*cm_abort != NULL) {
2504 		/* Force a complete if command didn't abort */
2505 		mtx_lock(&sc->mfi_io_lock);
2506 		(*cm_abort)->cm_complete(*cm_abort);
2507 		mtx_unlock(&sc->mfi_io_lock);
2508 	}
2509 
2510 	return (0);
2511 }
2512 
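/*
 * Write 'len' bytes at 'lba' to logical volume 'id' using a polled
 * command; completion is spun on rather than slept on.
 */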
2513 int
2514 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2515      int len)
2516 {
2517 	struct mfi_command *cm;
2518 	struct mfi_io_frame *io;
2519 	int error;
2520 	uint32_t context = 0;
2521 
2522 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2523 		return (EBUSY);
2524 
2525 	/* Zero out the MFI frame */
2526 	context = cm->cm_frame->header.context;
2527 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2528 	cm->cm_frame->header.context = context;
2529 
2530 	io = &cm->cm_frame->io;
2531 	io->header.cmd = MFI_CMD_LD_WRITE;
2532 	io->header.target_id = id;
2533 	io->header.timeout = 0;
2534 	io->header.flags = 0;
2535 	io->header.scsi_status = 0;
2536 	io->header.sense_len = MFI_SENSE_LEN;
2537 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2538 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2539 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2540 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2541 	io->lba_lo = lba & 0xffffffff;
2542 	cm->cm_data = virt;
2543 	cm->cm_len = len;
2544 	cm->cm_sg = &io->sgl;
2545 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2546 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2547 
2548 	error = mfi_mapcmd(sc, cm);
2549 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2550 	    BUS_DMASYNC_POSTWRITE);
2551 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2552 	mfi_release_command(cm);
2553 
2554 	return (error);
2555 }
2556 
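/*
 * As above, but writes to a system PD via a polled SCSI pass-through
 * command.
 */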
2557 int
2558 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2559     int len)
2560 {
2561 	struct mfi_command *cm;
2562 	struct mfi_pass_frame *pass;
2563 	int error, readop, cdb_len;
2564 	uint32_t blkcount;
2565 
2566 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2567 		return (EBUSY);
2568 
2569 	pass = &cm->cm_frame->pass;
2570 	bzero(pass->cdb, 16);
2571 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2572 
2573 	readop = 0;
2574 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2575 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2576 	pass->header.target_id = id;
2577 	pass->header.timeout = 0;
2578 	pass->header.flags = 0;
2579 	pass->header.scsi_status = 0;
2580 	pass->header.sense_len = MFI_SENSE_LEN;
2581 	pass->header.data_len = len;
2582 	pass->header.cdb_len = cdb_len;
2583 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2584 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2585 	cm->cm_data = virt;
2586 	cm->cm_len = len;
2587 	cm->cm_sg = &pass->sgl;
2588 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2589 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2590 
2591 	error = mfi_mapcmd(sc, cm);
2592 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2593 	    BUS_DMASYNC_POSTWRITE);
2594 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2595 	mfi_release_command(cm);
2596 
2597 	return (error);
2598 }
2599 
2600 static int
2601 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2602 {
2603 	struct mfi_softc *sc;
2604 	int error;
2605 
2606 	sc = dev->si_drv1;
2607 
2608 	mtx_lock(&sc->mfi_io_lock);
2609 	if (sc->mfi_detaching)
2610 		error = ENXIO;
2611 	else {
2612 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2613 		error = 0;
2614 	}
2615 	mtx_unlock(&sc->mfi_io_lock);
2616 
2617 	return (error);
2618 }
2619 
2620 static int
2621 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2622 {
2623 	struct mfi_softc *sc;
2624 	struct mfi_aen *mfi_aen_entry, *tmp;
2625 
2626 	sc = dev->si_drv1;
2627 
2628 	mtx_lock(&sc->mfi_io_lock);
2629 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2630 
2631 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2632 		if (mfi_aen_entry->p == curproc) {
2633 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2634 			    aen_link);
2635 			free(mfi_aen_entry, M_MFIBUF);
2636 		}
2637 	}
2638 	mtx_unlock(&sc->mfi_io_lock);
2639 	return (0);
2640 }
2641 
2642 static int
2643 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2644 {
2645 
2646 	switch (opcode) {
2647 	case MFI_DCMD_LD_DELETE:
2648 	case MFI_DCMD_CFG_ADD:
2649 	case MFI_DCMD_CFG_CLEAR:
2650 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2651 		sx_xlock(&sc->mfi_config_lock);
2652 		return (1);
2653 	default:
2654 		return (0);
2655 	}
2656 }
2657 
2658 static void
2659 mfi_config_unlock(struct mfi_softc *sc, int locked)
2660 {
2661 
2662 	if (locked)
2663 		sx_xunlock(&sc->mfi_config_lock);
2664 }
2665 
2666 /*
2667  * Perform pre-issue checks on commands from userland and possibly veto
2668  * them.
2669  */
2670 static int
2671 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2672 {
2673 	struct mfi_disk *ld, *ld2;
2674 	int error;
2675 	struct mfi_system_pd *syspd = NULL;
2676 	uint16_t syspd_id;
2677 	uint16_t *mbox;
2678 
2679 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2680 	error = 0;
2681 	switch (cm->cm_frame->dcmd.opcode) {
2682 	case MFI_DCMD_LD_DELETE:
2683 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2684 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2685 				break;
2686 		}
2687 		if (ld == NULL)
2688 			error = ENOENT;
2689 		else
2690 			error = mfi_disk_disable(ld);
2691 		break;
2692 	case MFI_DCMD_CFG_CLEAR:
2693 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2694 			error = mfi_disk_disable(ld);
2695 			if (error)
2696 				break;
2697 		}
2698 		if (error) {
2699 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2700 				if (ld2 == ld)
2701 					break;
2702 				mfi_disk_enable(ld2);
2703 			}
2704 		}
2705 		break;
2706 	case MFI_DCMD_PD_STATE_SET:
2707 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2708 		syspd_id = mbox[0];
2709 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2710 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2711 				if (syspd->pd_id == syspd_id)
2712 					break;
2713 			}
		} else
			break;
2717 		if (syspd)
2718 			error = mfi_syspd_disable(syspd);
2719 		break;
2720 	default:
2721 		break;
2722 	}
2723 	return (error);
2724 }
2725 
2726 /* Perform post-issue checks on commands from userland. */
2727 static void
2728 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2729 {
2730 	struct mfi_disk *ld, *ldn;
2731 	struct mfi_system_pd *syspd = NULL;
2732 	uint16_t syspd_id;
2733 	uint16_t *mbox;
2734 
2735 	switch (cm->cm_frame->dcmd.opcode) {
2736 	case MFI_DCMD_LD_DELETE:
2737 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2738 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2739 				break;
2740 		}
		KASSERT(ld != NULL, ("volume disappeared"));
2742 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2743 			mtx_unlock(&sc->mfi_io_lock);
2744 			mtx_lock(&Giant);
2745 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2746 			mtx_unlock(&Giant);
2747 			mtx_lock(&sc->mfi_io_lock);
2748 		} else
2749 			mfi_disk_enable(ld);
2750 		break;
2751 	case MFI_DCMD_CFG_CLEAR:
2752 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2753 			mtx_unlock(&sc->mfi_io_lock);
2754 			mtx_lock(&Giant);
2755 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2756 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2757 			}
2758 			mtx_unlock(&Giant);
2759 			mtx_lock(&sc->mfi_io_lock);
2760 		} else {
2761 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2762 				mfi_disk_enable(ld);
2763 		}
2764 		break;
2765 	case MFI_DCMD_CFG_ADD:
2766 		mfi_ldprobe(sc);
2767 		break;
2768 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2769 		mfi_ldprobe(sc);
2770 		break;
2771 	case MFI_DCMD_PD_STATE_SET:
2772 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2773 		syspd_id = mbox[0];
2774 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2776 				if (syspd->pd_id == syspd_id)
2777 					break;
2778 			}
		} else
			break;
2782 		/* If the transition fails then enable the syspd again */
2783 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2784 			mfi_syspd_enable(syspd);
2785 		break;
2786 	}
2787 }
2788 
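/*
 * Return nonzero if a userland config command creates or deletes an
 * SSCD (solid-state cache) volume; such volumes have no mfid child
 * attached, so the caller skips the pre/post disk enable hooks.
 */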
2789 static int
2790 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2791 {
2792 	struct mfi_config_data *conf_data;
2793 	struct mfi_command *ld_cm = NULL;
2794 	struct mfi_ld_info *ld_info = NULL;
2795 	struct mfi_ld_config *ld;
2796 	char *p;
2797 	int error = 0;
2798 
2799 	conf_data = (struct mfi_config_data *)cm->cm_data;
2800 
2801 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2802 		p = (char *)conf_data->array;
2803 		p += conf_data->array_size * conf_data->array_count;
2804 		ld = (struct mfi_ld_config *)p;
2805 		if (ld->params.isSSCD == 1)
2806 			error = 1;
2807 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate "
			    "MFI_DCMD_LD_GET_INFO: %d\n", error);
			if (ld_info)
				free(ld_info, M_MFIBUF);
			return (0);
		}
2817 		ld_cm->cm_flags = MFI_CMD_DATAIN;
		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2819 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get logical drive info\n");
			mfi_release_command(ld_cm);
			free(ld_info, M_MFIBUF);
			return (0);
		}
2826 
		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			free(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return (0);
		} else
			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2834 
2835 		if (ld_info->ld_config.params.isSSCD == 1)
2836 			error = 1;
2837 
2838 		mfi_release_command(ld_cm);
2839 		free(ld_info, M_MFIBUF);
	}
	return (error);
2843 }
2844 
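/*
 * Set up kernel bounce buffers for each user S/G element of an
 * MFI_CMD_STP ioctl: create a DMA tag and map per element, copy the
 * user data in, and point the frame's SG list at the kernel buffers.
 */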
2845 static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2847 {
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	struct megasas_sge *kern_sge;
	int sge_size, error;

	ioc = (struct mfi_ioc_packet *)arg;
	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2856 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2857 
2858 	if (sizeof(bus_addr_t) == 8) {
2859 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2860 		cm->cm_extra_frames = 2;
2861 		sge_size = sizeof(struct mfi_sg64);
2862 	} else {
		cm->cm_extra_frames =
		    (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2864 		sge_size = sizeof(struct mfi_sg32);
2865 	}
2866 
2867 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2868 	for (i = 0; i < ioc->mfi_sge_count; i++) {
		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2870 			1, 0,			/* algnmnt, boundary */
2871 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2872 			BUS_SPACE_MAXADDR,	/* highaddr */
2873 			NULL, NULL,		/* filter, filterarg */
2874 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2875 			2,			/* nsegments */
2876 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2877 			BUS_DMA_ALLOCNOW,	/* flags */
2878 			NULL, NULL,		/* lockfunc, lockarg */
2879 			&sc->mfi_kbuff_arr_dmat[i])) {
2880 			device_printf(sc->mfi_dev,
2881 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2882 			return (ENOMEM);
2883 		}
2884 
2885 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2886 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2887 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2888 			device_printf(sc->mfi_dev,
2889 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2890 			return (ENOMEM);
2891 		}
2892 
2893 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2894 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2895 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2896 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2897 
2898 		if (!sc->kbuff_arr[i]) {
2899 			device_printf(sc->mfi_dev,
2900 			    "Could not allocate memory for kbuff_arr info\n");
			return (ENOMEM);
2902 		}
2903 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2904 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2905 
2906 		if (sizeof(bus_addr_t) == 8) {
2907 			cm->cm_frame->stp.sgl.sg64[i].addr =
2908 			    kern_sge[i].phys_addr;
2909 			cm->cm_frame->stp.sgl.sg64[i].len =
2910 			    ioc->mfi_sgl[i].iov_len;
2911 		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
2916 		}
2917 
2918 		error = copyin(ioc->mfi_sgl[i].iov_base,
2919 		    sc->kbuff_arr[i],
2920 		    ioc->mfi_sgl[i].iov_len);
2921 		if (error != 0) {
2922 			device_printf(sc->mfi_dev, "Copy in failed\n");
			return (error);
2924 		}
2925 	}
2926 
	cm->cm_flags |= MFI_CMD_MAPPED;
	return (0);
2929 }
2930 
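/*
 * Handle the MFIIO_PASSTHRU ioctl: copy in the user buffer (capped at
 * 1MB), run the DCMD under the config lock when required, and copy the
 * frame and data back out.
 */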
2931 static int
2932 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2933 {
2934 	struct mfi_command *cm;
2935 	struct mfi_dcmd_frame *dcmd;
2936 	void *ioc_buf = NULL;
2937 	uint32_t context;
2938 	int error = 0, locked;
2939 
2941 	if (ioc->buf_size > 0) {
2942 		if (ioc->buf_size > 1024 * 1024)
2943 			return (ENOMEM);
2944 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2945 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2946 		if (error) {
2947 			device_printf(sc->mfi_dev, "failed to copyin\n");
2948 			free(ioc_buf, M_MFIBUF);
2949 			return (error);
2950 		}
2951 	}
2952 
2953 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2954 
2955 	mtx_lock(&sc->mfi_io_lock);
2956 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2957 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2958 
2959 	/* Save context for later */
2960 	context = cm->cm_frame->header.context;
2961 
2962 	dcmd = &cm->cm_frame->dcmd;
2963 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2964 
2965 	cm->cm_sg = &dcmd->sgl;
2966 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2967 	cm->cm_data = ioc_buf;
2968 	cm->cm_len = ioc->buf_size;
2969 
2970 	/* restore context */
2971 	cm->cm_frame->header.context = context;
2972 
2973 	/* Cheat since we don't know if we're writing or reading */
2974 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2975 
2976 	error = mfi_check_command_pre(sc, cm);
2977 	if (error)
2978 		goto out;
2979 
2980 	error = mfi_wait_command(sc, cm);
2981 	if (error) {
2982 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2983 		goto out;
2984 	}
2985 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2986 	mfi_check_command_post(sc, cm);
2987 out:
2988 	mfi_release_command(cm);
2989 	mtx_unlock(&sc->mfi_io_lock);
2990 	mfi_config_unlock(sc, locked);
2991 	if (ioc->buf_size > 0)
2992 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2993 	if (ioc_buf)
2994 		free(ioc_buf, M_MFIBUF);
2995 	return (error);
2996 }
2997 
2998 #define	PTRIN(p)		((void *)(uintptr_t)(p))
2999 
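/*
 * Character-device ioctl entry point.  Handles statistics queries,
 * native and 32-bit compat management frames, AEN registration, and
 * the Linux megaraid shims.
 */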
3000 static int
3001 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3002 {
3003 	struct mfi_softc *sc;
3004 	union mfi_statrequest *ms;
3005 	struct mfi_ioc_packet *ioc;
3006 #ifdef COMPAT_FREEBSD32
3007 	struct mfi_ioc_packet32 *ioc32;
3008 #endif
3009 	struct mfi_ioc_aen *aen;
3010 	struct mfi_command *cm = NULL;
3011 	uint32_t context = 0;
3012 	union mfi_sense_ptr sense_ptr;
3013 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3014 	size_t len;
3015 	int i, res;
3016 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3017 #ifdef COMPAT_FREEBSD32
3018 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3019 	struct mfi_ioc_passthru iop_swab;
3020 #endif
3021 	int error, locked;
3022 	union mfi_sgl *sgl;
3023 	sc = dev->si_drv1;
3024 	error = 0;
3025 
	if (sc->adpreset)
		return (EBUSY);

	if (sc->hw_crit_error)
		return (EBUSY);

	if (sc->issuepend_done == 0)
		return (EBUSY);
3034 
3035 	switch (cmd) {
3036 	case MFIIO_STATS:
3037 		ms = (union mfi_statrequest *)arg;
3038 		switch (ms->ms_item) {
3039 		case MFIQ_FREE:
3040 		case MFIQ_BIO:
3041 		case MFIQ_READY:
3042 		case MFIQ_BUSY:
3043 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3044 			    sizeof(struct mfi_qstat));
3045 			break;
3046 		default:
3047 			error = ENOIOCTL;
3048 			break;
3049 		}
3050 		break;
3051 	case MFIIO_QUERY_DISK:
3052 	{
3053 		struct mfi_query_disk *qd;
3054 		struct mfi_disk *ld;
3055 
3056 		qd = (struct mfi_query_disk *)arg;
3057 		mtx_lock(&sc->mfi_io_lock);
3058 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3059 			if (ld->ld_id == qd->array_id)
3060 				break;
3061 		}
3062 		if (ld == NULL) {
3063 			qd->present = 0;
3064 			mtx_unlock(&sc->mfi_io_lock);
3065 			return (0);
3066 		}
3067 		qd->present = 1;
3068 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3069 			qd->open = 1;
3070 		bzero(qd->devname, SPECNAMELEN + 1);
3071 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3072 		mtx_unlock(&sc->mfi_io_lock);
3073 		break;
3074 	}
3075 	case MFI_CMD:
3076 #ifdef COMPAT_FREEBSD32
3077 	case MFI_CMD32:
3078 #endif
3079 		{
		devclass_t devclass;
		int adapter;

		ioc = (struct mfi_ioc_packet *)arg;
3083 
3084 		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
		}
3089 		mtx_lock(&sc->mfi_io_lock);
3090 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3091 			mtx_unlock(&sc->mfi_io_lock);
3092 			return (EBUSY);
3093 		}
3094 		mtx_unlock(&sc->mfi_io_lock);
3095 		locked = 0;
3096 
3097 		/*
3098 		 * save off original context since copying from user
3099 		 * will clobber some data
3100 		 */
3101 		context = cm->cm_frame->header.context;
3102 		cm->cm_frame->header.context = cm->cm_index;
3103 
3104 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3105 		    2 * MEGAMFI_FRAME_SIZE);
3106 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3107 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3108 		cm->cm_frame->header.scsi_status = 0;
3109 		cm->cm_frame->header.pad0 = 0;
3110 		if (ioc->mfi_sge_count) {
3111 			cm->cm_sg =
3112 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3113 		}
3114 		sgl = cm->cm_sg;
3115 		cm->cm_flags = 0;
3116 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3117 			cm->cm_flags |= MFI_CMD_DATAIN;
3118 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3119 			cm->cm_flags |= MFI_CMD_DATAOUT;
3120 		/* Legacy app shim */
3121 		if (cm->cm_flags == 0)
3122 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3123 		cm->cm_len = cm->cm_frame->header.data_len;
3124 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3125 #ifdef COMPAT_FREEBSD32
3126 			if (cmd == MFI_CMD) {
3127 #endif
3128 				/* Native */
3129 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3130 #ifdef COMPAT_FREEBSD32
3131 			} else {
3132 				/* 32bit on 64bit */
3133 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3134 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3135 			}
3136 #endif
3137 			cm->cm_len += cm->cm_stp_len;
3138 		}
3139 		if (cm->cm_len &&
3140 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3141 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3142 			    M_WAITOK | M_ZERO);
3143 			if (cm->cm_data == NULL) {
3144 				device_printf(sc->mfi_dev, "Malloc failed\n");
3145 				goto out;
3146 			}
3147 		} else {
			cm->cm_data = NULL;
3149 		}
3150 
3151 		/* restore header context */
3152 		cm->cm_frame->header.context = context;
3153 
3154 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3155 			res = mfi_stp_cmd(sc, cm, arg);
3156 			if (res != 0)
3157 				goto out;
3158 		} else {
3159 			temp = data;
			if (cm->cm_flags & MFI_CMD_DATAOUT) {
3162 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3163 #ifdef COMPAT_FREEBSD32
3164 					if (cmd == MFI_CMD) {
3165 #endif
3166 						/* Native */
3167 						addr = ioc->mfi_sgl[i].iov_base;
3168 						len = ioc->mfi_sgl[i].iov_len;
3169 #ifdef COMPAT_FREEBSD32
3170 					} else {
3171 						/* 32bit on 64bit */
3172 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3173 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3174 						len = ioc32->mfi_sgl[i].iov_len;
3175 					}
3176 #endif
3177 					error = copyin(addr, temp, len);
3178 					if (error != 0) {
3179 						device_printf(sc->mfi_dev,
3180 						    "Copy in failed\n");
3181 						goto out;
3182 					}
3183 					temp = &temp[len];
3184 				}
3185 			}
3186 		}
3187 
3188 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3189 			locked = mfi_config_lock(sc,
3190 			     cm->cm_frame->dcmd.opcode);
3191 
3192 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3193 			cm->cm_frame->pass.sense_addr_lo =
3194 			    (uint32_t)cm->cm_sense_busaddr;
3195 			cm->cm_frame->pass.sense_addr_hi =
3196 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3197 		}
3198 		mtx_lock(&sc->mfi_io_lock);
		skip_pre_post = mfi_check_for_sscd(sc, cm);
3200 		if (!skip_pre_post) {
3201 			error = mfi_check_command_pre(sc, cm);
3202 			if (error) {
3203 				mtx_unlock(&sc->mfi_io_lock);
3204 				goto out;
3205 			}
3206 		}
3207 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3208 			device_printf(sc->mfi_dev,
			    "Controller command failed\n");
3210 			mtx_unlock(&sc->mfi_io_lock);
3211 			goto out;
3212 		}
3213 		if (!skip_pre_post) {
3214 			mfi_check_command_post(sc, cm);
3215 		}
3216 		mtx_unlock(&sc->mfi_io_lock);
3217 
3218 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3219 			temp = data;
			if (cm->cm_flags & MFI_CMD_DATAIN) {
3222 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3223 #ifdef COMPAT_FREEBSD32
3224 					if (cmd == MFI_CMD) {
3225 #endif
3226 						/* Native */
3227 						addr = ioc->mfi_sgl[i].iov_base;
3228 						len = ioc->mfi_sgl[i].iov_len;
3229 #ifdef COMPAT_FREEBSD32
3230 					} else {
3231 						/* 32bit on 64bit */
3232 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3233 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3234 						len = ioc32->mfi_sgl[i].iov_len;
3235 					}
3236 #endif
3237 					error = copyout(temp, addr, len);
3238 					if (error != 0) {
3239 						device_printf(sc->mfi_dev,
3240 						    "Copy out failed\n");
3241 						goto out;
3242 					}
3243 					temp = &temp[len];
3244 				}
3245 			}
3246 		}
3247 
3248 		if (ioc->mfi_sense_len) {
3249 			/* get user-space sense ptr then copy out sense */
3250 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3251 			    &sense_ptr.sense_ptr_data[0],
3252 			    sizeof(sense_ptr.sense_ptr_data));
3253 #ifdef COMPAT_FREEBSD32
3254 			if (cmd != MFI_CMD) {
3255 				/*
3256 				 * not 64bit native so zero out any address
3257 				 * over 32bit */
3258 				sense_ptr.addr.high = 0;
3259 			}
3260 #endif
3261 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3262 			    ioc->mfi_sense_len);
3263 			if (error != 0) {
3264 				device_printf(sc->mfi_dev,
3265 				    "Copy out failed\n");
3266 				goto out;
3267 			}
3268 		}
3269 
3270 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3271 out:
3272 		mfi_config_unlock(sc, locked);
3273 		if (data)
3274 			free(data, M_MFIBUF);
3275 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3276 			for (i = 0; i < 2; i++) {
3277 				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3279 						bus_dmamap_unload(
3280 						    sc->mfi_kbuff_arr_dmat[i],
3281 						    sc->mfi_kbuff_arr_dmamap[i]
3282 						    );
3283 					if (sc->kbuff_arr[i] != NULL)
3284 						bus_dmamem_free(
3285 						    sc->mfi_kbuff_arr_dmat[i],
3286 						    sc->kbuff_arr[i],
3287 						    sc->mfi_kbuff_arr_dmamap[i]
3288 						    );
3289 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3290 						bus_dma_tag_destroy(
3291 						    sc->mfi_kbuff_arr_dmat[i]);
3292 				}
3293 			}
3294 		}
3295 		if (cm) {
3296 			mtx_lock(&sc->mfi_io_lock);
3297 			mfi_release_command(cm);
3298 			mtx_unlock(&sc->mfi_io_lock);
3299 		}
3300 
3301 		break;
3302 		}
3303 	case MFI_SET_AEN:
3304 		aen = (struct mfi_ioc_aen *)arg;
3305 		error = mfi_aen_register(sc, aen->aen_seq_num,
3306 		    aen->aen_class_locale);
3307 
3308 		break;
3309 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3310 		{
3311 			devclass_t devclass;
3312 			struct mfi_linux_ioc_packet l_ioc;
3313 			int adapter;
3314 
3315 			devclass = devclass_find("mfi");
3316 			if (devclass == NULL)
3317 				return (ENOENT);
3318 
3319 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3320 			if (error)
3321 				return (error);
3322 			adapter = l_ioc.lioc_adapter_no;
3323 			sc = devclass_get_softc(devclass, adapter);
3324 			if (sc == NULL)
3325 				return (ENOENT);
3326 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3327 			    cmd, arg, flag, td));
3328 			break;
3329 		}
3330 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3331 		{
3332 			devclass_t devclass;
3333 			struct mfi_linux_ioc_aen l_aen;
3334 			int adapter;
3335 
3336 			devclass = devclass_find("mfi");
3337 			if (devclass == NULL)
3338 				return (ENOENT);
3339 
3340 			error = copyin(arg, &l_aen, sizeof(l_aen));
3341 			if (error)
3342 				return (error);
3343 			adapter = l_aen.laen_adapter_no;
3344 			sc = devclass_get_softc(devclass, adapter);
3345 			if (sc == NULL)
3346 				return (ENOENT);
3347 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3348 			    cmd, arg, flag, td));
3349 			break;
3350 		}
3351 #ifdef COMPAT_FREEBSD32
3352 	case MFIIO_PASSTHRU32:
3353 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3354 			error = ENOTTY;
3355 			break;
3356 		}
3357 		iop_swab.ioc_frame	= iop32->ioc_frame;
3358 		iop_swab.buf_size	= iop32->buf_size;
3359 		iop_swab.buf		= PTRIN(iop32->buf);
3360 		iop			= &iop_swab;
3361 		/* FALLTHROUGH */
3362 #endif
3363 	case MFIIO_PASSTHRU:
3364 		error = mfi_user_command(sc, iop);
3365 #ifdef COMPAT_FREEBSD32
3366 		if (cmd == MFIIO_PASSTHRU32)
3367 			iop32->ioc_frame = iop_swab.ioc_frame;
3368 #endif
3369 		break;
3370 	default:
3371 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3372 		error = ENOTTY;
3373 		break;
3374 	}
3375 
3376 	return (error);
3377 }
3378 
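/*
 * Back end for the Linux ioctl shims; mirrors the native MFI_CMD path
 * but uses the Linux frame and S/G layouts.
 */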
3379 static int
3380 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3381 {
3382 	struct mfi_softc *sc;
3383 	struct mfi_linux_ioc_packet l_ioc;
3384 	struct mfi_linux_ioc_aen l_aen;
3385 	struct mfi_command *cm = NULL;
3386 	struct mfi_aen *mfi_aen_entry;
3387 	union mfi_sense_ptr sense_ptr;
3388 	uint32_t context = 0;
3389 	uint8_t *data = NULL, *temp;
3390 	int i;
3391 	int error, locked;
3392 
3393 	sc = dev->si_drv1;
3394 	error = 0;
3395 	switch (cmd) {
3396 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3397 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3398 		if (error != 0)
3399 			return (error);
3400 
3401 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3402 			return (EINVAL);
3403 		}
3404 
3405 		mtx_lock(&sc->mfi_io_lock);
3406 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3407 			mtx_unlock(&sc->mfi_io_lock);
3408 			return (EBUSY);
3409 		}
3410 		mtx_unlock(&sc->mfi_io_lock);
3411 		locked = 0;
3412 
3413 		/*
3414 		 * save off original context since copying from user
3415 		 * will clobber some data
3416 		 */
3417 		context = cm->cm_frame->header.context;
3418 
3419 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE); /* XXX assumes two frames suffice */
3421 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3422 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3423 		cm->cm_frame->header.scsi_status = 0;
3424 		cm->cm_frame->header.pad0 = 0;
3425 		if (l_ioc.lioc_sge_count)
3426 			cm->cm_sg =
3427 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3428 		cm->cm_flags = 0;
3429 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3430 			cm->cm_flags |= MFI_CMD_DATAIN;
3431 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3432 			cm->cm_flags |= MFI_CMD_DATAOUT;
3433 		cm->cm_len = cm->cm_frame->header.data_len;
3434 		if (cm->cm_len &&
3435 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3436 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3437 			    M_WAITOK | M_ZERO);
3438 			if (cm->cm_data == NULL) {
3439 				device_printf(sc->mfi_dev, "Malloc failed\n");
3440 				goto out;
3441 			}
3442 		} else {
			cm->cm_data = NULL;
3444 		}
3445 
3446 		/* restore header context */
3447 		cm->cm_frame->header.context = context;
3448 
3449 		temp = data;
3450 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3451 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3452 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3453 				       temp,
3454 				       l_ioc.lioc_sgl[i].iov_len);
3455 				if (error != 0) {
3456 					device_printf(sc->mfi_dev,
3457 					    "Copy in failed\n");
3458 					goto out;
3459 				}
3460 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3461 			}
3462 		}
3463 
3464 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3465 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3466 
3467 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3468 			cm->cm_frame->pass.sense_addr_lo =
3469 			    (uint32_t)cm->cm_sense_busaddr;
3470 			cm->cm_frame->pass.sense_addr_hi =
3471 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3472 		}
3473 
3474 		mtx_lock(&sc->mfi_io_lock);
3475 		error = mfi_check_command_pre(sc, cm);
3476 		if (error) {
3477 			mtx_unlock(&sc->mfi_io_lock);
3478 			goto out;
3479 		}
3480 
3481 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3482 			device_printf(sc->mfi_dev,
			    "Controller command failed\n");
3484 			mtx_unlock(&sc->mfi_io_lock);
3485 			goto out;
3486 		}
3487 
3488 		mfi_check_command_post(sc, cm);
3489 		mtx_unlock(&sc->mfi_io_lock);
3490 
3491 		temp = data;
3492 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3493 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3494 				error = copyout(temp,
3495 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3496 					l_ioc.lioc_sgl[i].iov_len);
3497 				if (error != 0) {
3498 					device_printf(sc->mfi_dev,
3499 					    "Copy out failed\n");
3500 					goto out;
3501 				}
3502 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3503 			}
3504 		}
3505 
3506 		if (l_ioc.lioc_sense_len) {
3507 			/* get user-space sense ptr then copy out sense */
3508 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3509                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3510 			    &sense_ptr.sense_ptr_data[0],
3511 			    sizeof(sense_ptr.sense_ptr_data));
3512 #ifdef __amd64__
3513 			/*
3514 			 * only 32bit Linux support so zero out any
3515 			 * address over 32bit
3516 			 */
3517 			sense_ptr.addr.high = 0;
3518 #endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		error = copyout(&cm->cm_frame->header.cmd_status,
		    &((struct mfi_linux_ioc_packet*)arg)
		    ->lioc_frame.hdr.cmd_status,
		    1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
			    "Copy out failed\n");
			goto out;
		}

out:
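		/*
		 * Common exit path: drop the config lock if it was taken,
		 * free the bounce buffer, and return the command to the
		 * free pool.
		 */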
		mfi_config_unlock(sc, locked);
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN registered for pid %d\n", curproc->p_pid);
		/* M_WAITOK allocations sleep rather than fail. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}

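/*
 * poll(2) handler for the control device: report it readable once an
 * AEN has fired; otherwise record the selecting thread for wakeup.
 */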
static int
mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct mfi_softc *sc;
	int revents = 0;

	sc = dev->si_drv1;

	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (sc->mfi_aen_triggered != 0) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
			sc->mfi_aen_triggered = 0;
		}
		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
			revents |= POLLERR;
		}
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mfi_poll_waiting = 1;
			selrecord(td, &sc->mfi_select);
		}
	}

	return (revents);
}

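/*
 * Debugging aid: walk every mfi instance and report busy commands that
 * have been outstanding for longer than MFI_CMD_TIMEOUT seconds.
 */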
static void
mfi_dump_all(void)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	devclass_t dc;
	time_t deadline;
	int timedout;
	int i;

	dc = devclass_find("mfi");
	if (dc == NULL) {
		printf("No mfi dev class\n");
		return;
	}

	for (i = 0; ; i++) {
		sc = devclass_get_softc(dc, i);
		if (sc == NULL)
			break;
		device_printf(sc->mfi_dev, "Dumping\n\n");
		timedout = 0;
		deadline = time_uptime - MFI_CMD_TIMEOUT;
		mtx_lock(&sc->mfi_io_lock);
		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
			if (cm->cm_timestamp < deadline) {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				timedout++;
			}
		}
#if 0
		if (timedout)
			MFI_DUMP_CMDS(sc);
#endif

		mtx_unlock(&sc->mfi_io_lock);
	}
}

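/*
 * Watchdog callout: complains about commands that have been busy for
 * longer than MFI_CMD_TIMEOUT seconds and then re-arms itself.
 */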
static void
mfi_timeout(void *data)
{
	struct mfi_softc *sc = (struct mfi_softc *)data;
	struct mfi_command *cm;
	time_t deadline;
	int timedout = 0;

	deadline = time_uptime - MFI_CMD_TIMEOUT;
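	/*
	 * If no adapter reset is pending, give the Thunderbolt reset
	 * handler the first look; when it returns zero, the watchdog is
	 * simply re-armed and this scan is skipped.
	 */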
	if (sc->adpreset == 0) {
		if (!mfi_tbolt_reset(sc)) {
			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
			return;
		}
	}
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
			continue;
		if (cm->cm_timestamp < deadline) {
			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
				cm->cm_timestamp = time_uptime;
			} else {
				device_printf(sc->mfi_dev,
				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
				    cm, (int)(time_uptime - cm->cm_timestamp));
				MFI_PRINT_CMD(cm);
				MFI_VALIDATE_CMD(sc, cm);
				timedout++;
			}
		}
	}

#if 0
	if (timedout)
		MFI_DUMP_CMDS(sc);
#endif

	mtx_unlock(&sc->mfi_io_lock);

	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	/* Debugging hook, disabled by default. */
	if (0)
		mfi_dump_all();
}