/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
	   0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

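/*
 * Controller-generation specific register access.  mfi_attach() installs one
 * of these function sets in the softc, so the rest of the driver does not
 * need to know whether it is driving an xscale (1064R) or a ppc-style
 * (1078/GEN2/Skinny) chip.
 */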
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
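		/*
		 * Poll the firmware state every 100ms; max_wait * 10
		 * iterations gives roughly max_wait seconds in a given
		 * state before we declare the firmware stuck.
		 */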
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return EINVAL;

	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* For ThunderBolt support, get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x\n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* alignmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate DMA memory for the MPI2 IOC Init descriptor.  It
		 * is kept separate from the request and reply descriptors
		 * allocated above to avoid confusion later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
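	/*
	 * One frame for the command header plus enough whole frames to hold
	 * a worst-case SG list: for n bytes of SG entries, (n - 1) /
	 * MFI_FRAME_SIZE + 2 is ceil(n / MFI_FRAME_SIZE) plus the header
	 * frame.
	 */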
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return error;
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return error;
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management.
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0 && sc->mfi_cdev != NULL)
		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_sync_map_info(sc);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
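		/*
		 * mfi_release_command() asserts that mfi_io_lock is held, so
		 * take it here even though these commands are not yet
		 * visible to anyone else.
		 */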
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

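	/*
	 * The firmware produces completions into hw_reply_q and advances
	 * hw_pi; the driver consumes entries and advances hw_ci.  All of
	 * these live in host memory so that mfi_intr() can drain the queue
	 * without extra register access.
	 */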
	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
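		/*
		 * Fall back to a conservative transfer size that the SG
		 * list we allocated can always satisfy.
		 */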
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	free(log_state, M_MFIBUF);

	return 0;
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
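	/*
	 * Drain the reply queue.  Each entry is the context (command index)
	 * of a completed command; the queue holds mfi_max_fw_cmds + 1
	 * entries, hence the wrap at that bound.
	 */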
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

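	/*
	 * Issue the shutdown DCMD polled: this runs from the shutdown_final
	 * eventhandler, where interrupt delivery may no longer be reliable.
	 */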
	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
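		/*
		 * Entries whose device_id matches encl_device_id appear to
		 * describe the enclosure itself rather than a disk, so skip
		 * them.
		 */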
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];
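	/* Note: returns a static buffer; assumes callers are serialized. */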

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all events logged since the
		 * last shutdown; skip acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

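/*
 * Deferred event processing.  mfi_decode_evt() may need Giant and the sx
 * config lock, so queued events are drained from a taskqueue rather than
 * decoded directly in completion context.
 */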
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
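	/*
	 * If an AEN command is already outstanding and it already covers the
	 * requested class and locale, there is nothing to do; otherwise
	 * widen the registration and abort the old command so it can be
	 * reissued below.
	 */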
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mfi_abort(sc, &sc->mfi_aen_cm);
		}
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	mtx_unlock(&sc->mfi_io_lock);
	if (error) {
		goto out;
	}

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mtx_lock(&sc->mfi_io_lock);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

out:
	return (error);
}

1675 static void
1676 mfi_aen_complete(struct mfi_command *cm)
1677 {
1678 	struct mfi_frame_header *hdr;
1679 	struct mfi_softc *sc;
1680 	struct mfi_evt_detail *detail;
1681 	struct mfi_aen *mfi_aen_entry, *tmp;
1682 	int seq = 0, aborted = 0;
1683 
1684 	sc = cm->cm_sc;
1685 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1686 
1687 	hdr = &cm->cm_frame->header;
1688 
1689 	if (sc->mfi_aen_cm == NULL)
1690 		return;
1691 
1692 	if (sc->cm_aen_abort ||
1693 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1694 		sc->cm_aen_abort = 0;
1695 		aborted = 1;
1696 	} else {
1697 		sc->mfi_aen_triggered = 1;
1698 		if (sc->mfi_poll_waiting) {
1699 			sc->mfi_poll_waiting = 0;
1700 			selwakeup(&sc->mfi_select);
1701 		}
1702 		detail = cm->cm_data;
1703 		mfi_queue_evt(sc, detail);
1704 		seq = detail->seq + 1;
1705 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1706 		    tmp) {
1707 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1708 			    aen_link);
1709 			PROC_LOCK(mfi_aen_entry->p);
1710 			kern_psignal(mfi_aen_entry->p, SIGIO);
1711 			PROC_UNLOCK(mfi_aen_entry->p);
1712 			free(mfi_aen_entry, M_MFIBUF);
1713 		}
1714 	}
1715 
1716 	free(cm->cm_data, M_MFIBUF);
1717 	sc->mfi_aen_cm = NULL;
1718 	wakeup(&sc->mfi_aen_cm);
1719 	mfi_release_command(cm);
1720 
1721 	/* set it up again so the driver can catch more events */
1722 	if (!aborted) {
1723 		mtx_unlock(&sc->mfi_io_lock);
1724 		mfi_aen_setup(sc, seq);
1725 		mtx_lock(&sc->mfi_io_lock);
1726 	}
1727 }
1728 
1729 #define MAX_EVENTS 15
1730 
1731 static int
1732 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1733 {
1734 	struct mfi_command *cm;
1735 	struct mfi_dcmd_frame *dcmd;
1736 	struct mfi_evt_list *el;
1737 	union mfi_evt class_locale;
1738 	int error, i, seq, size;
1739 
1740 	class_locale.members.reserved = 0;
1741 	class_locale.members.locale = mfi_event_locale;
1742 	class_locale.members.evt_class  = mfi_event_class;
1743 
1744 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1745 		* (MAX_EVENTS - 1);
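	/*
	 * struct mfi_evt_list already embeds one mfi_evt_detail, hence
	 * the MAX_EVENTS - 1 in the sizing above.
	 */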
1746 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1747 	if (el == NULL)
1748 		return (ENOMEM);
1749 
1750 	for (seq = start_seq;;) {
1751 		mtx_lock(&sc->mfi_io_lock);
1752 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1753 			free(el, M_MFIBUF);
1754 			mtx_unlock(&sc->mfi_io_lock);
1755 			return (EBUSY);
1756 		}
1757 		mtx_unlock(&sc->mfi_io_lock);
1758 
1759 		dcmd = &cm->cm_frame->dcmd;
1760 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1761 		dcmd->header.cmd = MFI_CMD_DCMD;
1762 		dcmd->header.timeout = 0;
1763 		dcmd->header.data_len = size;
1764 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1765 		((uint32_t *)&dcmd->mbox)[0] = seq;
1766 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1767 		cm->cm_sg = &dcmd->sgl;
1768 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1769 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1770 		cm->cm_data = el;
1771 		cm->cm_len = size;
1772 
1773 		mtx_lock(&sc->mfi_io_lock);
1774 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1775 			device_printf(sc->mfi_dev,
1776 			    "Failed to get controller entries\n");
1777 			mfi_release_command(cm);
1778 			mtx_unlock(&sc->mfi_io_lock);
1779 			break;
1780 		}
1781 
1782 		mtx_unlock(&sc->mfi_io_lock);
1783 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1784 		    BUS_DMASYNC_POSTREAD);
1785 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1786 
1787 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1788 			mtx_lock(&sc->mfi_io_lock);
1789 			mfi_release_command(cm);
1790 			mtx_unlock(&sc->mfi_io_lock);
1791 			break;
1792 		}
1793 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1794 			device_printf(sc->mfi_dev,
1795 			    "Error %d fetching controller entries\n",
1796 			    dcmd->header.cmd_status);
1797 			mtx_lock(&sc->mfi_io_lock);
1798 			mfi_release_command(cm);
1799 			mtx_unlock(&sc->mfi_io_lock);
1800 			break;
1801 		}
1802 		mtx_lock(&sc->mfi_io_lock);
1803 		mfi_release_command(cm);
1804 		mtx_unlock(&sc->mfi_io_lock);
1805 
1806 		for (i = 0; i < el->count; i++) {
1807 			/*
1808 			 * If this event is newer than 'stop_seq' then
1809 			 * break out of the loop.  Note that the log
1810 			 * is a circular buffer so we have to handle
1811 			 * the case that our stop point is earlier in
1812 			 * the buffer than our start point.
1813 			 */
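			/*
			 * For example, with start_seq == 500 and
			 * stop_seq == 20 the log has wrapped: entries
			 * with 20 <= seq < 500 are newer than the stop
			 * point and end the scan, while entries with
			 * seq >= 500 are older wrapped entries that are
			 * still queued.
			 */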
1814 			if (el->event[i].seq >= stop_seq) {
1815 				if (start_seq <= stop_seq)
1816 					break;
1817 				else if (el->event[i].seq < start_seq)
1818 					break;
1819 			}
1820 			mtx_lock(&sc->mfi_io_lock);
1821 			mfi_queue_evt(sc, &el->event[i]);
1822 			mtx_unlock(&sc->mfi_io_lock);
1823 		}
1824 		seq = el->event[el->count - 1].seq + 1;
1825 	}
1826 
1827 	free(el, M_MFIBUF);
1828 	return (0);
1829 }
1830 
1831 static int
1832 mfi_add_ld(struct mfi_softc *sc, int id)
1833 {
1834 	struct mfi_command *cm;
1835 	struct mfi_dcmd_frame *dcmd = NULL;
1836 	struct mfi_ld_info *ld_info = NULL;
1837 	struct mfi_disk_pending *ld_pend;
1838 	int error;
1839 
1840 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1841 
1842 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1843 	if (ld_pend != NULL) {
1844 		ld_pend->ld_id = id;
1845 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1846 	}
1847 
1848 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1849 	    (void **)&ld_info, sizeof(*ld_info));
1850 	if (error) {
1851 		device_printf(sc->mfi_dev,
1852 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1853 		if (ld_info)
1854 			free(ld_info, M_MFIBUF);
1855 		return (error);
1856 	}
1857 	cm->cm_flags = MFI_CMD_DATAIN;
1858 	dcmd = &cm->cm_frame->dcmd;
1859 	dcmd->mbox[0] = id;
1860 	if (mfi_wait_command(sc, cm) != 0) {
1861 		device_printf(sc->mfi_dev,
1862 		    "Failed to get logical drive: %d\n", id);
1863 		free(ld_info, M_MFIBUF);
1864 		return (0);
1865 	}
1866 	if (ld_info->ld_config.params.isSSCD != 1)
1867 		mfi_add_ld_complete(cm);
1868 	else {
1869 		mfi_release_command(cm);
1870 		if (ld_info)		/* For SSCD drives, free ld_info here */
1871 			free(ld_info, M_MFIBUF);
1872 	}
1873 	return (0);
1874 }
1875 
1876 static void
1877 mfi_add_ld_complete(struct mfi_command *cm)
1878 {
1879 	struct mfi_frame_header *hdr;
1880 	struct mfi_ld_info *ld_info;
1881 	struct mfi_softc *sc;
1882 	device_t child;
1883 
1884 	sc = cm->cm_sc;
1885 	hdr = &cm->cm_frame->header;
1886 	ld_info = cm->cm_private;
1887 
1888 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1889 		free(ld_info, M_MFIBUF);
1890 		wakeup(&sc->mfi_map_sync_cm);
1891 		mfi_release_command(cm);
1892 		return;
1893 	}
1894 	wakeup(&sc->mfi_map_sync_cm);
1895 	mfi_release_command(cm);
1896 
1897 	mtx_unlock(&sc->mfi_io_lock);
1898 	mtx_lock(&Giant);
1899 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1900 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1901 		free(ld_info, M_MFIBUF);
1902 		mtx_unlock(&Giant);
1903 		mtx_lock(&sc->mfi_io_lock);
1904 		return;
1905 	}
1906 
1907 	device_set_ivars(child, ld_info);
1908 	device_set_desc(child, "MFI Logical Disk");
1909 	bus_generic_attach(sc->mfi_dev);
1910 	mtx_unlock(&Giant);
1911 	mtx_lock(&sc->mfi_io_lock);
1912 }
1913 
1914 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1915 {
1916 	struct mfi_command *cm;
1917 	struct mfi_dcmd_frame *dcmd = NULL;
1918 	struct mfi_pd_info *pd_info = NULL;
1919 	struct mfi_system_pending *syspd_pend;
1920 	int error;
1921 
1922 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1923 
1924 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1925 	if (syspd_pend != NULL) {
1926 		syspd_pend->pd_id = id;
1927 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1928 	}
1929 
1930 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1931 		(void **)&pd_info, sizeof(*pd_info));
1932 	if (error) {
1933 		device_printf(sc->mfi_dev,
1934 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1935 		    error);
1936 		if (pd_info)
1937 			free(pd_info, M_MFIBUF);
1938 		return (error);
1939 	}
1940 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1941 	dcmd = &cm->cm_frame->dcmd;
1942 	dcmd->mbox[0] = id;
1943 	dcmd->header.scsi_status = 0;
1944 	dcmd->header.pad0 = 0;
1945 	if (mfi_mapcmd(sc, cm) != 0) {
1946 		device_printf(sc->mfi_dev,
1947 		    "Failed to get physical drive info %d\n", id);
1948 		free(pd_info, M_MFIBUF);
1949 		return (0);
1950 	}
1951 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1952 	    BUS_DMASYNC_POSTREAD);
1953 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1954 	mfi_add_sys_pd_complete(cm);
1955 	return (0);
1956 }
1957 
1958 static void
1959 mfi_add_sys_pd_complete(struct mfi_command *cm)
1960 {
1961 	struct mfi_frame_header *hdr;
1962 	struct mfi_pd_info *pd_info;
1963 	struct mfi_softc *sc;
1964 	device_t child;
1965 
1966 	sc = cm->cm_sc;
1967 	hdr = &cm->cm_frame->header;
1968 	pd_info = cm->cm_private;
1969 
1970 	if (hdr->cmd_status != MFI_STAT_OK) {
1971 		free(pd_info, M_MFIBUF);
1972 		mfi_release_command(cm);
1973 		return;
1974 	}
1975 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1976 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1977 		    pd_info->ref.v.device_id);
1978 		free(pd_info, M_MFIBUF);
1979 		mfi_release_command(cm);
1980 		return;
1981 	}
1982 	mfi_release_command(cm);
1983 
1984 	mtx_unlock(&sc->mfi_io_lock);
1985 	mtx_lock(&Giant);
1986 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1987 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1988 		free(pd_info, M_MFIBUF);
1989 		mtx_unlock(&Giant);
1990 		mtx_lock(&sc->mfi_io_lock);
1991 		return;
1992 	}
1993 
1994 	device_set_ivars(child, pd_info);
1995 	device_set_desc(child, "MFI System PD");
1996 	bus_generic_attach(sc->mfi_dev);
1997 	mtx_unlock(&Giant);
1998 	mtx_lock(&sc->mfi_io_lock);
1999 }
2000 
2001 static struct mfi_command *
2002 mfi_bio_command(struct mfi_softc *sc)
2003 {
2004 	struct bio *bio;
2005 	struct mfi_command *cm = NULL;
2006 
2007 	/* Reserve two commands to avoid starvation for IOCTLs. */
2008 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2009 		return (NULL);
2010 	}
2011 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2012 		return (NULL);
2013 	}
2014 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2015 		cm = mfi_build_ldio(sc, bio);
2016 	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2017 		cm = mfi_build_syspdio(sc, bio);
2018 	}
2019 	if (cm == NULL)
2020 		mfi_enqueue_bio(sc, bio);
2021 	return (cm);
2022 }
2023 
2024 /*
2025  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2026  */
2027 
2028 int
2029 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba,
    u_int32_t block_count, uint8_t *cdb)
2030 {
2031 	int cdb_len;
2032 
2033 	if (((lba & 0x1fffff) == lba) &&
2034 	    ((block_count & 0xff) == block_count) &&
2035 	    (byte2 == 0)) {
2036 		/* We can fit in a 6 byte cdb */
2037 		struct scsi_rw_6 *scsi_cmd;
2038 
2039 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2040 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2041 		scsi_ulto3b(lba, scsi_cmd->addr);
2042 		scsi_cmd->length = block_count & 0xff;
2043 		scsi_cmd->control = 0;
2044 		cdb_len = sizeof(*scsi_cmd);
2045 	} else if (((block_count & 0xffff) == block_count) &&
	    ((lba & 0xffffffff) == lba)) {
2046 		/* Need a 10 byte CDB */
2047 		struct scsi_rw_10 *scsi_cmd;
2048 
2049 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2050 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2051 		scsi_cmd->byte2 = byte2;
2052 		scsi_ulto4b(lba, scsi_cmd->addr);
2053 		scsi_cmd->reserved = 0;
2054 		scsi_ulto2b(block_count, scsi_cmd->length);
2055 		scsi_cmd->control = 0;
2056 		cdb_len = sizeof(*scsi_cmd);
2057 	} else if (((block_count & 0xffffffff) == block_count) &&
2058 	    ((lba & 0xffffffff) == lba)) {
2059 		/* Block count too big for a 10 byte CDB; use a 12 byte CDB */
2060 		struct scsi_rw_12 *scsi_cmd;
2061 
2062 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2063 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2064 		scsi_cmd->byte2 = byte2;
2065 		scsi_ulto4b(lba, scsi_cmd->addr);
2066 		scsi_cmd->reserved = 0;
2067 		scsi_ulto4b(block_count, scsi_cmd->length);
2068 		scsi_cmd->control = 0;
2069 		cdb_len = sizeof(*scsi_cmd);
2070 	} else {
2071 		/*
2072 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2073 		 * than 2^32
2074 		 */
2075 		struct scsi_rw_16 *scsi_cmd;
2076 
2077 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2078 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2079 		scsi_cmd->byte2 = byte2;
2080 		scsi_u64to8b(lba, scsi_cmd->addr);
2081 		scsi_cmd->reserved = 0;
2082 		scsi_ulto4b(block_count, scsi_cmd->length);
2083 		scsi_cmd->control = 0;
2084 		cdb_len = sizeof(*scsi_cmd);
2085 	}
2086 
2087 	return cdb_len;
2088 }
2089 
2090 static struct mfi_command *
2091 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2092 {
2093 	struct mfi_command *cm;
2094 	struct mfi_pass_frame *pass;
2095 	uint32_t context = 0;
2096 	int flags = 0, blkcount = 0, readop;
2097 	uint8_t cdb_len;
2098 
2099 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2100 	    return (NULL);
2101 
2102 	/* Zero out the MFI frame */
2103 	context = cm->cm_frame->header.context;
2104 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2105 	cm->cm_frame->header.context = context;
2106 	pass = &cm->cm_frame->pass;
2107 	bzero(pass->cdb, 16);
2108 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2109 	switch (bio->bio_cmd & 0x03) {
2110 	case BIO_READ:
2111 		flags = MFI_CMD_DATAIN;
2112 		readop = 1;
2113 		break;
2114 	case BIO_WRITE:
2115 		flags = MFI_CMD_DATAOUT;
2116 		readop = 0;
2117 		break;
2118 	default:
2119 		/* TODO: what about BIO_DELETE??? */
2120 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2121 	}
2122 
2123 	/* Cheat with the sector length to avoid a non-constant division */
2124 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
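	/*
	 * e.g. assuming the usual 512-byte MFI_SECTOR_LEN, a 4097-byte
	 * bio rounds up to blkcount == 9.
	 */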
2125 	/* Fill the LBA and Transfer length in CDB */
2126 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2127 	    pass->cdb);
2128 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2129 	pass->header.lun_id = 0;
2130 	pass->header.timeout = 0;
2131 	pass->header.flags = 0;
2132 	pass->header.scsi_status = 0;
2133 	pass->header.sense_len = MFI_SENSE_LEN;
2134 	pass->header.data_len = bio->bio_bcount;
2135 	pass->header.cdb_len = cdb_len;
2136 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2137 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2138 	cm->cm_complete = mfi_bio_complete;
2139 	cm->cm_private = bio;
2140 	cm->cm_data = bio->bio_data;
2141 	cm->cm_len = bio->bio_bcount;
2142 	cm->cm_sg = &pass->sgl;
2143 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2144 	cm->cm_flags = flags;
2145 	return (cm);
2146 }
2147 
2148 static struct mfi_command *
2149 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2150 {
2151 	struct mfi_io_frame *io;
2152 	struct mfi_command *cm;
2153 	int flags;
2154 	uint32_t blkcount;
2155 	uint32_t context = 0;
2156 
2157 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2158 	    return (NULL);
2159 
2160 	/* Zero out the MFI frame */
2161 	context = cm->cm_frame->header.context;
2162 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2163 	cm->cm_frame->header.context = context;
2164 	io = &cm->cm_frame->io;
2165 	switch (bio->bio_cmd & 0x03) {
2166 	case BIO_READ:
2167 		io->header.cmd = MFI_CMD_LD_READ;
2168 		flags = MFI_CMD_DATAIN;
2169 		break;
2170 	case BIO_WRITE:
2171 		io->header.cmd = MFI_CMD_LD_WRITE;
2172 		flags = MFI_CMD_DATAOUT;
2173 		break;
2174 	default:
2175 		/* TODO: what about BIO_DELETE??? */
2176 		panic("Unsupported bio command %x\n", bio->bio_cmd);
2177 	}
2178 
2179 	/* Cheat with the sector length to avoid a non-constant division */
2180 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2181 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2182 	io->header.timeout = 0;
2183 	io->header.flags = 0;
2184 	io->header.scsi_status = 0;
2185 	io->header.sense_len = MFI_SENSE_LEN;
2186 	io->header.data_len = blkcount;
2187 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2188 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2189 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2190 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
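	/*
	 * e.g. pblkno 0x123456789 splits into lba_hi == 0x1 and
	 * lba_lo == 0x23456789.
	 */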
2191 	cm->cm_complete = mfi_bio_complete;
2192 	cm->cm_private = bio;
2193 	cm->cm_data = bio->bio_data;
2194 	cm->cm_len = bio->bio_bcount;
2195 	cm->cm_sg = &io->sgl;
2196 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2197 	cm->cm_flags = flags;
2198 	return (cm);
2199 }
2200 
2201 static void
2202 mfi_bio_complete(struct mfi_command *cm)
2203 {
2204 	struct bio *bio;
2205 	struct mfi_frame_header *hdr;
2206 	struct mfi_softc *sc;
2207 
2208 	bio = cm->cm_private;
2209 	hdr = &cm->cm_frame->header;
2210 	sc = cm->cm_sc;
2211 
2212 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2213 		bio->bio_flags |= BIO_ERROR;
2214 		bio->bio_error = EIO;
2215 		device_printf(sc->mfi_dev, "I/O error, cmd_status %d "
2216 		    "scsi_status %d\n", hdr->cmd_status, hdr->scsi_status);
2217 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2218 	} else if (cm->cm_error != 0) {
2219 		bio->bio_flags |= BIO_ERROR;
2220 	}
2221 
2222 	mfi_release_command(cm);
2223 	mfi_disk_complete(bio);
2224 }
2225 
2226 void
2227 mfi_startio(struct mfi_softc *sc)
2228 {
2229 	struct mfi_command *cm;
2230 	struct ccb_hdr *ccbh;
2231 
2232 	for (;;) {
2233 		/* Don't bother if we're short on resources */
2234 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2235 			break;
2236 
2237 		/* Try a command that has already been prepared */
2238 		cm = mfi_dequeue_ready(sc);
2239 
2240 		if (cm == NULL) {
2241 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2242 				cm = sc->mfi_cam_start(ccbh);
2243 		}
2244 
2245 		/* Nope, so look for work on the bioq */
2246 		if (cm == NULL)
2247 			cm = mfi_bio_command(sc);
2248 
2249 		/* No work available, so exit */
2250 		if (cm == NULL)
2251 			break;
2252 
2253 		/* Send the command to the controller */
2254 		if (mfi_mapcmd(sc, cm) != 0) {
2255 			mfi_requeue_ready(cm);
2256 			break;
2257 		}
2258 	}
2259 }
2260 
2261 int
2262 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2263 {
2264 	int error, polled;
2265 
2266 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2267 
2268 	if ((cm->cm_data != NULL) &&
	    (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2269 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2270 		if (cm->cm_flags & MFI_CMD_CCB)
2271 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2272 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2273 			    polled);
2274 		else
2275 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2276 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2277 			    mfi_data_cb, cm, polled);
2278 		if (error == EINPROGRESS) {
2279 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2280 			return (0);
2281 		}
2282 	} else {
2283 		if (sc->MFA_enabled)
2284 			error = mfi_tbolt_send_frame(sc, cm);
2285 		else
2286 			error = mfi_send_frame(sc, cm);
2287 	}
2288 
2289 	return (error);
2290 }
2291 
2292 static void
2293 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2294 {
2295 	struct mfi_frame_header *hdr;
2296 	struct mfi_command *cm;
2297 	union mfi_sgl *sgl;
2298 	struct mfi_softc *sc;
2299 	int i, j, first, dir;
2300 	int sge_size;
2301 
2302 	cm = (struct mfi_command *)arg;
2303 	sc = cm->cm_sc;
2304 	hdr = &cm->cm_frame->header;
2305 	sgl = cm->cm_sg;
2306 
2307 	if (error) {
2308 		printf("error %d in callback\n", error);
2309 		cm->cm_error = error;
2310 		mfi_complete(sc, cm);
2311 		return;
2312 	}
2313 	/*
2314 	 * Use the IEEE sgl only for I/Os on a SKINNY controller.  For
2315 	 * other commands on a SKINNY controller use either sg32 or
2316 	 * sg64 based on sizeof(bus_addr_t).  Also calculate the total
2317 	 * frame size based on the type of SGL used.
2318 	 */
2319 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2320 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2321 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2322 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2323 		for (i = 0; i < nsegs; i++) {
2324 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2325 			sgl->sg_skinny[i].len = segs[i].ds_len;
2326 			sgl->sg_skinny[i].flag = 0;
2327 		}
2328 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2329 		sge_size = sizeof(struct mfi_sg_skinny);
2330 		hdr->sg_count = nsegs;
2331 	} else {
2332 		j = 0;
2333 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2334 			first = cm->cm_stp_len;
2335 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2336 				sgl->sg32[j].addr = segs[0].ds_addr;
2337 				sgl->sg32[j++].len = first;
2338 			} else {
2339 				sgl->sg64[j].addr = segs[0].ds_addr;
2340 				sgl->sg64[j++].len = first;
2341 			}
2342 		} else
2343 			first = 0;
2344 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2345 			for (i = 0; i < nsegs; i++) {
2346 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2347 				sgl->sg32[j++].len = segs[i].ds_len - first;
2348 				first = 0;
2349 			}
2350 		} else {
2351 			for (i = 0; i < nsegs; i++) {
2352 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2353 				sgl->sg64[j++].len = segs[i].ds_len - first;
2354 				first = 0;
2355 			}
2356 			hdr->flags |= MFI_FRAME_SGL64;
2357 		}
2358 		hdr->sg_count = j;
2359 		sge_size = sc->mfi_sge_size;
2360 	}
2361 
2362 	dir = 0;
2363 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2364 		dir |= BUS_DMASYNC_PREREAD;
2365 		hdr->flags |= MFI_FRAME_DIR_READ;
2366 	}
2367 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2368 		dir |= BUS_DMASYNC_PREWRITE;
2369 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2370 	}
2371 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2372 	cm->cm_flags |= MFI_CMD_MAPPED;
2373 
2374 	/*
2375 	 * Instead of calculating the total number of frames in the
2376 	 * compound frame, it's already assumed that there will be at
2377 	 * least 1 frame, so don't compensate for the modulo of the
2378 	 * following division.
2379 	 */
2380 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2381 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
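	/*
	 * e.g. with 64-byte frames, a command whose frame plus SG list
	 * totals 72 bytes spills into one extra frame:
	 * (72 - 1) / 64 == 1.
	 */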
2382 
2383 	if (sc->MFA_enabled)
2384 		mfi_tbolt_send_frame(sc, cm);
2385 	else
2386 		mfi_send_frame(sc, cm);
2387 
2388 	return;
2389 }
2390 
2391 static int
2392 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2393 {
2394 	struct mfi_frame_header *hdr;
2395 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2396 
2397 	hdr = &cm->cm_frame->header;
2398 
2399 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2400 		cm->cm_timestamp = time_uptime;
2401 		mfi_enqueue_busy(cm);
2402 	} else {
2403 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2404 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2405 	}
2406 
2407 	/*
2408 	 * The bus address of the command is aligned on a 64 byte boundary,
2409 	 * leaving the low 6 bits as zero.  For whatever reason, the
2410 	 * hardware wants the address shifted right by three, leaving just
2411 	 * 3 zero bits.  These three bits are then used as a prefetching
2412 	 * hint for the hardware to predict how many frames need to be
2413 	 * fetched across the bus.  If a command has more than 8 frames
2414 	 * then the 3 bits are set to 0x7 and the firmware uses other
2415 	 * information in the command to determine the total amount to fetch.
2416 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2417 	 * is enough for both 32bit and 64bit systems.
2418 	 */
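	/*
	 * For example, a 64-byte-aligned frame at bus address 0x1240
	 * with 3 extra frames is handed to mfi_issue_cmd as (0x1240, 3);
	 * with the encoding described above the hardware sees
	 * (0x1240 >> 3) | 3.  (A sketch; the exact register format is
	 * per-controller and lives behind sc->mfi_issue_cmd.)
	 */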
2419 	if (cm->cm_extra_frames > 7)
2420 		cm->cm_extra_frames = 7;
2421 
2422 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2423 
2424 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2425 		return (0);
2426 
2427 	/* This is a polled command, so busy-wait for it to complete. */
2428 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2429 		DELAY(1000);
2430 		tm -= 1;
2431 		if (tm <= 0)
2432 			break;
2433 	}
2434 
2435 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2436 		device_printf(sc->mfi_dev, "Frame %p timed out "
2437 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2438 		return (ETIMEDOUT);
2439 	}
2440 
2441 	return (0);
2442 }
2443 
2445 void
2446 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2447 {
2448 	int dir;
2449 
2450 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2451 		dir = 0;
2452 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2453 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2454 			dir |= BUS_DMASYNC_POSTREAD;
2455 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2456 			dir |= BUS_DMASYNC_POSTWRITE;
2457 
2458 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2459 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2460 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2461 	}
2462 
2463 	cm->cm_flags |= MFI_CMD_COMPLETED;
2464 
2465 	if (cm->cm_complete != NULL)
2466 		cm->cm_complete(cm);
2467 	else
2468 		wakeup(cm);
2469 }
2470 
2471 static int
2472 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2473 {
2474 	struct mfi_command *cm;
2475 	struct mfi_abort_frame *abort;
2476 	int i = 0;
2477 	uint32_t context = 0;
2478 
2479 	mtx_lock(&sc->mfi_io_lock);
2480 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
2481 		return (EBUSY);
2482 	}
2483 
2484 	/* Zero out the MFI frame */
2485 	context = cm->cm_frame->header.context;
2486 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2487 	cm->cm_frame->header.context = context;
2488 
2489 	abort = &cm->cm_frame->abort;
2490 	abort->header.cmd = MFI_CMD_ABORT;
2491 	abort->header.flags = 0;
2492 	abort->header.scsi_status = 0;
2493 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2494 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2495 	abort->abort_mfi_addr_hi =
2496 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2497 	cm->cm_data = NULL;
2498 	cm->cm_flags = MFI_CMD_POLLED;
2499 
2500 	mfi_mapcmd(sc, cm);
2501 	mfi_release_command(cm);
2502 
2503 	mtx_unlock(&sc->mfi_io_lock);
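	/*
	 * Wait up to 5 * 5 seconds for the aborted command's completion
	 * handler to clear *cm_abort.
	 */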
2504 	while (i < 5 && *cm_abort != NULL) {
2505 		tsleep(cm_abort, 0, "mfiabort",
2506 		    5 * hz);
2507 		i++;
2508 	}
2509 	if (*cm_abort != NULL) {
2510 		/* Force a complete if command didn't abort */
2511 		mtx_lock(&sc->mfi_io_lock);
2512 		(*cm_abort)->cm_complete(*cm_abort);
2513 		mtx_unlock(&sc->mfi_io_lock);
2514 	}
2515 
2516 	return (0);
2517 }
2518 
2519 int
2520 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2521      int len)
2522 {
2523 	struct mfi_command *cm;
2524 	struct mfi_io_frame *io;
2525 	int error;
2526 	uint32_t context = 0;
2527 
2528 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2529 		return (EBUSY);
2530 
2531 	/* Zero out the MFI frame */
2532 	context = cm->cm_frame->header.context;
2533 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2534 	cm->cm_frame->header.context = context;
2535 
2536 	io = &cm->cm_frame->io;
2537 	io->header.cmd = MFI_CMD_LD_WRITE;
2538 	io->header.target_id = id;
2539 	io->header.timeout = 0;
2540 	io->header.flags = 0;
2541 	io->header.scsi_status = 0;
2542 	io->header.sense_len = MFI_SENSE_LEN;
2543 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2544 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2545 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2546 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2547 	io->lba_lo = lba & 0xffffffff;
2548 	cm->cm_data = virt;
2549 	cm->cm_len = len;
2550 	cm->cm_sg = &io->sgl;
2551 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2552 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2553 
2554 	error = mfi_mapcmd(sc, cm);
2555 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2556 	    BUS_DMASYNC_POSTWRITE);
2557 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2558 	mfi_release_command(cm);
2559 
2560 	return (error);
2561 }
2562 
2563 int
2564 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2565     int len)
2566 {
2567 	struct mfi_command *cm;
2568 	struct mfi_pass_frame *pass;
2569 	int error, readop, cdb_len;
2570 	uint32_t blkcount;
2571 
2572 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2573 		return (EBUSY);
2574 
2575 	pass = &cm->cm_frame->pass;
2576 	bzero(pass->cdb, 16);
2577 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2578 
2579 	readop = 0;
2580 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2581 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2582 	pass->header.target_id = id;
2583 	pass->header.timeout = 0;
2584 	pass->header.flags = 0;
2585 	pass->header.scsi_status = 0;
2586 	pass->header.sense_len = MFI_SENSE_LEN;
2587 	pass->header.data_len = len;
2588 	pass->header.cdb_len = cdb_len;
2589 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2590 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2591 	cm->cm_data = virt;
2592 	cm->cm_len = len;
2593 	cm->cm_sg = &pass->sgl;
2594 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2595 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2596 
2597 	error = mfi_mapcmd(sc, cm);
2598 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2599 	    BUS_DMASYNC_POSTWRITE);
2600 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2601 	mfi_release_command(cm);
2602 
2603 	return (error);
2604 }
2605 
2606 static int
2607 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2608 {
2609 	struct mfi_softc *sc;
2610 	int error;
2611 
2612 	sc = dev->si_drv1;
2613 
2614 	mtx_lock(&sc->mfi_io_lock);
2615 	if (sc->mfi_detaching)
2616 		error = ENXIO;
2617 	else {
2618 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2619 		error = 0;
2620 	}
2621 	mtx_unlock(&sc->mfi_io_lock);
2622 
2623 	return (error);
2624 }
2625 
2626 static int
2627 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2628 {
2629 	struct mfi_softc *sc;
2630 	struct mfi_aen *mfi_aen_entry, *tmp;
2631 
2632 	sc = dev->si_drv1;
2633 
2634 	mtx_lock(&sc->mfi_io_lock);
2635 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2636 
2637 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2638 		if (mfi_aen_entry->p == curproc) {
2639 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2640 			    aen_link);
2641 			free(mfi_aen_entry, M_MFIBUF);
2642 		}
2643 	}
2644 	mtx_unlock(&sc->mfi_io_lock);
2645 	return (0);
2646 }
2647 
2648 static int
2649 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2650 {
2651 
2652 	switch (opcode) {
2653 	case MFI_DCMD_LD_DELETE:
2654 	case MFI_DCMD_CFG_ADD:
2655 	case MFI_DCMD_CFG_CLEAR:
2656 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2657 		sx_xlock(&sc->mfi_config_lock);
2658 		return (1);
2659 	default:
2660 		return (0);
2661 	}
2662 }
2663 
2664 static void
2665 mfi_config_unlock(struct mfi_softc *sc, int locked)
2666 {
2667 
2668 	if (locked)
2669 		sx_xunlock(&sc->mfi_config_lock);
2670 }
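
/*
 * Typical usage (a sketch): bracket a configuration-changing command
 * with
 *
 *	locked = mfi_config_lock(sc, dcmd->opcode);
 *	...
 *	mfi_config_unlock(sc, locked);
 *
 * so only the DCMDs listed above pay for the exclusive sx lock.
 */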
2671 
2672 /*
2673  * Perform pre-issue checks on commands from userland and possibly veto
2674  * them.
2675  */
2676 static int
2677 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2678 {
2679 	struct mfi_disk *ld, *ld2;
2680 	int error;
2681 	struct mfi_system_pd *syspd = NULL;
2682 	uint16_t syspd_id;
2683 	uint16_t *mbox;
2684 
2685 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2686 	error = 0;
2687 	switch (cm->cm_frame->dcmd.opcode) {
2688 	case MFI_DCMD_LD_DELETE:
2689 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2690 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2691 				break;
2692 		}
2693 		if (ld == NULL)
2694 			error = ENOENT;
2695 		else
2696 			error = mfi_disk_disable(ld);
2697 		break;
2698 	case MFI_DCMD_CFG_CLEAR:
2699 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2700 			error = mfi_disk_disable(ld);
2701 			if (error)
2702 				break;
2703 		}
2704 		if (error) {
2705 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2706 				if (ld2 == ld)
2707 					break;
2708 				mfi_disk_enable(ld2);
2709 			}
2710 		}
2711 		break;
2712 	case MFI_DCMD_PD_STATE_SET:
2713 		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2714 		syspd_id = mbox[0];
2715 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2716 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2717 				if (syspd->pd_id == syspd_id)
2718 					break;
2719 			}
2720 		} else
2721 			break;
2723 		if (syspd)
2724 			error = mfi_syspd_disable(syspd);
2725 		break;
2726 	default:
2727 		break;
2728 	}
2729 	return (error);
2730 }
2731 
2732 /* Perform post-issue checks on commands from userland. */
2733 static void
2734 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2735 {
2736 	struct mfi_disk *ld, *ldn;
2737 	struct mfi_system_pd *syspd = NULL;
2738 	uint16_t syspd_id;
2739 	uint16_t *mbox;
2740 
2741 	switch (cm->cm_frame->dcmd.opcode) {
2742 	case MFI_DCMD_LD_DELETE:
2743 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2744 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2745 				break;
2746 		}
2747 		KASSERT(ld != NULL, ("volume disappeared"));
2748 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2749 			mtx_unlock(&sc->mfi_io_lock);
2750 			mtx_lock(&Giant);
2751 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2752 			mtx_unlock(&Giant);
2753 			mtx_lock(&sc->mfi_io_lock);
2754 		} else
2755 			mfi_disk_enable(ld);
2756 		break;
2757 	case MFI_DCMD_CFG_CLEAR:
2758 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2759 			mtx_unlock(&sc->mfi_io_lock);
2760 			mtx_lock(&Giant);
2761 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2762 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2763 			}
2764 			mtx_unlock(&Giant);
2765 			mtx_lock(&sc->mfi_io_lock);
2766 		} else {
2767 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2768 				mfi_disk_enable(ld);
2769 		}
2770 		break;
2771 	case MFI_DCMD_CFG_ADD:
2772 		mfi_ldprobe(sc);
2773 		break;
2774 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2775 		mfi_ldprobe(sc);
2776 		break;
2777 	case MFI_DCMD_PD_STATE_SET:
2778 		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2779 		syspd_id = mbox[0];
2780 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2781 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2782 				if (syspd->pd_id == syspd_id)
2783 					break;
2784 			}
2785 		} else
2786 			break;
2788 		/* If the transition fails then enable the syspd again */
2789 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2790 			mfi_syspd_enable(syspd);
2791 		break;
2792 	}
2793 }
2794 
2795 static int
2796 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2797 {
2798 	struct mfi_config_data *conf_data;
2799 	struct mfi_command *ld_cm = NULL;
2800 	struct mfi_ld_info *ld_info = NULL;
2801 	struct mfi_ld_config *ld;
2802 	char *p;
2803 	int error = 0;
2804 
2805 	conf_data = (struct mfi_config_data *)cm->cm_data;
2806 
2807 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2808 		p = (char *)conf_data->array;
2809 		p += conf_data->array_size * conf_data->array_count;
2810 		ld = (struct mfi_ld_config *)p;
2811 		if (ld->params.isSSCD == 1)
2812 			error = 1;
2813 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2814 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2815 		    (void **)&ld_info, sizeof(*ld_info));
2816 		if (error) {
2817 			device_printf(sc->mfi_dev, "Failed to allocate "
2818 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2819 			if (ld_info)
2820 				free(ld_info, M_MFIBUF);
2821 			return 0;
2822 		}
2823 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2824 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2825 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2826 		if (mfi_wait_command(sc, ld_cm) != 0) {
2827 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2828 			mfi_release_command(ld_cm);
2829 			free(ld_info, M_MFIBUF);
2830 			return 0;
2831 		}
2832 
2833 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2834 			free(ld_info, M_MFIBUF);
2835 			mfi_release_command(ld_cm);
2836 			return (0);
2837 		} else
2838 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2840 
2841 		if (ld_info->ld_config.params.isSSCD == 1)
2842 			error = 1;
2843 
2844 		mfi_release_command(ld_cm);
2845 		free(ld_info, M_MFIBUF);
2846 
2847 	}
2848 	return error;
2849 }
2850 
2851 static int
2852 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2853 {
2854 	uint8_t i;
2855 	struct mfi_ioc_packet *ioc;
2856 	int sge_size, error;
2857 	struct megasas_sge *kern_sge;

2858 	ioc = (struct mfi_ioc_packet *)arg;
2859 
2860 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2861 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2862 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2863 
2864 	if (sizeof(bus_addr_t) == 8) {
2865 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2866 		cm->cm_extra_frames = 2;
2867 		sge_size = sizeof(struct mfi_sg64);
2868 	} else {
2869 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2870 		sge_size = sizeof(struct mfi_sg32);
2871 	}
2872 
2873 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2874 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2875 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2876 			1, 0,			/* algnmnt, boundary */
2877 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2878 			BUS_SPACE_MAXADDR,	/* highaddr */
2879 			NULL, NULL,		/* filter, filterarg */
2880 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2881 			2,			/* nsegments */
2882 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2883 			BUS_DMA_ALLOCNOW,	/* flags */
2884 			NULL, NULL,		/* lockfunc, lockarg */
2885 			&sc->mfi_kbuff_arr_dmat[i])) {
2886 			device_printf(sc->mfi_dev,
2887 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2888 			return (ENOMEM);
2889 		}
2890 
2891 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2892 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2893 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2894 			device_printf(sc->mfi_dev,
2895 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2896 			return (ENOMEM);
2897 		}
2898 
2899 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2900 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2901 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2902 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2903 
2904 		if (!sc->kbuff_arr[i]) {
2905 			device_printf(sc->mfi_dev,
2906 			    "Could not allocate memory for kbuff_arr info\n");
2907 			return (ENOMEM);
2908 		}
2909 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2910 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2911 
2912 		if (sizeof(bus_addr_t) == 8) {
2913 			cm->cm_frame->stp.sgl.sg64[i].addr =
2914 			    kern_sge[i].phys_addr;
2915 			cm->cm_frame->stp.sgl.sg64[i].len =
2916 			    ioc->mfi_sgl[i].iov_len;
2917 		} else {
2918 			cm->cm_frame->stp.sgl.sg32[i].addr =
2919 			    kern_sge[i].phys_addr;
2920 			cm->cm_frame->stp.sgl.sg32[i].len =
2921 			    ioc->mfi_sgl[i].iov_len;
2922 		}
2923 
2924 		error = copyin(ioc->mfi_sgl[i].iov_base,
2925 		    sc->kbuff_arr[i],
2926 		    ioc->mfi_sgl[i].iov_len);
2927 		if (error != 0) {
2928 			device_printf(sc->mfi_dev, "Copy in failed\n");
2929 			return error;
2930 		}
2931 	}
2932 
2933 	cm->cm_flags |= MFI_CMD_MAPPED;
2934 	return (0);
2935 }
2936 
2937 static int
2938 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2939 {
2940 	struct mfi_command *cm;
2941 	struct mfi_dcmd_frame *dcmd;
2942 	void *ioc_buf = NULL;
2943 	uint32_t context;
2944 	int error = 0, locked;
2945 
2947 	if (ioc->buf_size > 0) {
2948 		if (ioc->buf_size > 1024 * 1024)
2949 			return (ENOMEM);
2950 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2951 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2952 		if (error) {
2953 			device_printf(sc->mfi_dev, "failed to copyin\n");
2954 			free(ioc_buf, M_MFIBUF);
2955 			return (error);
2956 		}
2957 	}
2958 
2959 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2960 
2961 	mtx_lock(&sc->mfi_io_lock);
2962 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2963 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2964 
2965 	/* Save context for later */
2966 	context = cm->cm_frame->header.context;
2967 
2968 	dcmd = &cm->cm_frame->dcmd;
2969 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2970 
2971 	cm->cm_sg = &dcmd->sgl;
2972 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2973 	cm->cm_data = ioc_buf;
2974 	cm->cm_len = ioc->buf_size;
2975 
2976 	/* restore context */
2977 	cm->cm_frame->header.context = context;
2978 
2979 	/* Cheat since we don't know if we're writing or reading */
2980 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2981 
2982 	error = mfi_check_command_pre(sc, cm);
2983 	if (error)
2984 		goto out;
2985 
2986 	error = mfi_wait_command(sc, cm);
2987 	if (error) {
2988 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2989 		goto out;
2990 	}
2991 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2992 	mfi_check_command_post(sc, cm);
2993 out:
2994 	mfi_release_command(cm);
2995 	mtx_unlock(&sc->mfi_io_lock);
2996 	mfi_config_unlock(sc, locked);
2997 	if (ioc->buf_size > 0)
2998 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2999 	if (ioc_buf)
3000 		free(ioc_buf, M_MFIBUF);
3001 	return (error);
3002 }
3003 
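/*
 * PTRIN widens a 32-bit user pointer carried in the compat ABI
 * structures back into a kernel void *.
 */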
3004 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3005 
3006 static int
3007 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3008 {
3009 	struct mfi_softc *sc;
3010 	union mfi_statrequest *ms;
3011 	struct mfi_ioc_packet *ioc;
3012 #ifdef COMPAT_FREEBSD32
3013 	struct mfi_ioc_packet32 *ioc32;
3014 #endif
3015 	struct mfi_ioc_aen *aen;
3016 	struct mfi_command *cm = NULL;
3017 	uint32_t context = 0;
3018 	union mfi_sense_ptr sense_ptr;
3019 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3020 	size_t len;
3021 	int i, res;
3022 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3023 #ifdef COMPAT_FREEBSD32
3024 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3025 	struct mfi_ioc_passthru iop_swab;
3026 #endif
3027 	int error, locked;
3028 	union mfi_sgl *sgl;
3029 	sc = dev->si_drv1;
3030 	error = 0;
3031 
3032 	if (sc->adpreset)
3033 		return EBUSY;
3034 
3035 	if (sc->hw_crit_error)
3036 		return EBUSY;
3037 
3038 	if (sc->issuepend_done == 0)
3039 		return EBUSY;
3040 
3041 	switch (cmd) {
3042 	case MFIIO_STATS:
3043 		ms = (union mfi_statrequest *)arg;
3044 		switch (ms->ms_item) {
3045 		case MFIQ_FREE:
3046 		case MFIQ_BIO:
3047 		case MFIQ_READY:
3048 		case MFIQ_BUSY:
3049 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3050 			    sizeof(struct mfi_qstat));
3051 			break;
3052 		default:
3053 			error = ENOIOCTL;
3054 			break;
3055 		}
3056 		break;
3057 	case MFIIO_QUERY_DISK:
3058 	{
3059 		struct mfi_query_disk *qd;
3060 		struct mfi_disk *ld;
3061 
3062 		qd = (struct mfi_query_disk *)arg;
3063 		mtx_lock(&sc->mfi_io_lock);
3064 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3065 			if (ld->ld_id == qd->array_id)
3066 				break;
3067 		}
3068 		if (ld == NULL) {
3069 			qd->present = 0;
3070 			mtx_unlock(&sc->mfi_io_lock);
3071 			return (0);
3072 		}
3073 		qd->present = 1;
3074 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3075 			qd->open = 1;
3076 		bzero(qd->devname, SPECNAMELEN + 1);
3077 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3078 		mtx_unlock(&sc->mfi_io_lock);
3079 		break;
3080 	}
3081 	case MFI_CMD:
3082 #ifdef COMPAT_FREEBSD32
3083 	case MFI_CMD32:
3084 #endif
3085 		{
3086 		devclass_t devclass;
3087 		ioc = (struct mfi_ioc_packet *)arg;
3088 		int adapter;
3089 
3090 		adapter = ioc->mfi_adapter_no;
3091 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3092 			devclass = devclass_find("mfi");
3093 			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
3094 		}
3095 		mtx_lock(&sc->mfi_io_lock);
3096 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3097 			mtx_unlock(&sc->mfi_io_lock);
3098 			return (EBUSY);
3099 		}
3100 		mtx_unlock(&sc->mfi_io_lock);
3101 		locked = 0;
3102 
3103 		/*
3104 		 * save off original context since copying from user
3105 		 * will clobber some data
3106 		 */
3107 		context = cm->cm_frame->header.context;
3108 		cm->cm_frame->header.context = cm->cm_index;
3109 
3110 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3111 		    2 * MEGAMFI_FRAME_SIZE);
3112 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3113 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3114 		cm->cm_frame->header.scsi_status = 0;
3115 		cm->cm_frame->header.pad0 = 0;
3116 		if (ioc->mfi_sge_count) {
3117 			cm->cm_sg =
3118 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3119 		}
3120 		sgl = cm->cm_sg;
3121 		cm->cm_flags = 0;
3122 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3123 			cm->cm_flags |= MFI_CMD_DATAIN;
3124 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3125 			cm->cm_flags |= MFI_CMD_DATAOUT;
3126 		/* Legacy app shim */
3127 		if (cm->cm_flags == 0)
3128 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3129 		cm->cm_len = cm->cm_frame->header.data_len;
3130 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3131 #ifdef COMPAT_FREEBSD32
3132 			if (cmd == MFI_CMD) {
3133 #endif
3134 				/* Native */
3135 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3136 #ifdef COMPAT_FREEBSD32
3137 			} else {
3138 				/* 32bit on 64bit */
3139 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3140 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3141 			}
3142 #endif
3143 			cm->cm_len += cm->cm_stp_len;
3144 		}
3145 		if (cm->cm_len &&
3146 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3147 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3148 			    M_WAITOK | M_ZERO);
3149 			if (cm->cm_data == NULL) {
3150 				device_printf(sc->mfi_dev, "Malloc failed\n");
3151 				goto out;
3152 			}
3153 		} else {
3154 			cm->cm_data = NULL;
3155 		}
3156 
3157 		/* restore header context */
3158 		cm->cm_frame->header.context = context;
3159 
3160 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3161 			res = mfi_stp_cmd(sc, cm, arg);
3162 			if (res != 0) {
				error = res;
3163 				goto out;
			}
3164 		} else {
3165 			temp = data;
3166 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3167 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3168 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3169 #ifdef COMPAT_FREEBSD32
3170 					if (cmd == MFI_CMD) {
3171 #endif
3172 						/* Native */
3173 						addr = ioc->mfi_sgl[i].iov_base;
3174 						len = ioc->mfi_sgl[i].iov_len;
3175 #ifdef COMPAT_FREEBSD32
3176 					} else {
3177 						/* 32bit on 64bit */
3178 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3179 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3180 						len = ioc32->mfi_sgl[i].iov_len;
3181 					}
3182 #endif
3183 					error = copyin(addr, temp, len);
3184 					if (error != 0) {
3185 						device_printf(sc->mfi_dev,
3186 						    "Copy in failed\n");
3187 						goto out;
3188 					}
3189 					temp = &temp[len];
3190 				}
3191 			}
3192 		}
3193 
3194 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3195 			locked = mfi_config_lock(sc,
3196 			     cm->cm_frame->dcmd.opcode);
3197 
3198 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3199 			cm->cm_frame->pass.sense_addr_lo =
3200 			    (uint32_t)cm->cm_sense_busaddr;
3201 			cm->cm_frame->pass.sense_addr_hi =
3202 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3203 		}
3204 		mtx_lock(&sc->mfi_io_lock);
3205 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3206 		if (!skip_pre_post) {
3207 			error = mfi_check_command_pre(sc, cm);
3208 			if (error) {
3209 				mtx_unlock(&sc->mfi_io_lock);
3210 				goto out;
3211 			}
3212 		}
3213 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3214 			device_printf(sc->mfi_dev,
3215 			    "Controller command failed\n");
3216 			mtx_unlock(&sc->mfi_io_lock);
3217 			goto out;
3218 		}
3219 		if (!skip_pre_post) {
3220 			mfi_check_command_post(sc, cm);
3221 		}
3222 		mtx_unlock(&sc->mfi_io_lock);
3223 
3224 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3225 			temp = data;
3226 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3227 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3228 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3229 #ifdef COMPAT_FREEBSD32
3230 					if (cmd == MFI_CMD) {
3231 #endif
3232 						/* Native */
3233 						addr = ioc->mfi_sgl[i].iov_base;
3234 						len = ioc->mfi_sgl[i].iov_len;
3235 #ifdef COMPAT_FREEBSD32
3236 					} else {
3237 						/* 32bit on 64bit */
3238 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3239 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3240 						len = ioc32->mfi_sgl[i].iov_len;
3241 					}
3242 #endif
3243 					error = copyout(temp, addr, len);
3244 					if (error != 0) {
3245 						device_printf(sc->mfi_dev,
3246 						    "Copy out failed\n");
3247 						goto out;
3248 					}
3249 					temp = &temp[len];
3250 				}
3251 			}
3252 		}
3253 
3254 		if (ioc->mfi_sense_len) {
3255 			/* get user-space sense ptr then copy out sense */
3256 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3257 			    &sense_ptr.sense_ptr_data[0],
3258 			    sizeof(sense_ptr.sense_ptr_data));
3259 #ifdef COMPAT_FREEBSD32
3260 			if (cmd != MFI_CMD) {
3261 				/*
3262 				 * not 64bit native so zero out any address
3263 				 * over 32bit */
3264 				sense_ptr.addr.high = 0;
3265 			}
3266 #endif
3267 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3268 			    ioc->mfi_sense_len);
3269 			if (error != 0) {
3270 				device_printf(sc->mfi_dev,
3271 				    "Copy out failed\n");
3272 				goto out;
3273 			}
3274 		}
3275 
3276 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3277 out:
3278 		mfi_config_unlock(sc, locked);
3279 		if (data)
3280 			free(data, M_MFIBUF);
3281 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3282 			for (i = 0; i < 2; i++) {
3283 				if (sc->kbuff_arr[i]) {
3284 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3285 						bus_dmamap_unload(
3286 						    sc->mfi_kbuff_arr_dmat[i],
3287 						    sc->mfi_kbuff_arr_dmamap[i]
3288 						    );
3289 					if (sc->kbuff_arr[i] != NULL)
3290 						bus_dmamem_free(
3291 						    sc->mfi_kbuff_arr_dmat[i],
3292 						    sc->kbuff_arr[i],
3293 						    sc->mfi_kbuff_arr_dmamap[i]
3294 						    );
3295 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3296 						bus_dma_tag_destroy(
3297 						    sc->mfi_kbuff_arr_dmat[i]);
3298 				}
3299 			}
3300 		}
3301 		if (cm) {
3302 			mtx_lock(&sc->mfi_io_lock);
3303 			mfi_release_command(cm);
3304 			mtx_unlock(&sc->mfi_io_lock);
3305 		}
3306 
3307 		break;
3308 		}
3309 	case MFI_SET_AEN:
3310 		aen = (struct mfi_ioc_aen *)arg;
3311 		error = mfi_aen_register(sc, aen->aen_seq_num,
3312 		    aen->aen_class_locale);
3313 
3314 		break;
3315 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3316 		{
3317 			devclass_t devclass;
3318 			struct mfi_linux_ioc_packet l_ioc;
3319 			int adapter;
3320 
3321 			devclass = devclass_find("mfi");
3322 			if (devclass == NULL)
3323 				return (ENOENT);
3324 
3325 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3326 			if (error)
3327 				return (error);
3328 			adapter = l_ioc.lioc_adapter_no;
3329 			sc = devclass_get_softc(devclass, adapter);
3330 			if (sc == NULL)
3331 				return (ENOENT);
3332 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3333 			    cmd, arg, flag, td));
3334 			break;
3335 		}
3336 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3337 		{
3338 			devclass_t devclass;
3339 			struct mfi_linux_ioc_aen l_aen;
3340 			int adapter;
3341 
3342 			devclass = devclass_find("mfi");
3343 			if (devclass == NULL)
3344 				return (ENOENT);
3345 
3346 			error = copyin(arg, &l_aen, sizeof(l_aen));
3347 			if (error)
3348 				return (error);
3349 			adapter = l_aen.laen_adapter_no;
3350 			sc = devclass_get_softc(devclass, adapter);
3351 			if (sc == NULL)
3352 				return (ENOENT);
3353 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3354 			    cmd, arg, flag, td));
3355 			break;
3356 		}
3357 #ifdef COMPAT_FREEBSD32
3358 	case MFIIO_PASSTHRU32:
3359 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3360 			error = ENOTTY;
3361 			break;
3362 		}
3363 		iop_swab.ioc_frame	= iop32->ioc_frame;
3364 		iop_swab.buf_size	= iop32->buf_size;
3365 		iop_swab.buf		= PTRIN(iop32->buf);
3366 		iop			= &iop_swab;
3367 		/* FALLTHROUGH */
3368 #endif
3369 	case MFIIO_PASSTHRU:
3370 		error = mfi_user_command(sc, iop);
3371 #ifdef COMPAT_FREEBSD32
3372 		if (cmd == MFIIO_PASSTHRU32)
3373 			iop32->ioc_frame = iop_swab.ioc_frame;
3374 #endif
3375 		break;
3376 	default:
3377 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3378 		error = ENOTTY;
3379 		break;
3380 	}
3381 
3382 	return (error);
3383 }
3384 
3385 static int
3386 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3387 {
3388 	struct mfi_softc *sc;
3389 	struct mfi_linux_ioc_packet l_ioc;
3390 	struct mfi_linux_ioc_aen l_aen;
3391 	struct mfi_command *cm = NULL;
3392 	struct mfi_aen *mfi_aen_entry;
3393 	union mfi_sense_ptr sense_ptr;
3394 	uint32_t context = 0;
3395 	uint8_t *data = NULL, *temp;
3396 	int i;
3397 	int error, locked;
3398 
3399 	sc = dev->si_drv1;
3400 	error = 0;
3401 	switch (cmd) {
3402 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3403 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3404 		if (error != 0)
3405 			return (error);
3406 
3407 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3408 			return (EINVAL);
3409 		}
3410 
3411 		mtx_lock(&sc->mfi_io_lock);
3412 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3413 			mtx_unlock(&sc->mfi_io_lock);
3414 			return (EBUSY);
3415 		}
3416 		mtx_unlock(&sc->mfi_io_lock);
3417 		locked = 0;
3418 
3419 		/*
3420 		 * save off original context since copying from user
3421 		 * will clobber some data
3422 		 */
3423 		context = cm->cm_frame->header.context;
3424 
3425 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3426 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3427 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3428 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3429 		cm->cm_frame->header.scsi_status = 0;
3430 		cm->cm_frame->header.pad0 = 0;
3431 		if (l_ioc.lioc_sge_count)
3432 			cm->cm_sg =
3433 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3434 		cm->cm_flags = 0;
3435 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3436 			cm->cm_flags |= MFI_CMD_DATAIN;
3437 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3438 			cm->cm_flags |= MFI_CMD_DATAOUT;
3439 		cm->cm_len = cm->cm_frame->header.data_len;
3440 		if (cm->cm_len &&
3441 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3442 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3443 			    M_WAITOK | M_ZERO);
3444 			if (cm->cm_data == NULL) {
3445 				device_printf(sc->mfi_dev, "Malloc failed\n");
3446 				goto out;
3447 			}
3448 		} else {
3449 			cm->cm_data = 0;
3450 			cm->cm_data = NULL;
3451 
3452 		/* restore header context */
3453 		cm->cm_frame->header.context = context;
3454 
3455 		temp = data;
3456 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3457 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3458 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3459 				       temp,
3460 				       l_ioc.lioc_sgl[i].iov_len);
3461 				if (error != 0) {
3462 					device_printf(sc->mfi_dev,
3463 					    "Copy in failed\n");
3464 					goto out;
3465 				}
3466 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3467 			}
3468 		}
3469 
3470 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3471 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3472 
3473 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3474 			cm->cm_frame->pass.sense_addr_lo =
3475 			    (uint32_t)cm->cm_sense_busaddr;
3476 			cm->cm_frame->pass.sense_addr_hi =
3477 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3478 		}
3479 
3480 		mtx_lock(&sc->mfi_io_lock);
3481 		error = mfi_check_command_pre(sc, cm);
3482 		if (error) {
3483 			mtx_unlock(&sc->mfi_io_lock);
3484 			goto out;
3485 		}
3486 
3487 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3488 			device_printf(sc->mfi_dev,
3489 			    "Controller command failed\n");
3490 			mtx_unlock(&sc->mfi_io_lock);
3491 			goto out;
3492 		}
3493 
3494 		mfi_check_command_post(sc, cm);
3495 		mtx_unlock(&sc->mfi_io_lock);
3496 
3497 		temp = data;
3498 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3499 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3500 				error = copyout(temp,
3501 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3502 					l_ioc.lioc_sgl[i].iov_len);
3503 				if (error != 0) {
3504 					device_printf(sc->mfi_dev,
3505 					    "Copy out failed\n");
3506 					goto out;
3507 				}
3508 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3509 			}
3510 		}
3511 
3512 		if (l_ioc.lioc_sense_len) {
3513 			/* get user-space sense ptr then copy out sense */
3514 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3515                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3516 			    &sense_ptr.sense_ptr_data[0],
3517 			    sizeof(sense_ptr.sense_ptr_data));
3518 #ifdef __amd64__
3519 			/*
3520 			 * Only 32-bit Linux applications are supported, so
3521 			 * zero out the upper half of any 64-bit address.
3522 			 */
3523 			sense_ptr.addr.high = 0;
3524 #endif
3525 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3526 			    l_ioc.lioc_sense_len);
3527 			if (error != 0) {
3528 				device_printf(sc->mfi_dev,
3529 				    "Copy out failed\n");
3530 				goto out;
3531 			}
3532 		}
3533 
3534 		error = copyout(&cm->cm_frame->header.cmd_status,
3535 			&((struct mfi_linux_ioc_packet*)arg)
3536 			->lioc_frame.hdr.cmd_status,
3537 			1);
3538 		if (error != 0) {
3539 			device_printf(sc->mfi_dev,
3540 				      "Copy out failed\n");
3541 			goto out;
3542 		}
3543 
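		/*
		 * Common exit path: release the config lock if held, free the
		 * bounce buffer, and return the command to the free pool.
		 */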
3544 out:
3545 		mfi_config_unlock(sc, locked);
3546 		if (data)
3547 			free(data, M_MFIBUF);
3548 		if (cm) {
3549 			mtx_lock(&sc->mfi_io_lock);
3550 			mfi_release_command(cm);
3551 			mtx_unlock(&sc->mfi_io_lock);
3552 		}
3553 
3554 		return (error);
3555 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3556 		error = copyin(arg, &l_aen, sizeof(l_aen));
3557 		if (error != 0)
3558 			return (error);
3559 		printf("AEN requested by pid %d\n", curproc->p_pid);
3560 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3561 		    M_WAITOK);
3562 		mtx_lock(&sc->mfi_io_lock);
3563 		/* M_WAITOK guarantees a non-NULL return; insert unconditionally */
3564 		mfi_aen_entry->p = curproc;
3565 		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3566 		    aen_link);
3568 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3569 		    l_aen.laen_class_locale);
3570 
3571 		if (error != 0) {
3572 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3573 			    aen_link);
3574 			free(mfi_aen_entry, M_MFIBUF);
3575 		}
3576 		mtx_unlock(&sc->mfi_io_lock);
3577 
3578 		return (error);
3579 	default:
3580 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3581 		error = ENOENT;
3582 		break;
3583 	}
3584 
3585 	return (error);
3586 }
3587 
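/*
 * Example (hypothetical, for illustration only): a userland AEN consumer
 * would typically block in poll(2) on the mfi device node and re-arm the
 * notification afterwards, along the lines of:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *		(void)ioctl(fd, MFI_LINUX_SET_AEN_2, &l_aen);
 *
 * The names "fd" and "l_aen" are placeholders, not part of this driver.
 */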
3588 static int
3589 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3590 {
3591 	struct mfi_softc *sc;
3592 	int revents = 0;
3593 
3594 	sc = dev->si_drv1;
3595 
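	/*
	 * Report the device readable once an AEN has fired; flag POLLERR
	 * when no AEN command is outstanding at all.
	 */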
3596 	if (poll_events & (POLLIN | POLLRDNORM)) {
3597 		if (sc->mfi_aen_triggered != 0) {
3598 			revents |= poll_events & (POLLIN | POLLRDNORM);
3599 			sc->mfi_aen_triggered = 0;
3600 		}
3601 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3602 			revents |= POLLERR;
3603 		}
3604 	}
3605 
3606 	if (revents == 0) {
3607 		if (poll_events & (POLLIN | POLLRDNORM)) {
3608 			sc->mfi_poll_waiting = 1;
3609 			selrecord(td, &sc->mfi_select);
3610 		}
3611 	}
3612 
3613 	return (revents);
3614 }
3615 
3616 static void
3617 mfi_dump_all(void)
3618 {
3619 	struct mfi_softc *sc;
3620 	struct mfi_command *cm;
3621 	devclass_t dc;
3622 	time_t deadline;
3623 	int timedout;
3624 	int i;
3625 
3626 	dc = devclass_find("mfi");
3627 	if (dc == NULL) {
3628 		printf("No mfi dev class\n");
3629 		return;
3630 	}
3631 
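	/* Walk every mfi(4) instance and report commands busy past the timeout. */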
3632 	for (i = 0; ; i++) {
3633 		sc = devclass_get_softc(dc, i);
3634 		if (sc == NULL)
3635 			break;
3636 		device_printf(sc->mfi_dev, "Dumping command state\n");
3637 		timedout = 0;
3638 		deadline = time_uptime - MFI_CMD_TIMEOUT;
3639 		mtx_lock(&sc->mfi_io_lock);
3640 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3641 			if (cm->cm_timestamp < deadline) {
3642 				device_printf(sc->mfi_dev,
3643 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3644 				    cm, (int)(time_uptime - cm->cm_timestamp));
3645 				MFI_PRINT_CMD(cm);
3646 				timedout++;
3647 			}
3648 		}
3649 
3650 #if 0
3651 		if (timedout)
3652 			MFI_DUMP_CMDS(sc);
3653 #endif
3654 
3655 		mtx_unlock(&sc->mfi_io_lock);
3656 	}
3657 
3658 	return;
3659 }
3660 
3661 static void
3662 mfi_timeout(void *data)
3663 {
3664 	struct mfi_softc *sc = (struct mfi_softc *)data;
3665 	struct mfi_command *cm;
3666 	time_t deadline;
3667 	int timedout = 0;
3668 
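	/*
	 * Watchdog, re-armed every MFI_CMD_TIMEOUT seconds.  When no adapter
	 * reset is pending and mfi_tbolt_reset() reports nothing to do, just
	 * re-arm; otherwise scan the busy queue for stale commands.
	 */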
3669 	deadline = time_uptime - MFI_CMD_TIMEOUT;
3670 	if (sc->adpreset == 0) {
3671 		if (!mfi_tbolt_reset(sc)) {
3672 			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3673 			return;
3674 		}
3675 	}
3676 	mtx_lock(&sc->mfi_io_lock);
3677 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
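		/* AEN and map-sync commands are long-lived; never time them out. */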
3678 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3679 			continue;
3680 		if (cm->cm_timestamp < deadline) {
3681 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
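				/*
				 * Commands pending across an adapter reset get
				 * a fresh timestamp rather than a timeout.
				 */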
3682 				cm->cm_timestamp = time_uptime;
3683 			} else {
3684 				device_printf(sc->mfi_dev,
3685 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3686 				    cm, (int)(time_uptime - cm->cm_timestamp));
3688 				MFI_PRINT_CMD(cm);
3689 				MFI_VALIDATE_CMD(sc, cm);
3690 				timedout++;
3691 			}
3692 		}
3693 	}
3694 
3695 #if 0
3696 	if (timedout)
3697 		MFI_DUMP_CMDS(sc);
3698 #endif
3699 
3700 	mtx_unlock(&sc->mfi_io_lock);
3701 
3702 	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3703 	    mfi_timeout, sc);
3704 
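	/* Dead call, presumably kept so mfi_dump_all() stays referenced. */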
3705 	if (0)
3706 		mfi_dump_all();
3707 	return;
3708 }
3709