xref: /freebsd/sys/dev/mfi/mfi.c (revision a743df5c964d81a7c920cf257e87cb42ab993d58)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_mfi.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/poll.h>
37 #include <sys/selinfo.h>
38 #include <sys/bus.h>
39 #include <sys/conf.h>
40 #include <sys/eventhandler.h>
41 #include <sys/rman.h>
42 #include <sys/bus_dma.h>
43 #include <sys/bio.h>
44 #include <sys/ioccom.h>
45 #include <sys/uio.h>
46 #include <sys/proc.h>
47 
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 
51 #include <dev/mfi/mfireg.h>
52 #include <dev/mfi/mfi_ioctl.h>
53 #include <dev/mfi/mfivar.h>
54 
55 static int	mfi_alloc_commands(struct mfi_softc *);
56 static void	mfi_release_command(struct mfi_command *cm);
57 static int	mfi_comms_init(struct mfi_softc *);
58 static int	mfi_polled_command(struct mfi_softc *, struct mfi_command *);
59 static int	mfi_get_controller_info(struct mfi_softc *);
60 static int	mfi_get_log_state(struct mfi_softc *,
61 		    struct mfi_evt_log_state *);
62 static int	mfi_get_entry(struct mfi_softc *, int);
63 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
64 static void	mfi_startup(void *arg);
65 static void	mfi_intr(void *arg);
66 static void	mfi_enable_intr(struct mfi_softc *sc);
67 static void	mfi_ldprobe_inq(struct mfi_softc *sc);
68 static void	mfi_ldprobe_inq_complete(struct mfi_command *);
69 static int	mfi_ldprobe_capacity(struct mfi_softc *sc, int id);
70 static void	mfi_ldprobe_capacity_complete(struct mfi_command *);
71 static int	mfi_ldprobe_tur(struct mfi_softc *sc, int id);
72 static void	mfi_ldprobe_tur_complete(struct mfi_command *);
73 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
74 static void	mfi_aen_complete(struct mfi_command *);
75 static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
76 static int	mfi_add_ld(struct mfi_softc *sc, int id, uint64_t, uint32_t);
77 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
78 static void	mfi_bio_complete(struct mfi_command *);
79 static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
80 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
81 static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
82 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
83 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
84 
85 /* Management interface */
86 static d_open_t		mfi_open;
87 static d_close_t	mfi_close;
88 static d_ioctl_t	mfi_ioctl;
89 static d_poll_t		mfi_poll;
90 
91 static struct cdevsw mfi_cdevsw = {
92 	.d_version = 	D_VERSION,
93 	.d_flags =	0,
94 	.d_open = 	mfi_open,
95 	.d_close =	mfi_close,
96 	.d_ioctl =	mfi_ioctl,
97 	.d_poll =	mfi_poll,
98 	.d_name =	"mfi",
99 };
100 
101 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
102 
103 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
104 
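/*
 * Step the adapter firmware toward the READY state: clear a pending
 * handshake, ask an OPERATIONAL controller to transition, and give
 * each intermediate state a bounded wait.  A FAULT or unknown state
 * is treated as fatal.
 */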
105 static int
106 mfi_transition_firmware(struct mfi_softc *sc)
107 {
108 	int32_t fw_state, cur_state;
109 	int max_wait, i;
110 
111 	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
112 	while (fw_state != MFI_FWSTATE_READY) {
113 		if (bootverbose)
114 			device_printf(sc->mfi_dev, "Waiting for firmware to "
115 			    "become ready\n");
116 		cur_state = fw_state;
117 		switch (fw_state) {
118 		case MFI_FWSTATE_FAULT:
119 			device_printf(sc->mfi_dev, "Firmware fault\n");
120 			return (ENXIO);
121 		case MFI_FWSTATE_WAIT_HANDSHAKE:
122 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
123 			max_wait = 2;
124 			break;
125 		case MFI_FWSTATE_OPERATIONAL:
126 			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
127 			max_wait = 10;
128 			break;
129 		case MFI_FWSTATE_UNDEFINED:
130 		case MFI_FWSTATE_BB_INIT:
131 			max_wait = 2;
132 			break;
133 		case MFI_FWSTATE_FW_INIT:
134 		case MFI_FWSTATE_DEVICE_SCAN:
135 		case MFI_FWSTATE_FLUSH_CACHE:
136 			max_wait = 20;
137 			break;
138 		default:
139 			device_printf(sc->mfi_dev, "Unknown firmware state %d\n",
140 			    fw_state);
141 			return (ENXIO);
142 		}
143 		for (i = 0; i < (max_wait * 10); i++) {
144 			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
145 			if (fw_state == cur_state)
146 				DELAY(100000);
147 			else
148 				break;
149 		}
150 		if (fw_state == cur_state) {
151 			device_printf(sc->mfi_dev, "firmware stuck in state "
152 			    "%#x\n", fw_state);
153 			return (ENXIO);
154 		}
155 	}
156 	return (0);
157 }
158 
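/*
 * busdma callback that records the bus address of a single-segment
 * mapping; used below when loading the comms, frame, and sense areas.
 */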
159 static void
160 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
161 {
162 	uint32_t *addr;
163 
164 	addr = arg;
165 	*addr = segs[0].ds_addr;
166 }
167 
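/*
 * Main attach path: verify that the firmware is READY, allocate the
 * comms area, frame pool, and sense pool, initialize the command
 * interface, and hook up the interrupt handler, config hook, shutdown
 * handler, and management device node.
 */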
168 int
169 mfi_attach(struct mfi_softc *sc)
170 {
171 	uint32_t status;
172 	int error, commsz, framessz, sensesz;
173 	int frames, unit;
174 
175 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
176 	TAILQ_INIT(&sc->mfi_ld_tqh);
177 	TAILQ_INIT(&sc->mfi_aen_pids);
178 
179 	mfi_initq_free(sc);
180 	mfi_initq_ready(sc);
181 	mfi_initq_busy(sc);
182 	mfi_initq_bio(sc);
183 
184 	/* Before we get too far, see if the firmware is working */
185 	if ((error = mfi_transition_firmware(sc)) != 0) {
186 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
187 		    "error %d\n", error);
188 		return (ENXIO);
189 	}
190 
191 	/*
192 	 * Get information needed for sizing the contiguous memory for the
193 	 * frame pool.  Size down the sgl parameter since we know that
194 	 * we will never need more than what's required for MAXPHYS.
195 	 * It would be nice if these constants were available at runtime
196 	 * instead of compile time.
197 	 */
198 	status = MFI_READ4(sc, MFI_OMSG0);
199 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
200 	sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
201 	sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) + 1));
202 
203 	/*
204 	 * Create the dma tag for data buffers.  Used both for block I/O
205 	 * and for various internal data queries.
206 	 */
207 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
208 				1, 0,			/* algnmnt, boundary */
209 				BUS_SPACE_MAXADDR,	/* lowaddr */
210 				BUS_SPACE_MAXADDR,	/* highaddr */
211 				NULL, NULL,		/* filter, filterarg */
212 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
213 				sc->mfi_total_sgl,	/* nsegments */
214 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
215 				BUS_DMA_ALLOCNOW,	/* flags */
216 				busdma_lock_mutex,	/* lockfunc */
217 				&sc->mfi_io_lock,	/* lockfuncarg */
218 				&sc->mfi_buffer_dmat)) {
219 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
220 		return (ENOMEM);
221 	}
222 
223 	/*
224 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
225 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
226  * entry, so the calculated size here will be 1 more than
227 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
228 	 */
229 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
230 	    sizeof(struct mfi_hwcomms);
231 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
232 				1, 0,			/* algnmnt, boundary */
233 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
234 				BUS_SPACE_MAXADDR,	/* highaddr */
235 				NULL, NULL,		/* filter, filterarg */
236 				commsz,			/* maxsize */
237 				1,			/* nsegments */
238 				commsz,			/* maxsegsize */
239 				0,			/* flags */
240 				NULL, NULL,		/* lockfunc, lockarg */
241 				&sc->mfi_comms_dmat)) {
242 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
243 		return (ENOMEM);
244 	}
245 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
246 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
247 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
248 		return (ENOMEM);
249 	}
250 	bzero(sc->mfi_comms, commsz);
251 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
252 	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
253 
254 	/*
255 	 * Allocate DMA memory for the command frames.  Keep them in the
256 	 * lower 4GB for efficiency.  Calculate the size of the frames at
257 	 * the same time; the frame is 64 bytes plus space for the SG lists.
258 	 * The assumption here is that the SG list will start at the second
259 	 * 64 byte segment of the frame and not use the unused bytes in the
260 	 * frame.  While this might seem wasteful, apparently the frames must
261 	 * be 64 byte aligned, so any savings would be negated by the extra
262 	 * alignment padding.
263 	 */
264 	if (sizeof(bus_addr_t) == 8) {
265 		sc->mfi_sgsize = sizeof(struct mfi_sg64);
266 		sc->mfi_flags |= MFI_FLAGS_SG64;
267 	} else {
268 		sc->mfi_sgsize = sizeof(struct mfi_sg32);
269 	}
270 	frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
271 	    MFI_FRAME_SIZE + 1;
272 	sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
273 	framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
274 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
275 				64, 0,			/* algnmnt, boundary */
276 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
277 				BUS_SPACE_MAXADDR,	/* highaddr */
278 				NULL, NULL,		/* filter, filterarg */
279 				framessz,		/* maxsize */
280 				1,			/* nsegments */
281 				framessz,		/* maxsegsize */
282 				0,			/* flags */
283 				NULL, NULL,		/* lockfunc, lockarg */
284 				&sc->mfi_frames_dmat)) {
285 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
286 		return (ENOMEM);
287 	}
288 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
289 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
290 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
291 		return (ENOMEM);
292 	}
293 	bzero(sc->mfi_frames, framessz);
294 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
295 	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr, 0);
296 
297 	/*
298 	 * Allocate DMA memory for the frame sense data.  Keep them in the
299 	 * lower 4GB for efficiency
300 	 */
301 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
302 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
303 				4, 0,			/* algnmnt, boundary */
304 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
305 				BUS_SPACE_MAXADDR,	/* highaddr */
306 				NULL, NULL,		/* filter, filterarg */
307 				sensesz,		/* maxsize */
308 				1,			/* nsegments */
309 				sensesz,		/* maxsegsize */
310 				0,			/* flags */
311 				NULL, NULL,		/* lockfunc, lockarg */
312 				&sc->mfi_sense_dmat)) {
313 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
314 		return (ENOMEM);
315 	}
316 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
317 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
318 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
319 		return (ENOMEM);
320 	}
321 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
322 	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
323 
324 	if ((error = mfi_alloc_commands(sc)) != 0)
325 		return (error);
326 
327 	if ((error = mfi_comms_init(sc)) != 0)
328 		return (error);
329 
330 	if ((error = mfi_get_controller_info(sc)) != 0)
331 		return (error);
332 
333 	if ((error = mfi_aen_setup(sc, 0)) != 0)
334 		return (error);
335 
336 	/*
337 	 * Set up the interrupt handler.  XXX This should happen in
338 	 * mfi_pci.c
339 	 */
340 	sc->mfi_irq_rid = 0;
341 	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
342 	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
343 		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
344 		return (EINVAL);
345 	}
346 	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
347 	    mfi_intr, sc, &sc->mfi_intr)) {
348 		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
349 		return (EINVAL);
350 	}
351 
352 	/* Register a config hook to probe the bus for arrays */
353 	sc->mfi_ich.ich_func = mfi_startup;
354 	sc->mfi_ich.ich_arg = sc;
355 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
356 		device_printf(sc->mfi_dev, "Cannot establish configuration "
357 		    "hook\n");
358 		return (EINVAL);
359 	}
360 
361 	/*
362 	 * Register a shutdown handler.
363 	 */
364 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
365 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
366 		device_printf(sc->mfi_dev, "Warning: shutdown event "
367 		    "registration failed\n");
368 	}
369 
370 	/*
371 	 * Create the control device for doing management
372 	 */
373 	unit = device_get_unit(sc->mfi_dev);
374 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
375 	    0640, "mfi%d", unit);
376 	if (unit == 0)
377 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
378 	if (sc->mfi_cdev != NULL)
379 		sc->mfi_cdev->si_drv1 = sc;
380 
381 	return (0);
382 }
383 
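/*
 * Carve the preallocated frame and sense pools into per-command
 * structures, seed each frame's context field with the command index,
 * and create a DMA map for each command's data buffer.
 */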
384 static int
385 mfi_alloc_commands(struct mfi_softc *sc)
386 {
387 	struct mfi_command *cm;
388 	int i, ncmds;
389 
390 	/*
391 	 * XXX Should we allocate all the commands up front, or allocate on
392 	 * demand later like 'aac' does?
393 	 */
394 	ncmds = sc->mfi_max_fw_cmds;
395 	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
396 	    M_WAITOK | M_ZERO);
397 
398 	for (i = 0; i < ncmds; i++) {
399 		cm = &sc->mfi_commands[i];
400 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
401 		    sc->mfi_frame_size * i);
402 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
403 		    sc->mfi_frame_size * i;
404 		cm->cm_frame->header.context = i;
405 		cm->cm_sense = &sc->mfi_sense[i];
406 		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
407 		cm->cm_sc = sc;
408 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
409 		    &cm->cm_dmamap) == 0)
410 			mfi_release_command(cm);
411 		else
412 			break;
413 		sc->mfi_total_cmds++;
414 	}
415 
416 	return (0);
417 }
418 
419 static void
420 mfi_release_command(struct mfi_command *cm)
421 {
422 	uint32_t *hdr_data;
423 
424 	/*
425 	 * Zero out the important fields of the frame, but make sure the
426 	 * context field is preserved
427 	 */
428 	hdr_data = (uint32_t *)cm->cm_frame;
429 	hdr_data[0] = 0;
430 	hdr_data[1] = 0;
431 
432 	cm->cm_extra_frames = 0;
433 	cm->cm_flags = 0;
434 	cm->cm_complete = NULL;
435 	cm->cm_private = NULL;
436 	cm->cm_sg = 0;
437 	cm->cm_total_frame_size = 0;
438 	mfi_enqueue_free(cm);
439 }
440 
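/*
 * Tell the firmware where the reply queue and the producer/consumer
 * indices live by sending a polled MFI_CMD_INIT frame that points at
 * the mfi_hwcomms area allocated in mfi_attach().
 */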
441 static int
442 mfi_comms_init(struct mfi_softc *sc)
443 {
444 	struct mfi_command *cm;
445 	struct mfi_init_frame *init;
446 	struct mfi_init_qinfo *qinfo;
447 	int error;
448 
449 	if ((cm = mfi_dequeue_free(sc)) == NULL)
450 		return (EBUSY);
451 
452 	/*
453 	 * Abuse the SG list area of the frame to hold the init_qinfo
454 	 * object.
455 	 */
456 	init = &cm->cm_frame->init;
457 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
458 
459 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
460 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
461 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
462 	    offsetof(struct mfi_hwcomms, hw_reply_q);
463 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
464 	    offsetof(struct mfi_hwcomms, hw_pi);
465 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
466 	    offsetof(struct mfi_hwcomms, hw_ci);
467 
468 	init->header.cmd = MFI_CMD_INIT;
469 	init->header.data_len = sizeof(struct mfi_init_qinfo);
470 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
471 
472 	if ((error = mfi_polled_command(sc, cm)) != 0) {
473 		device_printf(sc->mfi_dev, "failed to send init command\n");
474 		return (error);
475 	}
476 	mfi_release_command(cm);
477 
478 	return (0);
479 }
480 
481 static int
482 mfi_get_controller_info(struct mfi_softc *sc)
483 {
484 	struct mfi_command *cm;
485 	struct mfi_dcmd_frame *dcmd;
486 	struct mfi_ctrl_info *ci;
487 	uint32_t max_sectors_1, max_sectors_2;
488 	int error;
489 
490 	if ((cm = mfi_dequeue_free(sc)) == NULL)
491 		return (EBUSY);
492 
493 	ci = malloc(sizeof(struct mfi_ctrl_info), M_MFIBUF, M_NOWAIT | M_ZERO);
494 	if (ci == NULL) {
495 		mfi_release_command(cm);
496 		return (ENOMEM);
497 	}
498 
499 	dcmd = &cm->cm_frame->dcmd;
500 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
501 	dcmd->header.cmd = MFI_CMD_DCMD;
502 	dcmd->header.timeout = 0;
503 	dcmd->header.data_len = sizeof(struct mfi_ctrl_info);
504 	dcmd->opcode = MFI_DCMD_CTRL_GETINFO;
505 	cm->cm_sg = &dcmd->sgl;
506 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
507 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
508 	cm->cm_data = ci;
509 	cm->cm_len = sizeof(struct mfi_ctrl_info);
510 
511 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
512 		device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
513 		free(ci, M_MFIBUF);
514 		mfi_release_command(cm);
515 		return (error);
516 	}
517 
518 	/* It's ok if this fails, just use default info instead */
519 	if ((error = mfi_polled_command(sc, cm)) != 0) {
520 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
521 		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
522 		    MFI_SECTOR_LEN;
523 		free(ci, M_MFIBUF);
524 		mfi_release_command(cm);
525 		return (0);
526 	}
527 
528 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
529 	    BUS_DMASYNC_POSTREAD);
530 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
531 
532 	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
533 	max_sectors_2 = ci->max_request_size;
534 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
535 
536 	free(ci, M_MFIBUF);
537 	mfi_release_command(cm);
538 
539 	return (error);
540 }
541 
542 static int
543 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state *log_state)
544 {
545 	struct mfi_command *cm;
546 	struct mfi_dcmd_frame *dcmd;
547 	int error;
548 
549 	if ((cm = mfi_dequeue_free(sc)) == NULL)
550 		return (EBUSY);
551 
552 
553 	dcmd = &cm->cm_frame->dcmd;
554 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
555 	dcmd->header.cmd = MFI_CMD_DCMD;
556 	dcmd->header.timeout = 0;
557 	dcmd->header.data_len = sizeof(struct mfi_evt_log_state);
558 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GETINFO;
559 	cm->cm_sg = &dcmd->sgl;
560 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
561 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
562 	cm->cm_data = log_state;
563 	cm->cm_len = sizeof(struct mfi_evt_log_state);
564 
565 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
566 		device_printf(sc->mfi_dev, "Log state buffer map failed\n");
567 		mfi_release_command(cm);
568 		return (error);
569 	}
570 
571 	/* It's ok if this fails, just use default info instead */
572 	if ((error = mfi_polled_command(sc, cm)) != 0) {
573 		device_printf(sc->mfi_dev, "Failed to get log state\n");
574 		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
575 		    MFI_SECTOR_LEN;
576 		mfi_release_command(cm);
577 		return (0);
578 	}
579 
580 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
581 	    BUS_DMASYNC_POSTREAD);
582 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
583 
584 	mfi_release_command(cm);
585 
586 	return (error);
587 }
588 
589 static int
590 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
591 {
592 	struct mfi_evt_log_state log_state;
593 	union mfi_evt class_locale;
594 	int error = 0;
595 	uint32_t seq;
596 
597 	class_locale.members.reserved = 0;
598 	class_locale.members.locale = MFI_EVT_LOCALE_ALL;
599 	class_locale.members.class  = MFI_EVT_CLASS_DEBUG;
600 
601 	if (seq_start == 0) {
602 		error = mfi_get_log_state(sc, &log_state);
603 		if (error)
604 			return (error);
605 		/*
606 		 * Don't run them yet since we can't parse them.
607 		 * We can indirectly get the contents from
608 		 * the AEN mechanism by setting it lower than
609 		 * current.  The firmware will iterate through them.
610 		 */
611 #if 0
612 		for (seq = log_state.shutdown_seq_num;
613 		     seq <= log_state.newest_seq_num; seq++) {
614 			mfi_get_entry(sc, seq);
615 		}
616 #endif
617 
618 		seq = log_state.shutdown_seq_num + 1;
619 	} else
620 		seq = seq_start;
621 	mfi_aen_register(sc, seq, class_locale.word);
622 
623 	return 0;
624 }
625 
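/*
 * Send a frame and busy-wait on its status byte instead of waiting
 * for the reply queue, giving up after MFI_POLL_TIMEOUT_SECS.  The
 * frame is flagged so the firmware does not also post it to the
 * reply queue.
 */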
626 static int
627 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
628 {
629 	struct mfi_frame_header *hdr;
630 	int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
631 
632 	hdr = &cm->cm_frame->header;
633 	hdr->cmd_status = 0xff;
634 	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
635 
636 	mfi_send_frame(sc, cm);
637 
638 	while (hdr->cmd_status == 0xff) {
639 		DELAY(1000);
640 		tm -= 1000;
641 		if (tm <= 0)
642 			break;
643 	}
644 
645 	if (hdr->cmd_status == 0xff) {
646 		device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
647 		return (ETIMEDOUT);
648 	}
649 
650 	return (0);
651 }
652 
653 void
654 mfi_free(struct mfi_softc *sc)
655 {
656 	struct mfi_command *cm;
657 	int i;
658 
659 	if (sc->mfi_cdev != NULL)
660 		destroy_dev(sc->mfi_cdev);
661 
662 	if (sc->mfi_total_cmds != 0) {
663 		for (i = 0; i < sc->mfi_total_cmds; i++) {
664 			cm = &sc->mfi_commands[i];
665 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
666 		}
667 		free(sc->mfi_commands, M_MFIBUF);
668 	}
669 
670 	if (sc->mfi_intr)
671 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
672 	if (sc->mfi_irq != NULL)
673 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
674 		    sc->mfi_irq);
675 
676 	if (sc->mfi_sense_busaddr != 0)
677 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
678 	if (sc->mfi_sense != NULL)
679 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
680 		    sc->mfi_sense_dmamap);
681 	if (sc->mfi_sense_dmat != NULL)
682 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
683 
684 	if (sc->mfi_frames_busaddr != 0)
685 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
686 	if (sc->mfi_frames != NULL)
687 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
688 		    sc->mfi_frames_dmamap);
689 	if (sc->mfi_frames_dmat != NULL)
690 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
691 
692 	if (sc->mfi_comms_busaddr != 0)
693 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
694 	if (sc->mfi_comms != NULL)
695 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
696 		    sc->mfi_comms_dmamap);
697 	if (sc->mfi_comms_dmat != NULL)
698 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
699 
700 	if (sc->mfi_buffer_dmat != NULL)
701 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
702 	if (sc->mfi_parent_dmat != NULL)
703 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
704 
705 	if (mtx_initialized(&sc->mfi_io_lock))
706 		mtx_destroy(&sc->mfi_io_lock);
707 
708 	return;
709 }
710 
711 static void
712 mfi_startup(void *arg)
713 {
714 	struct mfi_softc *sc;
715 
716 	sc = (struct mfi_softc *)arg;
717 
718 	config_intrhook_disestablish(&sc->mfi_ich);
719 
720 	mfi_enable_intr(sc);
721 	mfi_ldprobe_inq(sc);
722 }
723 
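/*
 * Interrupt handler: acknowledge the outbound status register, then
 * walk the reply queue from the consumer index to the producer index
 * and complete the command whose context is stored in each slot.
 */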
724 static void
725 mfi_intr(void *arg)
726 {
727 	struct mfi_softc *sc;
728 	struct mfi_command *cm;
729 	uint32_t status, pi, ci, context;
730 
731 	sc = (struct mfi_softc *)arg;
732 
733 	status = MFI_READ4(sc, MFI_OSTS);
734 	if ((status & MFI_OSTS_INTR_VALID) == 0)
735 		return;
736 	MFI_WRITE4(sc, MFI_OSTS, status);
737 
738 	pi = sc->mfi_comms->hw_pi;
739 	ci = sc->mfi_comms->hw_ci;
740 	mtx_lock(&sc->mfi_io_lock);
741 	while (ci != pi) {
742 		context = sc->mfi_comms->hw_reply_q[ci];
743 		sc->mfi_comms->hw_reply_q[ci] = 0xffffffff;
744 		if (context == 0xffffffff) {
745 			device_printf(sc->mfi_dev, "mfi_intr: invalid context "
746 			    "pi= %d ci= %d\n", pi, ci);
747 		} else {
748 			cm = &sc->mfi_commands[context];
749 			mfi_remove_busy(cm);
750 			mfi_complete(sc, cm);
751 		}
752 		ci++;
753 		if (ci == (sc->mfi_max_fw_cmds + 1)) {
754 			ci = 0;
755 		}
756 	}
757 	mtx_unlock(&sc->mfi_io_lock);
758 
759 	sc->mfi_comms->hw_ci = ci;
760 
761 	return;
762 }
763 
764 int
765 mfi_shutdown(struct mfi_softc *sc)
766 {
767 	struct mfi_dcmd_frame *dcmd;
768 	struct mfi_command *cm;
769 	int error;
770 
771 	if ((cm = mfi_dequeue_free(sc)) == NULL)
772 		return (EBUSY);
773 
774 	if (sc->mfi_aen_cm != NULL)
775 		mfi_abort(sc, sc->mfi_aen_cm);
776 
777 	dcmd = &cm->cm_frame->dcmd;
778 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
779 	dcmd->header.cmd = MFI_CMD_DCMD;
780 	dcmd->header.sg_count = 0;
781 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
782 	dcmd->header.timeout = 0;
783 	dcmd->header.data_len = 0;
784 	dcmd->opcode = MFI_DCMD_CTRL_SHUTDOWN;
785 
786 	if ((error = mfi_polled_command(sc, cm)) != 0) {
787 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
788 	}
789 
790 	return (error);
791 }
792 
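/*
 * Unmask controller interrupts via the outbound interrupt mask
 * register; called from the config hook once setup is complete.
 */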
793 static void
794 mfi_enable_intr(struct mfi_softc *sc)
795 {
796 
797 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
798 }
799 
800 static void
801 mfi_ldprobe_inq(struct mfi_softc *sc)
802 {
803 	struct mfi_command *cm;
804 	struct mfi_pass_frame *pass;
805 	char *inq;
806 	int i;
807 
808 	/* Probe all possible targets with a SCSI INQ command */
809 	mtx_lock(&sc->mfi_io_lock);
810 	sc->mfi_probe_count = 0;
811 	for (i = 0; i < MFI_MAX_CHANNEL_DEVS; i++) {
812 		inq = malloc(MFI_INQ_LENGTH, M_MFIBUF, M_NOWAIT|M_ZERO);
813 		if (inq == NULL)
814 			break;
815 		cm = mfi_dequeue_free(sc);
816 		if (cm == NULL) {
817 			free(inq, M_MFIBUF);
818 			msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart",
819 			    5 * hz);
820 			i--;
821 			continue;
822 		}
823 		pass = &cm->cm_frame->pass;
824 		pass->header.cmd = MFI_CMD_LD_SCSI_IO;
825 		pass->header.target_id = i;
826 		pass->header.lun_id = 0;
827 		pass->header.cdb_len = 6;
828 		pass->header.timeout = 0;
829 		pass->header.data_len = MFI_INQ_LENGTH;
830 		bzero(pass->cdb, 16);
831 		pass->cdb[0] = INQUIRY;
832 		pass->cdb[4] = MFI_INQ_LENGTH;
833 		pass->header.sense_len = MFI_SENSE_LEN;
834 		pass->sense_addr_lo = cm->cm_sense_busaddr;
835 		pass->sense_addr_hi = 0;
836 		cm->cm_complete = mfi_ldprobe_inq_complete;
837 		cm->cm_private = inq;
838 		cm->cm_sg = &pass->sgl;
839 		cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
840 		cm->cm_flags |= MFI_CMD_DATAIN;
841 		cm->cm_data = inq;
842 		cm->cm_len = MFI_INQ_LENGTH;
843 		sc->mfi_probe_count++;
844 		mfi_enqueue_ready(cm);
845 		mfi_startio(sc);
846 	}
847 
848 	/* Sleep while the arrays are attaching */
849 	msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart", 60 * hz);
850 	mtx_unlock(&sc->mfi_io_lock);
851 
852 	return;
853 }
854 
855 static void
856 mfi_ldprobe_inq_complete(struct mfi_command *cm)
857 {
858 	struct mfi_frame_header *hdr;
859 	struct mfi_softc *sc;
860 	struct scsi_inquiry_data *inq;
861 
862 	sc = cm->cm_sc;
863 	inq = cm->cm_private;
864 	hdr = &cm->cm_frame->header;
865 
866 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00) ||
867 	    (SID_TYPE(inq) != T_DIRECT)) {
868 		free(inq, M_MFIBUF);
869 		mfi_release_command(cm);
870 		if (--sc->mfi_probe_count <= 0)
871 			wakeup(mfi_startup);
872 		return;
873 	}
874 
875 	free(inq, M_MFIBUF);
876 	mfi_release_command(cm);
877 	mfi_ldprobe_tur(sc, hdr->target_id);
878 }
879 
880 static int
881 mfi_ldprobe_tur(struct mfi_softc *sc, int id)
882 {
883 	struct mfi_command *cm;
884 	struct mfi_pass_frame *pass;
885 
886 	cm = mfi_dequeue_free(sc);
887 	if (cm == NULL)
888 		return (EBUSY);
889 	pass = &cm->cm_frame->pass;
890 	pass->header.cmd = MFI_CMD_LD_SCSI_IO;
891 	pass->header.target_id = id;
892 	pass->header.lun_id = 0;
893 	pass->header.cdb_len = 6;
894 	pass->header.timeout = 0;
895 	pass->header.data_len = 0;
896 	bzero(pass->cdb, 16);
897 	pass->cdb[0] = TEST_UNIT_READY;
898 	pass->header.sense_len = MFI_SENSE_LEN;
899 	pass->sense_addr_lo = cm->cm_sense_busaddr;
900 	pass->sense_addr_hi = 0;
901 	cm->cm_complete = mfi_ldprobe_tur_complete;
902 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
903 	cm->cm_flags = 0;
904 	mfi_enqueue_ready(cm);
905 	mfi_startio(sc);
906 
907 	return (0);
908 }
909 
910 static void
911 mfi_ldprobe_tur_complete(struct mfi_command *cm)
912 {
913 	struct mfi_frame_header *hdr;
914 	struct mfi_softc *sc;
915 
916 	sc = cm->cm_sc;
917 	hdr = &cm->cm_frame->header;
918 
919 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
920 		device_printf(sc->mfi_dev, "Logical disk %d is not ready, "
921 		    "cmd_status= %d scsi_status= %d\n", hdr->target_id,
922 		    hdr->cmd_status, hdr->scsi_status);
923 		mfi_print_sense(sc, cm->cm_sense);
924 		mfi_release_command(cm);
925 		if (--sc->mfi_probe_count <= 0)
926 			wakeup(mfi_startup);
927 		return;
928 	}
929 	mfi_release_command(cm);
930 	mfi_ldprobe_capacity(sc, hdr->target_id);
931 }
932 
933 #ifndef MFI_DECODE_LOG
934 static void
935 mfi_decode_log(struct mfi_softc *sc, struct mfi_log_detail *detail)
936 {
937 	switch (detail->arg_type) {
938 	default:
939 		device_printf(sc->mfi_dev, "%d - Log entry type %d\n",
940 		    detail->seq,
941 		    detail->arg_type
942 		);
943 		break;
944 	}
945 }
946 #else
947 #include <dev/mfi/mfilog.h>
948 #include <dev/mfi/mfi_log.c>
949 #endif
950 
951 static void
952 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
953 {
954 	switch (detail->arg_type) {
955 	case MR_EVT_ARGS_NONE:
956 		/* Try to get info from log entry */
957 		mfi_get_entry(sc, detail->seq);
958 		break;
959 	case MR_EVT_ARGS_CDB_SENSE:
960 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) CDB %*D "
961 		    "Sense %*D: %s\n",
962 		    detail->seq,
963 		    detail->args.cdb_sense.pd.device_id,
964 		    detail->args.cdb_sense.pd.enclosure_index,
965 		    detail->args.cdb_sense.pd.slot_number,
966 		    detail->args.cdb_sense.cdb_len,
967 		    detail->args.cdb_sense.cdb,
968 		    ":",
969 		    detail->args.cdb_sense.sense_len,
970 		    detail->args.cdb_sense.sense,
971 		    ":",
972 		    detail->description
973 		    );
974 		break;
975 	case MR_EVT_ARGS_LD:
976 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
977 		    "event: %s\n",
978 		    detail->seq,
979 		    detail->args.ld.ld_index,
980 		    detail->args.ld.target_id,
981 		    detail->description
982 		    );
983 		break;
984 	case MR_EVT_ARGS_LD_COUNT:
985 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
986 		    "count %lld: %s\n",
987 		    detail->seq,
988 		    detail->args.ld_count.ld.ld_index,
989 		    detail->args.ld_count.ld.target_id,
990 		    (long long)detail->args.ld_count.count,
991 		    detail->description
992 		    );
993 		break;
994 	case MR_EVT_ARGS_LD_LBA:
995 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
996 		    "lba %lld: %s\n",
997 		    detail->seq,
998 		    detail->args.ld_lba.ld.ld_index,
999 		    detail->args.ld_lba.ld.target_id,
1000 		    (long long)detail->args.ld_lba.lba,
1001 		    detail->description
1002 		    );
1003 		break;
1004 	case MR_EVT_ARGS_LD_OWNER:
1005 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
1006 		    "owner changed: prior %d, new %d: %s\n",
1007 		    detail->seq,
1008 		    detail->args.ld_owner.ld.ld_index,
1009 		    detail->args.ld_owner.ld.target_id,
1010 		    detail->args.ld_owner.pre_owner,
1011 		    detail->args.ld_owner.new_owner,
1012 		    detail->description
1013 		    );
1014 		break;
1015 	case MR_EVT_ARGS_LD_LBA_PD_LBA:
1016 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
1017 		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
1018 		    detail->seq,
1019 		    detail->args.ld_lba_pd_lba.ld.ld_index,
1020 		    detail->args.ld_lba_pd_lba.ld.target_id,
1021 		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
1022 		    detail->args.ld_lba_pd_lba.pd.device_id,
1023 		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
1024 		    detail->args.ld_lba_pd_lba.pd.slot_number,
1025 		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
1026 		    detail->description
1027 		    );
1028 		break;
1029 	case MR_EVT_ARGS_LD_PROG:
1030 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
1031 		    "progress %d%% in %ds: %s\n",
1032 		    detail->seq,
1033 		    detail->args.ld_prog.ld.ld_index,
1034 		    detail->args.ld_prog.ld.target_id,
1035 		    detail->args.ld_prog.prog.progress/655,
1036 		    detail->args.ld_prog.prog.elapsed_seconds,
1037 		    detail->description
1038 		    );
1039 		break;
1040 	case MR_EVT_ARGS_LD_STATE:
1041 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
1042 		    "state prior %d new %d: %s\n",
1043 		    detail->seq,
1044 		    detail->args.ld_state.ld.ld_index,
1045 		    detail->args.ld_state.ld.target_id,
1046 		    detail->args.ld_state.prev_state,
1047 		    detail->args.ld_state.new_state,
1048 		    detail->description
1049 		    );
1050 		break;
1051 	case MR_EVT_ARGS_LD_STRIP:
1052 		device_printf(sc->mfi_dev, "%d - VD %02d/%d "
1053 		    "strip %lld: %s\n",
1054 		    detail->seq,
1055 		    detail->args.ld_strip.ld.ld_index,
1056 		    detail->args.ld_strip.ld.target_id,
1057 		    (long long)detail->args.ld_strip.strip,
1058 		    detail->description
1059 		    );
1060 		break;
1061 	case MR_EVT_ARGS_PD:
1062 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1063 		    "event: %s\n",
1064 		    detail->seq,
1065 		    detail->args.pd.device_id,
1066 		    detail->args.pd.enclosure_index,
1067 		    detail->args.pd.slot_number,
1068 		    detail->description
1069 		    );
1070 		break;
1071 	case MR_EVT_ARGS_PD_ERR:
1072 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1073 		    "err %d: %s\n",
1074 		    detail->seq,
1075 		    detail->args.pd_err.pd.device_id,
1076 		    detail->args.pd_err.pd.enclosure_index,
1077 		    detail->args.pd_err.pd.slot_number,
1078 		    detail->args.pd_err.err,
1079 		    detail->description
1080 		    );
1081 		break;
1082 	case MR_EVT_ARGS_PD_LBA:
1083 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1084 		    "lba %lld: %s\n",
1085 		    detail->seq,
1086 		    detail->args.pd_lba.pd.device_id,
1087 		    detail->args.pd_lba.pd.enclosure_index,
1088 		    detail->args.pd_lba.pd.slot_number,
1089 		    (long long)detail->args.pd_lba.lba,
1090 		    detail->description
1091 		    );
1092 		break;
1093 	case MR_EVT_ARGS_PD_LBA_LD:
1094 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1095 		    "lba %lld VD %02d/%d: %s\n",
1096 		    detail->seq,
1097 		    detail->args.pd_lba_ld.pd.device_id,
1098 		    detail->args.pd_lba_ld.pd.enclosure_index,
1099 		    detail->args.pd_lba_ld.pd.slot_number,
1100 		    (long long)detail->args.pd_lba.lba,
1101 		    detail->args.pd_lba_ld.ld.ld_index,
1102 		    detail->args.pd_lba_ld.ld.target_id,
1103 		    detail->description
1104 		    );
1105 		break;
1106 	case MR_EVT_ARGS_PD_PROG:
1107 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1108 		    "progress %d%% seconds %ds: %s\n",
1109 		    detail->seq,
1110 		    detail->args.pd_prog.pd.device_id,
1111 		    detail->args.pd_prog.pd.enclosure_index,
1112 		    detail->args.pd_prog.pd.slot_number,
1113 		    detail->args.pd_prog.prog.progress/655,
1114 		    detail->args.pd_prog.prog.elapsed_seconds,
1115 		    detail->description
1116 		    );
1117 		break;
1118 	case MR_EVT_ARGS_PD_STATE:
1119 		device_printf(sc->mfi_dev, "%d - PD %02d(e%d/s%d) "
1120 		    "state prior %d new %d: %s\n",
1121 		    detail->seq,
1122 		    detail->args.pd_state.pd.device_id,
1123 		    detail->args.pd_state.pd.enclosure_index,
1124 		    detail->args.pd_state.pd.slot_number,
1125 		    detail->args.pd_state.prev_state,
1126 		    detail->args.pd_state.new_state,
1127 		    detail->description
1128 		    );
1129 		break;
1130 	case MR_EVT_ARGS_PCI:
1131 		device_printf(sc->mfi_dev, "%d - PCI 0x%04x 0x%04x "
1132 		    "0x%04x 0x%04x: %s\n",
1133 		    detail->seq,
1134 		    detail->args.pci.venderId,
1135 		    detail->args.pci.deviceId,
1136 		    detail->args.pci.subVenderId,
1137 		    detail->args.pci.subDeviceId,
1138 		    detail->description
1139 		    );
1140 		break;
1141 	case MR_EVT_ARGS_RATE:
1142 		device_printf(sc->mfi_dev, "%d - Rebuild rate %d: %s\n",
1143 		    detail->seq,
1144 		    detail->args.rate,
1145 		    detail->description
1146 		    );
1147 		break;
1148 	case MR_EVT_ARGS_TIME:
1149 		device_printf(sc->mfi_dev, "%d - Adapter ticks %d "
1150 		    "elapsed %ds: %s\n",
1151 		    detail->seq,
1152 		    detail->args.time.rtc,
1153 		    detail->args.time.elapsedSeconds,
1154 		    detail->description
1155 		    );
1156 		break;
1157 	case MR_EVT_ARGS_ECC:
1158 		device_printf(sc->mfi_dev, "%d - Adapter ECC %x,%x: %s: %s\n",
1159 		    detail->seq,
1160 		    detail->args.ecc.ecar,
1161 		    detail->args.ecc.elog,
1162 		    detail->args.ecc.str,
1163 		    detail->description
1164 		    );
1165 		break;
1166 	default:
1167 		device_printf(sc->mfi_dev, "%d - Type %d: %s\n",
1168 		    detail->seq,
1169 		    detail->arg_type, detail->description
1170 		    );
1171 	}
1172 }
1173 
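/*
 * Register (or re-register) for asynchronous event notifications.  If
 * an AEN command is already outstanding and covers the requested class
 * and locale, nothing needs to be done; otherwise the old command is
 * aborted and a new MFI_DCMD_CTRL_EVENT_WAIT is queued starting at the
 * given sequence number.
 */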
1174 static int
1175 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1176 {
1177 	struct mfi_command *cm;
1178 	struct mfi_dcmd_frame *dcmd;
1179 	union mfi_evt current_aen, prior_aen;
1180 	struct mfi_evt_detail *ed;
1181 
1182 	current_aen.word = locale;
1183 	if (sc->mfi_aen_cm != NULL) {
1184 		prior_aen.word =
1185 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1186 		if (prior_aen.members.class <= current_aen.members.class &&
1187 		    !((prior_aen.members.locale & current_aen.members.locale)
1188 		    ^current_aen.members.locale)) {
1189 			return (0);
1190 		} else {
1191 			prior_aen.members.locale |= current_aen.members.locale;
1192 			if (prior_aen.members.class
1193 			    < current_aen.members.class)
1194 				current_aen.members.class =
1195 				    prior_aen.members.class;
1196 			mfi_abort(sc, sc->mfi_aen_cm);
1197 		}
1198 	}
1199 
1200 	mtx_lock(&sc->mfi_io_lock);
1201 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1202 		mtx_unlock(&sc->mfi_io_lock);
1203 		return (EBUSY);
1204 	}
1205 	mtx_unlock(&sc->mfi_io_lock);
1206 
1207 	ed = malloc(sizeof(struct mfi_evt_detail), M_MFIBUF,
1208 	    M_NOWAIT | M_ZERO);
1209 	if (ed == NULL) {
1210 		mtx_lock(&sc->mfi_io_lock);
1211 		mfi_release_command(cm);
1212 		mtx_unlock(&sc->mfi_io_lock);
1213 		return (ENOMEM);
1214 	}
1215 
1216 	dcmd = &cm->cm_frame->dcmd;
1217 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1218 	dcmd->header.cmd = MFI_CMD_DCMD;
1219 	dcmd->header.timeout = 0;
1220 	dcmd->header.data_len = sizeof(struct mfi_evt_detail);
1221 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_WAIT;
1222 	((uint32_t *)&dcmd->mbox)[0] = seq;
1223 	((uint32_t *)&dcmd->mbox)[1] = locale;
1224 	cm->cm_sg = &dcmd->sgl;
1225 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1226 	cm->cm_flags = MFI_CMD_DATAIN;
1227 	cm->cm_data = ed;
1228 	cm->cm_len = sizeof(struct mfi_evt_detail);
1229 	cm->cm_complete = mfi_aen_complete;
1230 
1231 	sc->mfi_aen_cm = cm;
1232 
1233 	mfi_enqueue_ready(cm);
1234 	mfi_startio(sc);
1235 
1236 	return (0);
1237 }
1238 
1239 static void
1240 mfi_aen_complete(struct mfi_command *cm)
1241 {
1242 	struct mfi_frame_header *hdr;
1243 	struct mfi_softc *sc;
1244 	struct mfi_evt_detail *detail;
1245 	struct mfi_aen *mfi_aen_entry;
1246 	int seq = 0, aborted = 0;
1247 
1248 	sc = cm->cm_sc;
1249 	hdr = &cm->cm_frame->header;
1250 
1251 	if (sc->mfi_aen_cm == NULL)
1252 		return;
1253 
1254 	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1255 		sc->mfi_aen_cm->cm_aen_abort = 0;
1256 		aborted = 1;
1257 	} else {
1258 		sc->mfi_aen_triggered = 1;
1259 		if (sc->mfi_poll_waiting)
1260 			selwakeup(&sc->mfi_select);
1261 		detail = cm->cm_data;
1262 		mtx_unlock(&sc->mfi_io_lock);
1263 		mfi_decode_evt(sc, detail);
1264 		mtx_lock(&sc->mfi_io_lock);
1265 		seq = detail->seq + 1;
1266 		TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1267 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1268 			    aen_link);
1269 			psignal(mfi_aen_entry->p, SIGIO);
1270 			free(mfi_aen_entry, M_MFIBUF);
1271 		}
1272 	}
1273 
1274 	free(cm->cm_data, M_MFIBUF);
1275 	sc->mfi_aen_cm = NULL;
1276 	wakeup(&sc->mfi_aen_cm);
1277 	mfi_release_command(cm);
1278 
1279 	/* set it up again so the driver can catch more events */
1280 	if (!aborted) {
1281 		mtx_unlock(&sc->mfi_io_lock);
1282 		mfi_aen_setup(sc, seq);
1283 		mtx_lock(&sc->mfi_io_lock);
1284 	}
1285 }
1286 
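/*
 * Fetch a single event-log entry by sequence number with a polled
 * MFI_DCMD_CTRL_EVENT_GET and hand it to mfi_decode_log().
 */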
1287 static int
1288 mfi_get_entry(struct mfi_softc *sc, int seq)
1289 {
1290 	struct mfi_command *cm;
1291 	struct mfi_dcmd_frame *dcmd;
1292 	struct mfi_log_detail *ed;
1293 	int error;
1294 
1295 	mtx_lock(&sc->mfi_io_lock);
1296 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1297 		mtx_unlock(&sc->mfi_io_lock);
1298 		return (EBUSY);
1299 	}
1300 	mtx_unlock(&sc->mfi_io_lock);
1301 
1302 	ed = malloc(sizeof(struct mfi_log_detail), M_MFIBUF, M_NOWAIT | M_ZERO);
1303 	if (ed == NULL) {
1304 		mtx_lock(&sc->mfi_io_lock);
1305 		mfi_release_command(cm);
1306 		mtx_unlock(&sc->mfi_io_lock);
1307 		return (ENOMEM);
1308 	}
1309 
1310 	dcmd = &cm->cm_frame->dcmd;
1311 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1312 	dcmd->header.cmd = MFI_CMD_DCMD;
1313 	dcmd->header.timeout = 0;
1314 	dcmd->header.data_len = sizeof(struct mfi_log_detail);
1315 	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1316 	((uint32_t *)&dcmd->mbox)[0] = seq;
1317 	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1318 	cm->cm_sg = &dcmd->sgl;
1319 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1320 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1321 	cm->cm_data = ed;
1322 		cm->cm_len = sizeof(struct mfi_log_detail);
1323 
1324 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1325 		device_printf(sc->mfi_dev, "Log entry buffer map failed\n");
1326 		free(ed, M_MFIBUF);
1327 		mfi_release_command(cm);
1328 		return (error);
1329 	}
1330 
1331 	if ((error = mfi_polled_command(sc, cm)) != 0) {
1332 		device_printf(sc->mfi_dev, "Failed to get log entry\n");
1333 		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
1334 		    MFI_SECTOR_LEN;
1335 		free(ed, M_MFIBUF);
1336 		mfi_release_command(cm);
1337 		return (0);
1338 	}
1339 
1340 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1341 	    BUS_DMASYNC_POSTREAD);
1342 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1343 
1344 	mfi_decode_log(sc, ed);
1345 
1346 	mtx_lock(&sc->mfi_io_lock);
1347 	free(cm->cm_data, M_MFIBUF);
1348 	mfi_release_command(cm);
1349 	mtx_unlock(&sc->mfi_io_lock);
1350 	return (0);
1351 }
1352 
1353 static int
1354 mfi_ldprobe_capacity(struct mfi_softc *sc, int id)
1355 {
1356 	struct mfi_command *cm;
1357 	struct mfi_pass_frame *pass;
1358 	struct scsi_read_capacity_data_long *cap;
1359 
1360 	cap = malloc(sizeof(*cap), M_MFIBUF, M_NOWAIT|M_ZERO);
1361 	if (cap == NULL)
1362 		return (ENOMEM);
1363 	cm = mfi_dequeue_free(sc);
1364 	if (cm == NULL) {
1365 		free(cap, M_MFIBUF);
1366 		return (EBUSY);
1367 	}
1368 	pass = &cm->cm_frame->pass;
1369 	pass->header.cmd = MFI_CMD_LD_SCSI_IO;
1370 	pass->header.target_id = id;
1371 	pass->header.lun_id = 0;
1372 	pass->header.cdb_len = 6;
1373 	pass->header.timeout = 0;
1374 	pass->header.data_len = sizeof(*cap);
1375 	bzero(pass->cdb, 16);
1376 	pass->cdb[0] = 0x9e;	/* READ CAPACITY 16 */
1377 	pass->cdb[13] = sizeof(*cap);
1378 	pass->header.sense_len = MFI_SENSE_LEN;
1379 	pass->sense_addr_lo = cm->cm_sense_busaddr;
1380 	pass->sense_addr_hi = 0;
1381 	cm->cm_complete = mfi_ldprobe_capacity_complete;
1382 	cm->cm_private = cap;
1383 	cm->cm_sg = &pass->sgl;
1384 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
1385 	cm->cm_flags |= MFI_CMD_DATAIN;
1386 	cm->cm_data = cap;
1387 	cm->cm_len = sizeof(*cap);
1388 	mfi_enqueue_ready(cm);
1389 	mfi_startio(sc);
1390 
1391 	return (0);
1392 }
1393 
1394 static void
1395 mfi_ldprobe_capacity_complete(struct mfi_command *cm)
1396 {
1397 	struct mfi_frame_header *hdr;
1398 	struct mfi_softc *sc;
1399 	struct scsi_read_capacity_data_long *cap;
1400 	uint64_t sectors;
1401 	uint32_t secsize;
1402 	int target;
1403 
1404 	sc = cm->cm_sc;
1405 	cap = cm->cm_private;
1406 	hdr = &cm->cm_frame->header;
1407 
1408 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
1409 		device_printf(sc->mfi_dev, "Failed to read capacity for "
1410 		    "logical disk\n");
1411 		device_printf(sc->mfi_dev, "cmd_status= %d scsi_status= %d\n",
1412 		    hdr->cmd_status, hdr->scsi_status);
1413 		free(cap, M_MFIBUF);
1414 		mfi_release_command(cm);
1415 		if (--sc->mfi_probe_count <= 0)
1416 			wakeup(mfi_startup);
1417 		return;
1418 	}
1419 	target = hdr->target_id;
1420 	sectors = scsi_8btou64(cap->addr);
1421 	secsize = scsi_4btoul(cap->length);
1422 	free(cap, M_MFIBUF);
1423 	mfi_release_command(cm);
1424 	mfi_add_ld(sc, target, sectors, secsize);
1425 	if (--sc->mfi_probe_count <= 0)
1426 		wakeup(mfi_startup);
1427 
1428 	return;
1429 }
1430 
1431 static int
1432 mfi_add_ld(struct mfi_softc *sc, int id, uint64_t sectors, uint32_t secsize)
1433 {
1434 	struct mfi_ld *ld;
1435 	device_t child;
1436 
1437 	if ((secsize == 0) || (sectors == 0)) {
1438 		device_printf(sc->mfi_dev, "Invalid capacity parameters for "
1439 		      "logical disk %d\n", id);
1440 		return (EINVAL);
1441 	}
1442 
1443 	ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1444 	if (ld == NULL) {
1445 		device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1446 		return (ENOMEM);
1447 	}
1448 
1449 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1450 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1451 		free(ld, M_MFIBUF);
1452 		return (EINVAL);
1453 	}
1454 
1455 	ld->ld_id = id;
1456 	ld->ld_disk = child;
1457 	ld->ld_secsize = secsize;
1458 	ld->ld_sectors = sectors;
1459 
1460 	device_set_ivars(child, ld);
1461 	device_set_desc(child, "MFI Logical Disk");
1462 	mtx_unlock(&sc->mfi_io_lock);
1463 	mtx_lock(&Giant);
1464 	bus_generic_attach(sc->mfi_dev);
1465 	mtx_unlock(&Giant);
1466 	mtx_lock(&sc->mfi_io_lock);
1467 
1468 	return (0);
1469 }
1470 
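/*
 * Pull the next bio off the bio queue and convert it into an LD
 * read/write frame; the target id is carried in bio_driver1 and the
 * starting LBA in bio_pblkno.
 */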
1471 static struct mfi_command *
1472 mfi_bio_command(struct mfi_softc *sc)
1473 {
1474 	struct mfi_io_frame *io;
1475 	struct mfi_command *cm;
1476 	struct bio *bio;
1477 	int flags, blkcount;
1478 
1479 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1480 		return (NULL);
1481 
1482 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1483 		mfi_release_command(cm);
1484 		return (NULL);
1485 	}
1486 
1487 	io = &cm->cm_frame->io;
1488 	switch (bio->bio_cmd & 0x03) {
1489 	case BIO_READ:
1490 		io->header.cmd = MFI_CMD_LD_READ;
1491 		flags = MFI_CMD_DATAIN;
1492 		break;
1493 	case BIO_WRITE:
1494 		io->header.cmd = MFI_CMD_LD_WRITE;
1495 		flags = MFI_CMD_DATAOUT;
1496 		break;
1497 	default:
1498 		panic("Invalid bio command");
1499 	}
1500 
1501 	/* Cheat with the sector length to avoid a non-constant division */
1502 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1503 	io->header.target_id = (uintptr_t)bio->bio_driver1;
1504 	io->header.timeout = 0;
1505 	io->header.flags = 0;
1506 	io->header.sense_len = MFI_SENSE_LEN;
1507 	io->header.data_len = blkcount;
1508 	io->sense_addr_lo = cm->cm_sense_busaddr;
1509 	io->sense_addr_hi = 0;
1510 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1511 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1512 	cm->cm_complete = mfi_bio_complete;
1513 	cm->cm_private = bio;
1514 	cm->cm_data = bio->bio_data;
1515 	cm->cm_len = bio->bio_bcount;
1516 	cm->cm_sg = &io->sgl;
1517 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1518 	cm->cm_flags = flags;
1519 
1520 	return (cm);
1521 }
1522 
1523 static void
1524 mfi_bio_complete(struct mfi_command *cm)
1525 {
1526 	struct bio *bio;
1527 	struct mfi_frame_header *hdr;
1528 	struct mfi_softc *sc;
1529 
1530 	bio = cm->cm_private;
1531 	hdr = &cm->cm_frame->header;
1532 	sc = cm->cm_sc;
1533 
1534 	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1535 		bio->bio_flags |= BIO_ERROR;
1536 		bio->bio_error = EIO;
1537 		device_printf(sc->mfi_dev, "I/O error, status= %d "
1538 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1539 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1540 	}
1541 
1542 	mfi_release_command(cm);
1543 	mfi_disk_complete(bio);
1544 }
1545 
1546 void
1547 mfi_startio(struct mfi_softc *sc)
1548 {
1549 	struct mfi_command *cm;
1550 
1551 	for (;;) {
1552 		/* Don't bother if we're short on resources */
1553 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1554 			break;
1555 
1556 		/* Try a command that has already been prepared */
1557 		cm = mfi_dequeue_ready(sc);
1558 
1559 		/* Nope, so look for work on the bioq */
1560 		if (cm == NULL)
1561 			cm = mfi_bio_command(sc);
1562 
1563 		/* No work available, so exit */
1564 		if (cm == NULL)
1565 			break;
1566 
1567 		/* Send the command to the controller */
1568 		if (mfi_mapcmd(sc, cm) != 0) {
1569 			mfi_requeue_ready(cm);
1570 			break;
1571 		}
1572 	}
1573 }
1574 
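/*
 * Map a command's data buffer for DMA and send it.  If busdma defers
 * the load, the queue is frozen with MFI_FLAGS_QFRZN until a later
 * completion unfreezes it; commands without data are sent directly.
 */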
1575 static int
1576 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1577 {
1578 	int error, polled;
1579 
1580 	if (cm->cm_data != NULL) {
1581 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1582 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1583 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1584 		if (error == EINPROGRESS) {
1585 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1586 			return (0);
1587 		}
1588 	} else {
1589 		mfi_enqueue_busy(cm);
1590 		error = mfi_send_frame(sc, cm);
1591 	}
1592 
1593 	return (error);
1594 }
1595 
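/*
 * busdma callback for data buffers: fill in the frame's scatter/gather
 * list (32- or 64-bit format), set the transfer direction flags, sync
 * the map, and account for the extra 64-byte frames consumed by the
 * S/G list.
 */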
1596 static void
1597 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1598 {
1599 	struct mfi_frame_header *hdr;
1600 	struct mfi_command *cm;
1601 	union mfi_sgl *sgl;
1602 	struct mfi_softc *sc;
1603 	int i, dir;
1604 
1605 	if (error)
1606 		return;
1607 
1608 	cm = (struct mfi_command *)arg;
1609 	sc = cm->cm_sc;
1610 	hdr = &cm->cm_frame->header;
1611 	sgl = cm->cm_sg;
1612 
1613 	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1614 		for (i = 0; i < nsegs; i++) {
1615 			sgl->sg32[i].addr = segs[i].ds_addr;
1616 			sgl->sg32[i].len = segs[i].ds_len;
1617 		}
1618 	} else {
1619 		for (i = 0; i < nsegs; i++) {
1620 			sgl->sg64[i].addr = segs[i].ds_addr;
1621 			sgl->sg64[i].len = segs[i].ds_len;
1622 		}
1623 		hdr->flags |= MFI_FRAME_SGL64;
1624 	}
1625 	hdr->sg_count = nsegs;
1626 
1627 	dir = 0;
1628 	if (cm->cm_flags & MFI_CMD_DATAIN) {
1629 		dir |= BUS_DMASYNC_PREREAD;
1630 		hdr->flags |= MFI_FRAME_DIR_READ;
1631 	}
1632 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
1633 		dir |= BUS_DMASYNC_PREWRITE;
1634 		hdr->flags |= MFI_FRAME_DIR_WRITE;
1635 	}
1636 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1637 	cm->cm_flags |= MFI_CMD_MAPPED;
1638 
1639 	/*
1640 	 * Instead of calculating the total number of frames in the
1641 	 * compound frame, it's already assumed that there will be at
1642 	 * least 1 frame, so don't compensate for the modulo of the
1643 	 * following division.
1644 	 */
1645 	cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
1646 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1647 
1648 	/* The caller will take care of delivering polled commands */
1649 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1650 		mfi_enqueue_busy(cm);
1651 		mfi_send_frame(sc, cm);
1652 	}
1653 
1654 	return;
1655 }
1656 
1657 static int
1658 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1659 {
1660 
1661 	/*
1662 	 * The bus address of the command is aligned on a 64 byte boundary,
1663 	 * leaving the least 6 bits as zero.  For whatever reason, the
1664 	 * hardware wants the address shifted right by three, leaving just
1665 	 * 3 zero bits.  These three bits are then used to indicate how many
1666 	 * 64 byte frames beyond the first one are used in the command.  The
1667 	 * extra frames are typically filled with S/G elements.  The extra
1668 	 * frames must also be contiguous.  Thus, a compound frame can be at
1669 	 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1670 	 * 39 64-bit S/G elements for block I/O commands.  This means that
1671 	 * I/O transfers of 256k and higher simply are not possible, which
1672 	 * is quite odd for such a modern adapter.
1673 	 */
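	/*
	 * Illustrative example (hypothetical values): a frame at bus
	 * address 0x12340040 that uses two extra frames is posted as
	 * (0x12340040 >> 3) | 2 == 0x0246800a.
	 */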
1674 	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1675 	    cm->cm_extra_frames);
1676 	return (0);
1677 }
1678 
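/*
 * Common completion path: undo the DMA mapping, run the per-command
 * completion callback, and unfreeze the queue so mfi_startio() can
 * push more work.
 */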
1679 static void
1680 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1681 {
1682 	int dir;
1683 
1684 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1685 		dir = 0;
1686 		if (cm->cm_flags & MFI_CMD_DATAIN)
1687 			dir |= BUS_DMASYNC_POSTREAD;
1688 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1689 			dir |= BUS_DMASYNC_POSTWRITE;
1690 
1691 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1692 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1693 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1694 	}
1695 
1696 	if (cm->cm_complete != NULL)
1697 		cm->cm_complete(cm);
1698 
1699 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1700 	mfi_startio(sc);
1701 }
1702 
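/*
 * Build an MFI_CMD_ABORT frame that names the target command by
 * context and bus address, issue it polled, and wait for the
 * outstanding AEN command to drain; this path is only used to cancel
 * the pending AEN.
 */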
1703 static int
1704 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1705 {
1706 	struct mfi_command *cm;
1707 	struct mfi_abort_frame *abort;
1708 
1709 	mtx_lock(&sc->mfi_io_lock);
1710 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1711 		mtx_unlock(&sc->mfi_io_lock);
1712 		return (EBUSY);
1713 	}
1714 	mtx_unlock(&sc->mfi_io_lock);
1715 
1716 	abort = &cm->cm_frame->abort;
1717 	abort->header.cmd = MFI_CMD_ABORT;
1718 	abort->header.flags = 0;
1719 	abort->abort_context = cm_abort->cm_frame->header.context;
1720 	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1721 	abort->abort_mfi_addr_hi = 0;
1722 	cm->cm_data = NULL;
1723 
1724 	sc->mfi_aen_cm->cm_aen_abort = 1;
1725 	mfi_mapcmd(sc, cm);
1726 	mfi_polled_command(sc, cm);
1727 	mtx_lock(&sc->mfi_io_lock);
1728 	mfi_release_command(cm);
1729 	mtx_unlock(&sc->mfi_io_lock);
1730 
1731 	while (sc->mfi_aen_cm != NULL) {
1732 		tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
1733 	}
1734 
1735 	return (0);
1736 }
1737 
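/*
 * Write a block range with a fully polled LD_WRITE frame, intended
 * for the crash-dump path where interrupt-driven completion cannot be
 * relied on.
 */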
1738 int
1739 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1740 {
1741 	struct mfi_command *cm;
1742 	struct mfi_io_frame *io;
1743 	int error;
1744 
1745 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1746 		return (EBUSY);
1747 
1748 	io = &cm->cm_frame->io;
1749 	io->header.cmd = MFI_CMD_LD_WRITE;
1750 	io->header.target_id = id;
1751 	io->header.timeout = 0;
1752 	io->header.flags = 0;
1753 	io->header.sense_len = MFI_SENSE_LEN;
1754 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1755 	io->sense_addr_lo = cm->cm_sense_busaddr;
1756 	io->sense_addr_hi = 0;
1757 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1758 	io->lba_lo = lba & 0xffffffff;
1759 	cm->cm_data = virt;
1760 	cm->cm_len = len;
1761 	cm->cm_sg = &io->sgl;
1762 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1763 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1764 
1765 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1766 		mfi_release_command(cm);
1767 		return (error);
1768 	}
1769 
1770 	error = mfi_polled_command(sc, cm);
1771 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1772 	    BUS_DMASYNC_POSTWRITE);
1773 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1774 	mfi_release_command(cm);
1775 
1776 	return (error);
1777 }
1778 
1779 static int
1780 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1781 {
1782 	struct mfi_softc *sc;
1783 
1784 	sc = dev->si_drv1;
1785 	sc->mfi_flags |= MFI_FLAGS_OPEN;
1786 
1787 	return (0);
1788 }
1789 
1790 static int
1791 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1792 {
1793 	struct mfi_softc *sc;
1794 	struct mfi_aen *mfi_aen_entry;
1795 
1796 	sc = dev->si_drv1;
1797 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1798 
1799 	TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1800 		if (mfi_aen_entry->p == curproc) {
1801 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1802 			    aen_link);
1803 			printf("REMOVED pid %d\n", mfi_aen_entry->p->p_pid);
1804 			free(mfi_aen_entry, M_MFIBUF);
1805 		}
1806 	}
1807 	return (0);
1808 }
1809 
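/*
 * Native ioctl handler.  MFIIO_STATS copies out queue statistics; the Linux
 * MegaRAID ioctl numbers are redirected to mfi_linux_ioctl_int() on the
 * adapter named in the request.
 */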
1810 static int
1811 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1812 {
1813 	struct mfi_softc *sc;
1814 	union mfi_statrequest *ms;
1815 	int error;
1816 
1817 	sc = dev->si_drv1;
1818 	error = 0;
1819 
1820 	switch (cmd) {
1821 	case MFIIO_STATS:
1822 		ms = (union mfi_statrequest *)arg;
1823 		switch (ms->ms_item) {
1824 		case MFIQ_FREE:
1825 		case MFIQ_BIO:
1826 		case MFIQ_READY:
1827 		case MFIQ_BUSY:
1828 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1829 			    sizeof(struct mfi_qstat));
1830 			break;
1831 		default:
1832 			error = ENOIOCTL;
1833 			break;
1834 		}
1835 		break;
1836 	case 0xc1144d01: /* Firmware Linux ioctl shim */
1837 		{
1838 			devclass_t devclass;
1839 			struct mfi_linux_ioc_packet l_ioc;
1840 			int adapter;
1841 
1842 			devclass = devclass_find("mfi");
1843 			if (devclass == NULL)
1844 				return (ENOENT);
1845 
1846 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
1847 			if (error)
1848 				return (error);
1849 			adapter = l_ioc.lioc_adapter_no;
1850 			sc = devclass_get_softc(devclass, adapter);
1851 			if (sc == NULL)
1852 				return (ENOENT);
1853 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1854 			    cmd, arg, flag, td));
1855 			break;
1856 		}
1857 	case 0x400c4d03: /* AEN Linux ioctl shim */
1858 		{
1859 			devclass_t devclass;
1860 			struct mfi_linux_ioc_aen l_aen;
1861 			int adapter;
1862 
1863 			devclass = devclass_find("mfi");
1864 			if (devclass == NULL)
1865 				return (ENOENT);
1866 
1867 			error = copyin(arg, &l_aen, sizeof(l_aen));
1868 			if (error)
1869 				return (error);
1870 			adapter = l_aen.laen_adapter_no;
1871 			sc = devclass_get_softc(devclass, adapter);
1872 			if (sc == NULL)
1873 				return (ENOENT);
1874 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
1875 			    cmd, arg, flag, td));
1876 			break;
1877 		}
1878 	default:
1879 		error = ENOENT;
1880 		break;
1881 	}
1882 
1883 	return (error);
1884 }
1885 
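/*
 * Backend for the Linux MegaRAID ioctl shim: copy in the caller's frame and
 * scatter/gather data, run the command polled, and copy the data, sense and
 * completion status back out.  The AEN ioctl registers the calling process
 * for asynchronous event notification.
 */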
1886 static int
1887 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1888 {
1889 	struct mfi_softc *sc;
1890 	struct mfi_linux_ioc_packet l_ioc;
1891 	struct mfi_linux_ioc_aen l_aen;
1892 	struct mfi_command *cm = NULL;
1893 	struct mfi_aen *mfi_aen_entry;
1894 	uint32_t *sense_ptr;
1895 	uint32_t context;
1896 	uint8_t *data = NULL, *temp;
1897 	int i;
1898 	int error;
1899 
1900 	sc = dev->si_drv1;
1901 	error = 0;
1902 	switch (cmd) {
1903 	case 0xc1144d01: /* Firmware Linux ioctl shim */
1904 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
1905 		if (error != 0)
1906 			return (error);
1907 
1908 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
1909 			return (EINVAL);
1910 		}
1911 
1912 		mtx_lock(&sc->mfi_io_lock);
1913 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1914 			mtx_unlock(&sc->mfi_io_lock);
1915 			return (EBUSY);
1916 		}
1917 		mtx_unlock(&sc->mfi_io_lock);
1918 
1919 		/*
1920 		 * Save off the original context, since copying the frame in
1921 		 * from userland will clobber it.
1922 		 */
1923 		context = cm->cm_frame->header.context;
1924 
1925 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
1926 		      l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
1927 		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
1928 		cm->cm_sg =
1929 		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
1930 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1931 			| MFI_CMD_POLLED;
1932 		cm->cm_len = cm->cm_frame->header.data_len;
1933 		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1934 					    M_WAITOK | M_ZERO);
1935 
1936 		/* restore header context */
1937 		cm->cm_frame->header.context = context;
1938 
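		/* Gather the caller's S/G segments into the kernel buffer. */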
1939 		temp = data;
1940 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1941 			error = copyin(l_ioc.lioc_sgl[i].iov_base,
1942 			       temp,
1943 			       l_ioc.lioc_sgl[i].iov_len);
1944 			if (error != 0) {
1945 				device_printf(sc->mfi_dev,
1946 				    "Copy in failed");
1947 				goto out;
1948 			}
1949 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1950 		}
1951 
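		/* Point the frame's sense address at our sense buffer. */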
1952 		if (l_ioc.lioc_sense_len) {
1953 			sense_ptr =
1954 			    (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
1955 			*sense_ptr = cm->cm_sense_busaddr;
1956 		}
1957 
1958 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1959 			device_printf(sc->mfi_dev,
1960 			    "Controller info buffer map failed");
1961 			goto out;
1962 		}
1963 
1964 		if ((error = mfi_polled_command(sc, cm)) != 0) {
1965 			device_printf(sc->mfi_dev,
1966 			    "Controller polled failed");
1967 			goto out;
1968 		}
1969 
1970 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1971 				BUS_DMASYNC_POSTREAD);
1972 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1973 
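		/* Scatter the results back out to the caller's segments. */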
1974 		temp = data;
1975 		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1976 			error = copyout(temp,
1977 				l_ioc.lioc_sgl[i].iov_base,
1978 				l_ioc.lioc_sgl[i].iov_len);
1979 			if (error != 0) {
1980 				device_printf(sc->mfi_dev,
1981 				    "Copy out failed");
1982 				goto out;
1983 			}
1984 			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1985 		}
1986 
1987 		if (l_ioc.lioc_sense_len) {
1988 			/* copy out sense */
1989 			sense_ptr = (void *)
1990 			    &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
1991 			/* cm_sense is the kernel VA of the sense buffer */
1992 			temp = (uint8_t *)cm->cm_sense;
1993 			error = copyout(temp, sense_ptr,
1994 			    l_ioc.lioc_sense_len);
1995 			if (error != 0) {
1996 				device_printf(sc->mfi_dev,
1997 				    "Copy out failed");
1998 				goto out;
1999 			}
2000 		}
2001 
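		/* Copy the completion status back into the user's frame. */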
2002 		error = copyout(&cm->cm_frame->header.cmd_status,
2003 			&((struct mfi_linux_ioc_packet*)arg)
2004 			->lioc_frame.hdr.cmd_status,
2005 			1);
2006 		if (error != 0) {
2007 			device_printf(sc->mfi_dev,
2008 				      "Copy out failed");
2009 			goto out;
2010 		}
2011 
2012 out:
2013 		if (data)
2014 			free(data, M_MFIBUF);
2015 		if (cm) {
2016 			mtx_lock(&sc->mfi_io_lock);
2017 			mfi_release_command(cm);
2018 			mtx_unlock(&sc->mfi_io_lock);
2019 		}
2020 
2021 		return (error);
2022 	case 0x400c4d03: /* AEN Linux ioctl shim */
2023 		error = copyin(arg, &l_aen, sizeof(l_aen));
2024 		if (error != 0)
2025 			return (error);
2026 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
2027 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
2028 		    M_WAITOK);
2029 		if (mfi_aen_entry != NULL) {
2030 			mfi_aen_entry->p = curproc;
2031 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
2032 			    aen_link);
2033 		}
2034 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
2035 		    l_aen.laen_class_locale);
2036 
2037 		if (error != 0) {
2038 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2039 			    aen_link);
2040 			free(mfi_aen_entry, M_MFIBUF);
2041 		}
2042 
2043 		return (error);
2044 	default:
2045 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2046 		error = ENOENT;
2047 		break;
2048 	}
2049 
2050 	return (error);
2051 }
2052 
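/*
 * poll(2) handler for the management device: report it readable once an
 * asynchronous event has been collected, or register for a wakeup when one
 * arrives.
 */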
2053 static int
2054 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2055 {
2056 	struct mfi_softc *sc;
2057 	int revents = 0;
2058 
2059 	sc = dev->si_drv1;
2060 
2061 	printf("MFI POLL\n");
2062 	if (poll_events & (POLLIN | POLLRDNORM)) {
2063 		if (sc->mfi_aen_triggered != 0)
2064 			revents |= poll_events & (POLLIN | POLLRDNORM);
2065 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2066 			revents |= POLLERR;
2067 		}
2068 	}
2069 
2070 	if (revents == 0) {
2071 		if (poll_events & (POLLIN | POLLRDNORM)) {
2072 			sc->mfi_poll_waiting = 1;
2073 			selrecord(td, &sc->mfi_select);
2074 			/* mfi_poll_waiting is cleared when the AEN fires */
2075 		}
2076 	}
2077 
2078 	return (revents);
2079 }
2080