xref: /freebsd/sys/dev/mfi/mfi.c (revision 7dfd9569a2f0637fb9a48157b1c1bfe5709faee3)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_mfi.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/conf.h>
38 #include <sys/eventhandler.h>
39 #include <sys/rman.h>
40 #include <sys/bus_dma.h>
41 #include <sys/bio.h>
42 #include <sys/ioccom.h>
43 
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 
47 #include <dev/mfi/mfireg.h>
48 #include <dev/mfi/mfi_ioctl.h>
49 #include <dev/mfi/mfivar.h>
50 
51 static int	mfi_alloc_commands(struct mfi_softc *);
52 static void	mfi_release_command(struct mfi_command *cm);
53 static int	mfi_comms_init(struct mfi_softc *);
54 static int	mfi_polled_command(struct mfi_softc *, struct mfi_command *);
55 static int	mfi_get_controller_info(struct mfi_softc *);
56 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
57 static void	mfi_startup(void *arg);
58 static void	mfi_intr(void *arg);
59 static void	mfi_enable_intr(struct mfi_softc *sc);
60 static void	mfi_ldprobe_inq(struct mfi_softc *sc);
61 static void	mfi_ldprobe_inq_complete(struct mfi_command *);
62 static int	mfi_ldprobe_capacity(struct mfi_softc *sc, int id);
63 static void	mfi_ldprobe_capacity_complete(struct mfi_command *);
64 static int	mfi_ldprobe_tur(struct mfi_softc *sc, int id);
65 static void	mfi_ldprobe_tur_complete(struct mfi_command *);
66 static int	mfi_add_ld(struct mfi_softc *sc, int id, uint64_t, uint32_t);
67 static struct mfi_command * mfi_bio_command(struct mfi_softc *);
68 static void	mfi_bio_complete(struct mfi_command *);
69 static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
70 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
71 static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
72 
/* Management interface (character device) entry points */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;

/* cdevsw for the /dev/mfi%d control node created in mfi_attach() */
static struct cdevsw mfi_cdevsw = {
	.d_version = 	D_VERSION,
	.d_flags =	0,
	.d_open = 	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_name =	"mfi",
};

/* Malloc type used for all of this driver's transient allocations */
MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

/* Size of the INQUIRY payload requested when probing logical disks */
#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
90 
/*
 * Drive the firmware to the READY state, issuing whatever doorbell writes
 * the current state requires (clear handshake, leave OPERATIONAL, etc.)
 * and polling OMSG0 until the state advances.
 *
 * Each state has its own timeout budget (max_wait, in seconds); the inner
 * loop polls every 100ms.  Returns 0 once READY, or ENXIO on firmware
 * fault, an unknown state, or a state that fails to advance in time.
 */
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	/* Low bits of OMSG0 encode the firmware state */
	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* Firmware wants an explicit ack before proceeding */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask a running firmware to drop back to READY */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = 10;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_DEVICE_SCAN:
		case MFI_FWSTATE_FLUSH_CACHE:
			/* These transitional states can take a while */
			max_wait = 20;
			break;
		default:
			device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll at 100ms granularity for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
144 
145 static void
146 mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
147 {
148 	uint32_t *addr;
149 
150 	addr = arg;
151 	*addr = segs[0].ds_addr;
152 }
153 
/*
 * Core attach routine for an MFI controller.  Brings the firmware to the
 * READY state, sizes and allocates the three DMA regions (comms queues,
 * command frames, sense buffers), builds the command pool, initializes
 * firmware comms, and hooks up the interrupt handler, config hook,
 * shutdown eventhandler, and the /dev/mfi%d management node.
 *
 * Returns 0 on success or an errno on failure.  On failure, previously
 * allocated resources are NOT torn down here; the bus-attach caller is
 * expected to invoke mfi_free(), which checks each resource individually.
 */
int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit;

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->mfi_ld_tqh);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = MFI_READ4(sc, MFI_OMSG0);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));

	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_total_sgl,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);

	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the frames at
	 * the same time; the frame is 64 bytes plus space for the SG lists.
	 * The assumption here is that the SG list will start at the second
	 * 64 byte segment of the frame and not use the unused bytes in the
	 * frame.  While this might seem wasteful, apparently the frames must
	 * be 64 byte aligned, so any savings would be negated by the extra
	 * alignment padding.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sgsize = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sgsize = sizeof(struct mfi_sg32);
	}
	/* Frames per command: 1 header frame + enough 64B slots for the SGL */
	frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
	    MFI_FRAME_SIZE + 1;
	sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);

	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);

	/* Carve the frame/sense memory into the per-command pool */
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/* Tell the firmware where the reply queue and indexes live */
	if ((error = mfi_comms_init(sc)) != 0)
		return (error);

	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);

#if 0
	if ((error = mfi_setup_aen(sc)) != 0)
		return (error);
#endif

	/*
	 * Set up the interrupt handler.  XXX This should happen in
	 * mfi_pci.c
	 */
	sc->mfi_irq_rid = 0;
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		return (EINVAL);
	}
	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
	    mfi_intr, sc, &sc->mfi_intr)) {
		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;

	return (0);
}
368 
369 static int
370 mfi_alloc_commands(struct mfi_softc *sc)
371 {
372 	struct mfi_command *cm;
373 	int i, ncmds;
374 
375 	/*
376 	 * XXX Should we allocate all the commands up front, or allocate on
377 	 * demand later like 'aac' does?
378 	 */
379 	ncmds = sc->mfi_max_fw_cmds;
380 	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
381 	    M_WAITOK | M_ZERO);
382 
383 	for (i = 0; i < ncmds; i++) {
384 		cm = &sc->mfi_commands[i];
385 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
386 		    sc->mfi_frame_size * i);
387 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
388 		    sc->mfi_frame_size * i;
389 		cm->cm_frame->header.context = i;
390 		cm->cm_sense = &sc->mfi_sense[i];
391 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
392 		cm->cm_sc = sc;
393 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
394 		    &cm->cm_dmamap) == 0)
395 			mfi_release_command(cm);
396 		else
397 			break;
398 		sc->mfi_total_cmds++;
399 	}
400 
401 	return (0);
402 }
403 
404 static void
405 mfi_release_command(struct mfi_command *cm)
406 {
407 	uint32_t *hdr_data;
408 
409 	/*
410 	 * Zero out the important fields of the frame, but make sure the
411 	 * context field is preserved
412 	 */
413 	hdr_data = (uint32_t *)cm->cm_frame;
414 	hdr_data[0] = 0;
415 	hdr_data[1] = 0;
416 
417 	cm->cm_extra_frames = 0;
418 	cm->cm_flags = 0;
419 	cm->cm_complete = NULL;
420 	cm->cm_private = NULL;
421 	cm->cm_sg = 0;
422 	cm->cm_total_frame_size = 0;
423 	mfi_enqueue_free(cm);
424 }
425 
426 static int
427 mfi_comms_init(struct mfi_softc *sc)
428 {
429 	struct mfi_command *cm;
430 	struct mfi_init_frame *init;
431 	struct mfi_init_qinfo *qinfo;
432 	int error;
433 
434 	if ((cm = mfi_dequeue_free(sc)) == NULL)
435 		return (EBUSY);
436 
437 	/*
438 	 * Abuse the SG list area of the frame to hold the init_qinfo
439 	 * object;
440 	 */
441 	init = &cm->cm_frame->init;
442 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
443 
444 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
445 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
446 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
447 	    offsetof(struct mfi_hwcomms, hw_reply_q);
448 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
449 	    offsetof(struct mfi_hwcomms, hw_pi);
450 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
451 	    offsetof(struct mfi_hwcomms, hw_ci);
452 
453 	init->header.cmd = MFI_CMD_INIT;
454 	init->header.data_len = sizeof(struct mfi_init_qinfo);
455 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
456 
457 	if ((error = mfi_polled_command(sc, cm)) != 0) {
458 		device_printf(sc->mfi_dev, "failed to send init command\n");
459 		return (error);
460 	}
461 	mfi_release_command(cm);
462 
463 	return (0);
464 }
465 
/*
 * Issue a polled MFI_DCMD_CTRL_GETINFO to learn the controller's limits
 * and derive mfi_max_io (maximum sectors per I/O).  A failure of the DCMD
 * itself is tolerated: a conservative default based on the SG list size is
 * used and 0 is returned so attach can continue.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_ctrl_info *ci;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	ci = malloc(sizeof(struct mfi_ctrl_info), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (ci == NULL) {
		mfi_release_command(cm);
		return (ENOMEM);
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = sizeof(struct mfi_ctrl_info);
	dcmd->opcode = MFI_DCMD_CTRL_GETINFO;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	/* POLLED makes mfi_mapcmd load the data map synchronously */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_data = ci;
	cm->cm_len = sizeof(struct mfi_ctrl_info);

	/* NOTE(review): message lacks a trailing newline */
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Controller info buffer map failed");
		free(ci, M_MFIBUF);
		mfi_release_command(cm);
		return (error);
	}

	/* It's ok if this fails, just use default info instead */
	if ((error = mfi_polled_command(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Fall back to what the SG list can address */
		sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		/*
		 * NOTE(review): the dmamap loaded above is not unloaded on
		 * this path, and ci is freed while the firmware could in
		 * principle still DMA into it after a timeout -- confirm.
		 */
		free(ci, M_MFIBUF);
		mfi_release_command(cm);
		return (0);
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Max I/O is the lesser of stripe math and the reported request size */
	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

	free(ci, M_MFIBUF);
	mfi_release_command(cm);

	return (error);
}
526 
527 static int
528 mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
529 {
530 	struct mfi_frame_header *hdr;
531 	int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
532 
533 	hdr = &cm->cm_frame->header;
534 	hdr->cmd_status = 0xff;
535 	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
536 
537 	mfi_send_frame(sc, cm);
538 
539 	while (hdr->cmd_status == 0xff) {
540 		DELAY(1000);
541 		tm -= 1000;
542 		if (tm <= 0)
543 			break;
544 	}
545 
546 	if (hdr->cmd_status == 0xff) {
547 		device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
548 		return (ETIMEDOUT);
549 	}
550 
551 	return (0);
552 }
553 
/*
 * Tear down everything mfi_attach() set up.  Every resource is checked
 * individually, so this is safe to call after a partial attach failure.
 * Order mirrors the reverse of attach: cdev, command pool, interrupt,
 * then each DMA region (unload map, free memory, destroy tag), and
 * finally the tags and the I/O lock.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		/* Only maps for successfully-created commands exist */
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Sense buffers */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	/* Command frames */
	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	/* Comms queues */
	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock))
		mtx_destroy(&sc->mfi_io_lock);

	return;
}
611 
612 static void
613 mfi_startup(void *arg)
614 {
615 	struct mfi_softc *sc;
616 
617 	sc = (struct mfi_softc *)arg;
618 
619 	config_intrhook_disestablish(&sc->mfi_ich);
620 
621 	mfi_enable_intr(sc);
622 	mfi_ldprobe_inq(sc);
623 }
624 
/*
 * Interrupt handler.  Acknowledge the doorbell, then drain the reply
 * queue between the firmware's producer index (hw_pi) and our consumer
 * index (hw_ci).  Each entry is the context of a completed command; the
 * slot is poisoned with 0xffffffff after consumption so a stale read is
 * detectable.  The queue has mfi_max_fw_cmds + 1 entries, hence the
 * wrap point below.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t status, pi, ci, context;

	sc = (struct mfi_softc *)arg;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return;		/* Shared interrupt, not ours */
	/* Ack the interrupt by writing the status back */
	MFI_WRITE4(sc, MFI_OSTS, status);

	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;

	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		sc->mfi_comms->hw_reply_q[ci] = 0xffffffff;
		if (context == 0xffffffff) {
			device_printf(sc->mfi_dev, "mfi_intr: invalid context "
			    "pi= %d ci= %d\n", pi, ci);
		} else {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			mfi_complete(sc, cm);
		}
		ci++;
		if (ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * NOTE(review): the consumer index is published after dropping the
	 * lock; confirm no other path updates hw_ci concurrently.
	 */
	sc->mfi_comms->hw_ci = ci;

	return;
}
665 
666 int
667 mfi_shutdown(struct mfi_softc *sc)
668 {
669 	struct mfi_dcmd_frame *dcmd;
670 	struct mfi_command *cm;
671 	int error;
672 
673 	if ((cm = mfi_dequeue_free(sc)) == NULL)
674 		return (EBUSY);
675 
676 	/* AEN? */
677 
678 	dcmd = &cm->cm_frame->dcmd;
679 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
680 	dcmd->header.cmd = MFI_CMD_DCMD;
681 	dcmd->header.sg_count = 0;
682 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
683 	dcmd->header.timeout = 0;
684 	dcmd->header.data_len = 0;
685 	dcmd->opcode = MFI_DCMD_CTRL_SHUTDOWN;
686 
687 	if ((error = mfi_polled_command(sc, cm)) != 0) {
688 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
689 	}
690 
691 	return (error);
692 }
693 
/*
 * Unmask controller interrupts by writing the outbound interrupt mask
 * register.  NOTE(review): assumes 0x01 is the correct unmask value for
 * this controller generation -- confirm against mfireg.h.
 */
static void
mfi_enable_intr(struct mfi_softc *sc)
{

	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
700 
/*
 * First stage of logical-disk discovery: fire an INQUIRY at every
 * possible target ID.  Each successful INQUIRY continues the chain
 * (TUR, then READ CAPACITY) from its completion handler; mfi_probe_count
 * tracks outstanding probes and the thread sleeps until the completions
 * wake it (or the 60 second timeout fires).
 */
static void
mfi_ldprobe_inq(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	char *inq;
	int i;

	/* Probe all possible targets with a SCSI INQ command */
	mtx_lock(&sc->mfi_io_lock);
	sc->mfi_probe_count = 0;
	for (i = 0; i < MFI_MAX_CHANNEL_DEVS; i++) {
		inq = malloc(MFI_INQ_LENGTH, M_MFIBUF, M_NOWAIT|M_ZERO);
		if (inq == NULL)
			break;
		cm = mfi_dequeue_free(sc);
		if (cm == NULL) {
			/*
			 * Out of commands; wait for completions to return
			 * some (mfi_startup is used as the wakeup channel)
			 * and retry this target ID.
			 */
			free(inq, M_MFIBUF);
			msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart",
			    5 * hz);
			i--;
			continue;
		}
		pass = &cm->cm_frame->pass;
		pass->header.cmd = MFI_CMD_LD_SCSI_IO;
		pass->header.target_id = i;
		pass->header.lun_id = 0;
		pass->header.cdb_len = 6;	/* INQUIRY is a 6-byte CDB */
		pass->header.timeout = 0;
		pass->header.data_len = MFI_INQ_LENGTH;
		bzero(pass->cdb, 16);
		pass->cdb[0] = INQUIRY;
		pass->cdb[4] = MFI_INQ_LENGTH;	/* allocation length */
		pass->header.sense_len = MFI_SENSE_LEN;
		pass->sense_addr_lo = cm->cm_sense_busaddr;
		pass->sense_addr_hi = 0;
		cm->cm_complete = mfi_ldprobe_inq_complete;
		cm->cm_private = inq;
		cm->cm_sg = &pass->sgl;
		cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
		cm->cm_flags |= MFI_CMD_DATAIN;
		cm->cm_data = inq;
		cm->cm_len = MFI_INQ_LENGTH;
		sc->mfi_probe_count++;
		mfi_enqueue_ready(cm);
		mfi_startio(sc);
	}

	/* Sleep while the arrays are attaching */
	msleep(mfi_startup, &sc->mfi_io_lock, 0, "mfistart", 60 * hz);
	mtx_unlock(&sc->mfi_io_lock);

	return;
}
755 
756 static void
757 mfi_ldprobe_inq_complete(struct mfi_command *cm)
758 {
759 	struct mfi_frame_header *hdr;
760 	struct mfi_softc *sc;
761 	struct scsi_inquiry_data *inq;
762 
763 	sc = cm->cm_sc;
764 	inq = cm->cm_private;
765 	hdr = &cm->cm_frame->header;
766 
767 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00) ||
768 	    (SID_TYPE(inq) != T_DIRECT)) {
769 		free(inq, M_MFIBUF);
770 		mfi_release_command(cm);
771 		if (--sc->mfi_probe_count <= 0)
772 			wakeup(mfi_startup);
773 		return;
774 	}
775 
776 	free(inq, M_MFIBUF);
777 	mfi_release_command(cm);
778 	mfi_ldprobe_tur(sc, hdr->target_id);
779 }
780 
/*
 * Second probe stage: issue a TEST UNIT READY to a target that answered
 * the INQUIRY.  Completion continues to the READ CAPACITY stage.
 * Returns 0 on submission, EBUSY if no command was available (the caller
 * must then account for the outstanding probe itself).
 */
static int
mfi_ldprobe_tur(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);
	pass = &cm->cm_frame->pass;
	pass->header.cmd = MFI_CMD_LD_SCSI_IO;
	pass->header.target_id = id;
	pass->header.lun_id = 0;
	pass->header.cdb_len = 6;	/* TEST UNIT READY is a 6-byte CDB */
	pass->header.timeout = 0;
	pass->header.data_len = 0;	/* no data phase */
	bzero(pass->cdb, 16);
	pass->cdb[0] = TEST_UNIT_READY;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->sense_addr_lo = cm->cm_sense_busaddr;
	pass->sense_addr_hi = 0;
	cm->cm_complete = mfi_ldprobe_tur_complete;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = 0;
	mfi_enqueue_ready(cm);
	mfi_startio(sc);

	return (0);
}
810 
811 static void
812 mfi_ldprobe_tur_complete(struct mfi_command *cm)
813 {
814 	struct mfi_frame_header *hdr;
815 	struct mfi_softc *sc;
816 
817 	sc = cm->cm_sc;
818 	hdr = &cm->cm_frame->header;
819 
820 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
821 		device_printf(sc->mfi_dev, "Logical disk %d is not ready, "
822 		    "cmd_status= %d scsi_status= %d\n", hdr->target_id,
823 		    hdr->cmd_status, hdr->scsi_status);
824 		mfi_print_sense(sc, cm->cm_sense);
825 		mfi_release_command(cm);
826 		if (--sc->mfi_probe_count <= 0)
827 			wakeup(mfi_startup);
828 		return;
829 	}
830 	mfi_release_command(cm);
831 	mfi_ldprobe_capacity(sc, hdr->target_id);
832 }
833 
834 static int
835 mfi_ldprobe_capacity(struct mfi_softc *sc, int id)
836 {
837 	struct mfi_command *cm;
838 	struct mfi_pass_frame *pass;
839 	struct scsi_read_capacity_data_long *cap;
840 
841 	cap = malloc(sizeof(*cap), M_MFIBUF, M_NOWAIT|M_ZERO);
842 	if (cap == NULL)
843 		return (ENOMEM);
844 	cm = mfi_dequeue_free(sc);
845 	if (cm == NULL) {
846 		free(cap, M_MFIBUF);
847 		return (EBUSY);
848 	}
849 	pass = &cm->cm_frame->pass;
850 	pass->header.cmd = MFI_CMD_LD_SCSI_IO;
851 	pass->header.target_id = id;
852 	pass->header.lun_id = 0;
853 	pass->header.cdb_len = 6;
854 	pass->header.timeout = 0;
855 	pass->header.data_len = sizeof(*cap);
856 	bzero(pass->cdb, 16);
857 	pass->cdb[0] = 0x9e;	/* READ CAPACITY 16 */
858 	pass->cdb[13] = sizeof(*cap);
859 	pass->header.sense_len = MFI_SENSE_LEN;
860 	pass->sense_addr_lo = cm->cm_sense_busaddr;
861 	pass->sense_addr_hi = 0;
862 	cm->cm_complete = mfi_ldprobe_capacity_complete;
863 	cm->cm_private = cap;
864 	cm->cm_sg = &pass->sgl;
865 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
866 	cm->cm_flags |= MFI_CMD_DATAIN;
867 	cm->cm_data = cap;
868 	cm->cm_len = sizeof(*cap);
869 	mfi_enqueue_ready(cm);
870 	mfi_startio(sc);
871 
872 	return (0);
873 }
874 
/*
 * Completion handler for the probe READ CAPACITY.  On success, decode
 * the big-endian capacity data and attach an mfid child for the disk.
 * In all cases the outstanding-probe count is decremented and the
 * mfi_ldprobe_inq() waiter woken once it reaches zero.
 *
 * Note: target_id is correctly saved BEFORE mfi_release_command(), which
 * scrubs the first eight bytes of the frame header.
 */
static void
mfi_ldprobe_capacity_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct scsi_read_capacity_data_long *cap;
	uint64_t sectors;
	uint32_t secsize;
	int target;

	sc = cm->cm_sc;
	cap = cm->cm_private;
	hdr = &cm->cm_frame->header;

	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0x00)) {
		device_printf(sc->mfi_dev, "Failed to read capacity for "
		    "logical disk\n");
		device_printf(sc->mfi_dev, "cmd_status= %d scsi_status= %d\n",
		    hdr->cmd_status, hdr->scsi_status);
		free(cap, M_MFIBUF);
		mfi_release_command(cm);
		if (--sc->mfi_probe_count <= 0)
			wakeup(mfi_startup);
		return;
	}
	target = hdr->target_id;
	/* Capacity data is big-endian on the wire */
	sectors = scsi_8btou64(cap->addr);
	secsize = scsi_4btoul(cap->length);
	free(cap, M_MFIBUF);
	mfi_release_command(cm);
	mfi_add_ld(sc, target, sectors, secsize);
	if (--sc->mfi_probe_count <= 0)
		wakeup(mfi_startup);

	return;
}
911 
/*
 * Create and attach an mfid child device for a discovered logical disk.
 * Called with the io lock held (from the probe completion path); the lock
 * is dropped around the Giant-protected bus_generic_attach() and
 * reacquired before returning.
 *
 * Returns 0 on success, EINVAL on bad capacity data or child-add failure,
 * ENOMEM if the ivars allocation fails.
 */
static int
mfi_add_ld(struct mfi_softc *sc, int id, uint64_t sectors, uint32_t secsize)
{
	struct mfi_ld *ld;
	device_t child;

	if ((secsize == 0) || (sectors == 0)) {
		device_printf(sc->mfi_dev, "Invalid capacity parameters for "
		      "logical disk %d\n", id);
		return (EINVAL);
	}

	ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (ld == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate ld\n");
		return (ENOMEM);
	}

	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld, M_MFIBUF);
		return (EINVAL);
	}

	ld->ld_id = id;
	ld->ld_disk = child;
	ld->ld_secsize = secsize;
	ld->ld_sectors = sectors;

	device_set_ivars(child, ld);
	device_set_desc(child, "MFI Logical Disk");
	/* newbus attach needs Giant; can't hold our mutex across it */
	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);

	return (0);
}
951 
952 static struct mfi_command *
953 mfi_bio_command(struct mfi_softc *sc)
954 {
955 	struct mfi_io_frame *io;
956 	struct mfi_command *cm;
957 	struct bio *bio;
958 	int flags, blkcount;;
959 
960 	if ((cm = mfi_dequeue_free(sc)) == NULL)
961 		return (NULL);
962 
963 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
964 		mfi_release_command(cm);
965 		return (NULL);
966 	}
967 
968 	io = &cm->cm_frame->io;
969 	switch (bio->bio_cmd & 0x03) {
970 	case BIO_READ:
971 		io->header.cmd = MFI_CMD_LD_READ;
972 		flags = MFI_CMD_DATAIN;
973 		break;
974 	case BIO_WRITE:
975 		io->header.cmd = MFI_CMD_LD_WRITE;
976 		flags = MFI_CMD_DATAOUT;
977 		break;
978 	default:
979 		panic("Invalid bio command");
980 	}
981 
982 	/* Cheat with the sector length to avoid a non-constant division */
983 	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
984 	io->header.target_id = (uintptr_t)bio->bio_driver1;
985 	io->header.timeout = 0;
986 	io->header.flags = 0;
987 	io->header.sense_len = MFI_SENSE_LEN;
988 	io->header.data_len = blkcount;
989 	io->sense_addr_lo = cm->cm_sense_busaddr;
990 	io->sense_addr_hi = 0;
991 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
992 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
993 	cm->cm_complete = mfi_bio_complete;
994 	cm->cm_private = bio;
995 	cm->cm_data = bio->bio_data;
996 	cm->cm_len = bio->bio_bcount;
997 	cm->cm_sg = &io->sgl;
998 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
999 	cm->cm_flags = flags;
1000 
1001 	return (cm);
1002 }
1003 
1004 static void
1005 mfi_bio_complete(struct mfi_command *cm)
1006 {
1007 	struct bio *bio;
1008 	struct mfi_frame_header *hdr;
1009 	struct mfi_softc *sc;
1010 
1011 	bio = cm->cm_private;
1012 	hdr = &cm->cm_frame->header;
1013 	sc = cm->cm_sc;
1014 
1015 	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1016 		bio->bio_flags |= BIO_ERROR;
1017 		bio->bio_error = EIO;
1018 		device_printf(sc->mfi_dev, "I/O error, status= %d "
1019 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1020 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1021 	}
1022 
1023 	mfi_release_command(cm);
1024 	mfi_disk_complete(bio);
1025 }
1026 
1027 void
1028 mfi_startio(struct mfi_softc *sc)
1029 {
1030 	struct mfi_command *cm;
1031 
1032 	for (;;) {
1033 		/* Don't bother if we're short on resources */
1034 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1035 			break;
1036 
1037 		/* Try a command that has already been prepared */
1038 		cm = mfi_dequeue_ready(sc);
1039 
1040 		/* Nope, so look for work on the bioq */
1041 		if (cm == NULL)
1042 			cm = mfi_bio_command(sc);
1043 
1044 		/* No work available, so exit */
1045 		if (cm == NULL)
1046 			break;
1047 
1048 		/* Send the command to the controller */
1049 		if (mfi_mapcmd(sc, cm) != 0) {
1050 			mfi_requeue_ready(cm);
1051 			break;
1052 		}
1053 	}
1054 }
1055 
1056 static int
1057 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1058 {
1059 	int error, polled;
1060 
1061 	if (cm->cm_data != NULL) {
1062 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1063 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1064 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1065 		if (error == EINPROGRESS) {
1066 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1067 			return (0);
1068 		}
1069 	} else {
1070 		mfi_enqueue_busy(cm);
1071 		error = mfi_send_frame(sc, cm);
1072 	}
1073 
1074 	return (error);
1075 }
1076 
/*
 * busdma callback: fill in the frame's scatter/gather list from the DMA
 * segments, sync the buffer, account for the extra frames the SG list
 * consumes, and (for non-polled commands) deliver the frame to the
 * controller.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, dir;

	/*
	 * NOTE(review): on a mapping error the command is silently dropped
	 * here (never sent, never released) — presumably handled elsewhere;
	 * verify against the deferred-load path in mfi_mapcmd.
	 */
	if (error)
		return;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/* Use 32-bit SG entries unless the adapter requires 64-bit ones. */
	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg32[i].addr = segs[i].ds_addr;
			sgl->sg32[i].len = segs[i].ds_len;
		}
	} else {
		for (i = 0; i < nsegs; i++) {
			sgl->sg64[i].addr = segs[i].ds_addr;
			sgl->sg64[i].len = segs[i].ds_len;
		}
		hdr->flags |= MFI_FRAME_SGL64;
	}
	hdr->sg_count = nsegs;

	/* Build the sync op and direction flags from the data direction. */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	/* The caller will take care of delivering polled commands */
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		mfi_enqueue_busy(cm);
		mfi_send_frame(sc, cm);
	}

	return;
}
1137 
/*
 * Deliver a fully-built command frame to the controller by writing its
 * (shifted) bus address plus the extra-frame count to the inbound queue
 * port register.  Always succeeds from the driver's point of view.
 */
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used to indicate how many
	 * 64 byte frames beyond the first one are used in the command.  The
	 * extra frames are typically filled with S/G elements.  The extra
	 * frames must also be contiguous.  Thus, a compound frame can be at
	 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
	 * 39 64-bit S/G elements for block I/O commands.  This means that
	 * I/O transfers of 256k and higher simply are not possible, which
	 * is quite odd for such a modern adapter.
	 */
	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
	    cm->cm_extra_frames);
	return (0);
}
1159 
1160 static void
1161 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1162 {
1163 	int dir;
1164 
1165 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1166 		dir = 0;
1167 		if (cm->cm_flags & MFI_CMD_DATAIN)
1168 			dir |= BUS_DMASYNC_POSTREAD;
1169 		if (cm->cm_flags & MFI_CMD_DATAOUT)
1170 			dir |= BUS_DMASYNC_POSTWRITE;
1171 
1172 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1173 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1174 		cm->cm_flags &= ~MFI_CMD_MAPPED;
1175 	}
1176 
1177 	if (cm->cm_complete != NULL)
1178 		cm->cm_complete(cm);
1179 
1180 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1181 	mfi_startio(sc);
1182 }
1183 
/*
 * Write a buffer of kernel-dump data to logical disk 'id' at 'lba',
 * using a polled (non-interrupt) command.  Returns 0 on success,
 * EBUSY if no command is free, or an error from mapping/polling.
 */
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Build a polled LD write frame by hand. */
	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	/* Round the byte count up to whole sectors. */
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = cm->cm_sense_busaddr;
	io->sense_addr_hi = 0;
	/* Split the 64-bit LBA into the two 32-bit frame fields. */
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	/* Polled so this works from the dump path with interrupts off. */
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		mfi_release_command(cm);
		return (error);
	}

	/* Spin for completion, then sync/unload the DMA map ourselves. */
	error = mfi_polled_command(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
1224 
1225 static int
1226 mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1227 {
1228 	struct mfi_softc *sc;
1229 
1230 	sc = dev->si_drv1;
1231 	sc->mfi_flags |= MFI_FLAGS_OPEN;
1232 
1233 	return (0);
1234 }
1235 
1236 static int
1237 mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1238 {
1239 	struct mfi_softc *sc;
1240 
1241 	sc = dev->si_drv1;
1242 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1243 
1244 	return (0);
1245 }
1246 
1247 static int
1248 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1249 {
1250 	struct mfi_softc *sc;
1251 	union mfi_statrequest *ms;
1252 	int error;
1253 
1254 	sc = dev->si_drv1;
1255 	error = 0;
1256 
1257 	switch (cmd) {
1258 	case MFIIO_STATS:
1259 		ms = (union mfi_statrequest *)arg;
1260 		switch (ms->ms_item) {
1261 		case MFIQ_FREE:
1262 		case MFIQ_BIO:
1263 		case MFIQ_READY:
1264 		case MFIQ_BUSY:
1265 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1266 			    sizeof(struct mfi_qstat));
1267 			break;
1268 		default:
1269 			error = ENOENT;
1270 			break;
1271 		}
1272 		break;
1273 	default:
1274 		error = ENOENT;
1275 		break;
1276 	}
1277 
1278 	return (error);
1279 }
1280