xref: /freebsd/sys/dev/aacraid/aacraid.c (revision de7b456e596ff18032d2cbfdf244c66f36770da4)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2001 Scott Long
4  * Copyright (c) 2000 BSDi
5  * Copyright (c) 2001-2010 Adaptec, Inc.
6  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Adaptec by PMC Series 6, 7, 8, ... families of RAID controllers
36  */
37 #define AAC_DRIVERNAME			"aacraid"
38 
39 #include "opt_aacraid.h"
40 
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
48 #include <sys/poll.h>
49 #include <sys/ioccom.h>
50 
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/signalvar.h>
54 #include <sys/time.h>
55 #include <sys/eventhandler.h>
56 #include <sys/rman.h>
57 
58 #include <machine/bus.h>
59 #include <sys/bus_dma.h>
60 #include <machine/resource.h>
61 
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 
65 #include <dev/aacraid/aacraid_reg.h>
66 #include <sys/aac_ioctl.h>
67 #include <dev/aacraid/aacraid_debug.h>
68 #include <dev/aacraid/aacraid_var.h>
69 
70 #ifndef FILTER_HANDLED
71 #define FILTER_HANDLED	0x02
72 #endif
73 
74 static void	aac_add_container(struct aac_softc *sc,
75 				  struct aac_mntinforesp *mir, int f,
76 				  u_int32_t uid);
77 static void	aac_get_bus_info(struct aac_softc *sc);
78 static void	aac_container_bus(struct aac_softc *sc);
79 static void	aac_daemon(void *arg);
80 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
81 							  int pages, int nseg, int nseg_new);
82 
83 /* Command Processing */
84 static void	aac_timeout(struct aac_softc *sc);
85 static void	aac_command_thread(struct aac_softc *sc);
86 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
87 				     u_int32_t xferstate, struct aac_fib *fib,
88 				     u_int16_t datasize);
89 /* Command Buffer Management */
90 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
91 				       int nseg, int error);
92 static int	aac_alloc_commands(struct aac_softc *sc);
93 static void	aac_free_commands(struct aac_softc *sc);
94 static void	aac_unmap_command(struct aac_command *cm);
95 
96 /* Hardware Interface */
97 static int	aac_alloc(struct aac_softc *sc);
98 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
99 			       int error);
100 static int	aac_check_firmware(struct aac_softc *sc);
101 static void	aac_define_int_mode(struct aac_softc *sc);
102 static int	aac_init(struct aac_softc *sc);
103 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
104 static int	aac_setup_intr(struct aac_softc *sc);
105 static int	aac_check_config(struct aac_softc *sc);
106 
107 /* PMC SRC interface */
108 static int	aac_src_get_fwstatus(struct aac_softc *sc);
109 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
110 static int	aac_src_get_istatus(struct aac_softc *sc);
111 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
112 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
113 				    u_int32_t arg0, u_int32_t arg1,
114 				    u_int32_t arg2, u_int32_t arg3);
115 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
116 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
117 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
118 static int aac_src_get_outb_queue(struct aac_softc *sc);
119 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
120 
121 struct aac_interface aacraid_src_interface = {
122 	aac_src_get_fwstatus,
123 	aac_src_qnotify,
124 	aac_src_get_istatus,
125 	aac_src_clear_istatus,
126 	aac_src_set_mailbox,
127 	aac_src_get_mailbox,
128 	aac_src_access_devreg,
129 	aac_src_send_command,
130 	aac_src_get_outb_queue,
131 	aac_src_set_outb_queue
132 };
133 
134 /* PMC SRCv interface */
135 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
136 				    u_int32_t arg0, u_int32_t arg1,
137 				    u_int32_t arg2, u_int32_t arg3);
138 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
139 
140 struct aac_interface aacraid_srcv_interface = {
141 	aac_src_get_fwstatus,
142 	aac_src_qnotify,
143 	aac_src_get_istatus,
144 	aac_src_clear_istatus,
145 	aac_srcv_set_mailbox,
146 	aac_srcv_get_mailbox,
147 	aac_src_access_devreg,
148 	aac_src_send_command,
149 	aac_src_get_outb_queue,
150 	aac_src_set_outb_queue
151 };
152 
153 /* Debugging and Diagnostics */
154 static struct aac_code_lookup aac_cpu_variant[] = {
155 	{"i960JX",		CPUI960_JX},
156 	{"i960CX",		CPUI960_CX},
157 	{"i960HX",		CPUI960_HX},
158 	{"i960RX",		CPUI960_RX},
159 	{"i960 80303",		CPUI960_80303},
160 	{"StrongARM SA110",	CPUARM_SA110},
161 	{"PPC603e",		CPUPPC_603e},
162 	{"XScale 80321",	CPU_XSCALE_80321},
163 	{"MIPS 4KC",		CPU_MIPS_4KC},
164 	{"MIPS 5KC",		CPU_MIPS_5KC},
165 	{"Unknown StrongARM",	CPUARM_xxx},
166 	{"Unknown PowerPC",	CPUPPC_xxx},
167 	{NULL, 0},
168 	{"Unknown processor",	0}
169 };
170 
171 static struct aac_code_lookup aac_battery_platform[] = {
172 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
173 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
174 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
175 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
176 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
177 	{NULL, 0},
178 	{"unknown battery platform",		0}
179 };
180 static void	aac_describe_controller(struct aac_softc *sc);
181 static char	*aac_describe_code(struct aac_code_lookup *table,
182 				   u_int32_t code);
183 
184 /* Management Interface */
185 static d_open_t		aac_open;
186 static d_ioctl_t	aac_ioctl;
187 static d_poll_t		aac_poll;
188 #if __FreeBSD_version >= 702000
189 static void		aac_cdevpriv_dtor(void *arg);
190 #else
191 static d_close_t	aac_close;
192 #endif
193 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void	aac_request_aif(struct aac_softc *sc);
197 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int	aac_return_aif(struct aac_softc *sc,
202 			       struct aac_fib_context *ctx, caddr_t uptr);
203 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void	aac_ioctl_event(struct aac_softc *sc,
207 				struct aac_event *event, void *arg);
208 static int	aac_reset_adapter(struct aac_softc *sc);
209 static int	aac_get_container_info(struct aac_softc *sc,
210 				       struct aac_fib *fib, int cid,
211 				       struct aac_mntinforesp *mir,
212 				       u_int32_t *uid);
213 static u_int32_t
214 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
215 
216 static struct cdevsw aacraid_cdevsw = {
217 	.d_version =	D_VERSION,
218 	.d_flags =	D_NEEDGIANT,
219 	.d_open =	aac_open,
220 #if __FreeBSD_version < 702000
221 	.d_close =	aac_close,
222 #endif
223 	.d_ioctl =	aac_ioctl,
224 	.d_poll =	aac_poll,
225 	.d_name =	"aacraid",
226 };
227 
228 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
229 
230 /* sysctl node */
231 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
232 
233 /*
234  * Device Interface
235  */
236 
237 /*
238  * Initialize the controller and softc
239  */
240 int
241 aacraid_attach(struct aac_softc *sc)
242 {
243 	int error, unit;
244 	struct aac_fib *fib;
245 	struct aac_mntinforesp mir;
246 	int count = 0, i = 0;
247 	u_int32_t uid;
248 
249 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
250 	sc->hint_flags = device_get_flags(sc->aac_dev);
251 	/*
252 	 * Initialize per-controller queues.
253 	 */
254 	aac_initq_free(sc);
255 	aac_initq_ready(sc);
256 	aac_initq_busy(sc);
257 
258 	/* mark controller as suspended until we get ourselves organised */
259 	sc->aac_state |= AAC_STATE_SUSPEND;
260 
261 	/*
262 	 * Check that the firmware on the card is supported.
263 	 */
264 	sc->msi_enabled = FALSE;
265 	if ((error = aac_check_firmware(sc)) != 0)
266 		return(error);
267 
268 	/*
269 	 * Initialize locks
270 	 */
271 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
272 	TAILQ_INIT(&sc->aac_container_tqh);
273 	TAILQ_INIT(&sc->aac_ev_cmfree);
274 
275 #if __FreeBSD_version >= 800000
276 	/* Initialize the clock daemon callout. */
277 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
278 #endif
279 	/*
280 	 * Initialize the adapter.
281 	 */
282 	if ((error = aac_alloc(sc)) != 0)
283 		return(error);
284 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
285 		aac_define_int_mode(sc);
286 		if ((error = aac_init(sc)) != 0)
287 			return(error);
288 	}
289 
290 	/*
291 	 * Allocate and connect our interrupt.
292 	 */
293 	if ((error = aac_setup_intr(sc)) != 0)
294 		return(error);
295 
296 	/*
297 	 * Print a little information about the controller.
298 	 */
299 	aac_describe_controller(sc);
300 
301 	/*
302 	 * Make the control device.
303 	 */
304 	unit = device_get_unit(sc->aac_dev);
305 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
306 				 0640, "aacraid%d", unit);
307 	sc->aac_dev_t->si_drv1 = sc;
308 
309 	/* Create the AIF thread */
310 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
311 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
312 		panic("Could not create AIF thread");
313 
314 	/* Register the shutdown method to only be called post-dump */
315 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
316 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
317 		device_printf(sc->aac_dev,
318 			      "shutdown event registration failed\n");
319 
320 	/* Find containers */
321 	mtx_lock(&sc->aac_io_lock);
322 	aac_alloc_sync_fib(sc, &fib);
323 	/* loop over possible containers */
324 	do {
325 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
326 			continue;
327 		if (i == 0)
328 			count = mir.MntRespCount;
329 		aac_add_container(sc, &mir, 0, uid);
330 		i++;
331 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
332 	aac_release_sync_fib(sc);
333 	mtx_unlock(&sc->aac_io_lock);
334 
335 	/* Register with CAM for the containers */
336 	TAILQ_INIT(&sc->aac_sim_tqh);
337 	aac_container_bus(sc);
338 	/* Register with CAM for the non-DASD devices */
339 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
340 		aac_get_bus_info(sc);
341 
342 	/* poke the bus to actually attach the child devices */
343 	bus_generic_attach(sc->aac_dev);
344 
345 	/* mark the controller up */
346 	sc->aac_state &= ~AAC_STATE_SUSPEND;
347 
348 	/* enable interrupts now */
349 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
350 
351 #if __FreeBSD_version >= 800000
352 	mtx_lock(&sc->aac_io_lock);
353 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
354 	mtx_unlock(&sc->aac_io_lock);
355 #else
356 	{
357 		struct timeval tv;
358 		tv.tv_sec = 60;
359 		tv.tv_usec = 0;
360 		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
361 	}
362 #endif
363 
364 	return(0);
365 }
366 
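/*
 * Periodic timer: send the current host time to the controller and
 * reschedule ourselves every 30 minutes.
 */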
367 static void
368 aac_daemon(void *arg)
369 {
370 	struct aac_softc *sc;
371 	struct timeval tv;
372 	struct aac_command *cm;
373 	struct aac_fib *fib;
374 
375 	sc = arg;
376 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
377 
378 #if __FreeBSD_version >= 800000
379 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
380 	if (callout_pending(&sc->aac_daemontime) ||
381 	    callout_active(&sc->aac_daemontime) == 0)
382 		return;
383 #else
384 	mtx_lock(&sc->aac_io_lock);
385 #endif
386 	getmicrotime(&tv);
387 
388 	if (!aacraid_alloc_command(sc, &cm)) {
389 		fib = cm->cm_fib;
390 		cm->cm_timestamp = time_uptime;
391 		cm->cm_datalen = 0;
392 		cm->cm_flags |= AAC_CMD_WAIT;
393 
394 		fib->Header.Size =
395 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
396 		fib->Header.XferState =
397 			AAC_FIBSTATE_HOSTOWNED   |
398 			AAC_FIBSTATE_INITIALISED |
399 			AAC_FIBSTATE_EMPTY	 |
400 			AAC_FIBSTATE_FROMHOST	 |
401 			AAC_FIBSTATE_REXPECTED   |
402 			AAC_FIBSTATE_NORM	 |
403 			AAC_FIBSTATE_ASYNC	 |
404 			AAC_FIBSTATE_FAST_RESPONSE;
405 		fib->Header.Command = SendHostTime;
406 		*(uint32_t *)fib->data = tv.tv_sec;
407 
408 		aacraid_map_command_sg(cm, NULL, 0, 0);
409 		aacraid_release_command(cm);
410 	}
411 
412 #if __FreeBSD_version >= 800000
413 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
414 #else
415 	mtx_unlock(&sc->aac_io_lock);
416 	tv.tv_sec = 30 * 60;
417 	tv.tv_usec = 0;
418 	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
419 #endif
420 }
421 
422 void
423 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
424 {
425 
426 	switch (event->ev_type & AAC_EVENT_MASK) {
427 	case AAC_EVENT_CMFREE:
428 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
429 		break;
430 	default:
431 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
432 		    event->ev_type);
433 		break;
434 	}
435 
436 	return;
437 }
438 
439 /*
440  * Request information about container #cid
441  */
442 static int
443 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
444 		       struct aac_mntinforesp *mir, u_int32_t *uid)
445 {
446 	struct aac_command *cm;
447 	struct aac_fib *fib;
448 	struct aac_mntinfo *mi;
449 	struct aac_cnt_config *ccfg;
450 	int rval;
451 
452 	if (sync_fib == NULL) {
453 		if (aacraid_alloc_command(sc, &cm)) {
454 			device_printf(sc->aac_dev,
455 				"Warning, no free command available\n");
456 			return (-1);
457 		}
458 		fib = cm->cm_fib;
459 	} else {
460 		fib = sync_fib;
461 	}
462 
463 	mi = (struct aac_mntinfo *)&fib->data[0];
464 	/* 4KB sector support? 64-bit LBA? */
465 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
466 		mi->Command = VM_NameServeAllBlk;
467 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
468 		mi->Command = VM_NameServe64;
469 	else
470 		mi->Command = VM_NameServe;
471 	mi->MntType = FT_FILESYS;
472 	mi->MntCount = cid;
473 
474 	if (sync_fib) {
475 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
476 			 sizeof(struct aac_mntinfo))) {
477 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
478 			return (-1);
479 		}
480 	} else {
481 		cm->cm_timestamp = time_uptime;
482 		cm->cm_datalen = 0;
483 
484 		fib->Header.Size =
485 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
486 		fib->Header.XferState =
487 			AAC_FIBSTATE_HOSTOWNED   |
488 			AAC_FIBSTATE_INITIALISED |
489 			AAC_FIBSTATE_EMPTY	 |
490 			AAC_FIBSTATE_FROMHOST	 |
491 			AAC_FIBSTATE_REXPECTED   |
492 			AAC_FIBSTATE_NORM	 |
493 			AAC_FIBSTATE_ASYNC	 |
494 			AAC_FIBSTATE_FAST_RESPONSE;
495 		fib->Header.Command = ContainerCommand;
496 		if (aacraid_wait_command(cm) != 0) {
497 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
498 			aacraid_release_command(cm);
499 			return (-1);
500 		}
501 	}
502 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
503 
504 	/* UID */
505 	*uid = cid;
506 	if (mir->MntTable[0].VolType != CT_NONE &&
507 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
508 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
509 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
510 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
511 		}
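		/* ask the firmware to translate the container id into its 32-bit unique id */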
512 		ccfg = (struct aac_cnt_config *)&fib->data[0];
513 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
514 		ccfg->Command = VM_ContainerConfig;
515 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
516 		ccfg->CTCommand.param[0] = cid;
517 
518 		if (sync_fib) {
519 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
520 				sizeof(struct aac_cnt_config));
521 			if (rval == 0 && ccfg->Command == ST_OK &&
522 				ccfg->CTCommand.param[0] == CT_OK &&
523 				mir->MntTable[0].VolType != CT_PASSTHRU)
524 				*uid = ccfg->CTCommand.param[1];
525 		} else {
526 			fib->Header.Size =
527 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
528 			fib->Header.XferState =
529 				AAC_FIBSTATE_HOSTOWNED   |
530 				AAC_FIBSTATE_INITIALISED |
531 				AAC_FIBSTATE_EMPTY	 |
532 				AAC_FIBSTATE_FROMHOST	 |
533 				AAC_FIBSTATE_REXPECTED   |
534 				AAC_FIBSTATE_NORM	 |
535 				AAC_FIBSTATE_ASYNC	 |
536 				AAC_FIBSTATE_FAST_RESPONSE;
537 			fib->Header.Command = ContainerCommand;
538 			rval = aacraid_wait_command(cm);
539 			if (rval == 0 && ccfg->Command == ST_OK &&
540 				ccfg->CTCommand.param[0] == CT_OK &&
541 				mir->MntTable[0].VolType != CT_PASSTHRU)
542 				*uid = ccfg->CTCommand.param[1];
543 			aacraid_release_command(cm);
544 		}
545 	}
546 
547 	return (0);
548 }
549 
550 /*
551  * Create a device to represent a new container
552  */
553 static void
554 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
555 		  u_int32_t uid)
556 {
557 	struct aac_container *co;
558 
559 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
560 
561 	/*
562 	 * Check container volume type for validity.  Note that many of
563 	 * the possible types may never show up.
564 	 */
565 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
566 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
567 		       M_NOWAIT | M_ZERO);
568 		if (co == NULL) {
569 			panic("Out of memory?!");
570 		}
571 
572 		co->co_found = f;
573 		bcopy(&mir->MntTable[0], &co->co_mntobj,
574 		      sizeof(struct aac_mntobj));
575 		co->co_uid = uid;
576 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
577 	}
578 }
579 
580 /*
581  * Allocate resources associated with (sc)
582  */
583 static int
584 aac_alloc(struct aac_softc *sc)
585 {
586 	bus_size_t maxsize;
587 
588 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
589 
590 	/*
591 	 * Create DMA tag for mapping buffers into controller-addressable space.
592 	 */
593 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
594 			       1, 0, 			/* algnmnt, boundary */
595 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
596 			       BUS_SPACE_MAXADDR :
597 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
598 			       BUS_SPACE_MAXADDR, 	/* highaddr */
599 			       NULL, NULL, 		/* filter, filterarg */
600 			       MAXBSIZE,		/* maxsize */
601 			       sc->aac_sg_tablesize,	/* nsegments */
602 			       MAXBSIZE,		/* maxsegsize */
603 			       BUS_DMA_ALLOCNOW,	/* flags */
604 			       busdma_lock_mutex,	/* lockfunc */
605 			       &sc->aac_io_lock,	/* lockfuncarg */
606 			       &sc->aac_buffer_dmat)) {
607 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
608 		return (ENOMEM);
609 	}
610 
611 	/*
612 	 * Create DMA tag for mapping FIBs into controller-addressable space..
613 	 */
614 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
615 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
616 			sizeof(struct aac_fib_xporthdr) + 31);
617 	else
618 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
619 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
620 			       1, 0, 			/* algnmnt, boundary */
621 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
622 			       BUS_SPACE_MAXADDR_32BIT :
623 			       0x7fffffff,		/* lowaddr */
624 			       BUS_SPACE_MAXADDR, 	/* highaddr */
625 			       NULL, NULL, 		/* filter, filterarg */
626 			       maxsize,  		/* maxsize */
627 			       1,			/* nsegments */
628 			       maxsize,			/* maxsegsize */
629 			       0,			/* flags */
630 			       NULL, NULL,		/* No locking needed */
631 			       &sc->aac_fib_dmat)) {
632 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
633 		return (ENOMEM);
634 	}
635 
636 	/*
637 	 * Create DMA tag for the common structure and allocate it.
638 	 */
639 	maxsize = sizeof(struct aac_common);
640 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
641 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
642 			       1, 0,			/* algnmnt, boundary */
643 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
644 			       BUS_SPACE_MAXADDR_32BIT :
645 			       0x7fffffff,		/* lowaddr */
646 			       BUS_SPACE_MAXADDR, 	/* highaddr */
647 			       NULL, NULL, 		/* filter, filterarg */
648 			       maxsize, 		/* maxsize */
649 			       1,			/* nsegments */
650 			       maxsize,			/* maxsegsize */
651 			       0,			/* flags */
652 			       NULL, NULL,		/* No locking needed */
653 			       &sc->aac_common_dmat)) {
654 		device_printf(sc->aac_dev,
655 			      "can't allocate common structure DMA tag\n");
656 		return (ENOMEM);
657 	}
658 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
659 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
660 		device_printf(sc->aac_dev, "can't allocate common structure\n");
661 		return (ENOMEM);
662 	}
663 
664 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
665 			sc->aac_common, maxsize,
666 			aac_common_map, sc, 0);
667 	bzero(sc->aac_common, maxsize);
668 
669 	/* Allocate some FIBs and associated command structs */
670 	TAILQ_INIT(&sc->aac_fibmap_tqh);
671 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
672 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
673 	mtx_lock(&sc->aac_io_lock);
674 	while (sc->total_fibs < sc->aac_max_fibs) {
675 		if (aac_alloc_commands(sc) != 0)
676 			break;
677 	}
678 	mtx_unlock(&sc->aac_io_lock);
679 	if (sc->total_fibs == 0)
680 		return (ENOMEM);
681 
682 	return (0);
683 }
684 
685 /*
686  * Free all of the resources associated with (sc)
687  *
688  * Should not be called if the controller is active.
689  */
690 void
691 aacraid_free(struct aac_softc *sc)
692 {
693 	int i;
694 
695 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
696 
697 	/* remove the control device */
698 	if (sc->aac_dev_t != NULL)
699 		destroy_dev(sc->aac_dev_t);
700 
701 	/* throw away any FIB buffers, discard the FIB DMA tag */
702 	aac_free_commands(sc);
703 	if (sc->aac_fib_dmat)
704 		bus_dma_tag_destroy(sc->aac_fib_dmat);
705 
706 	free(sc->aac_commands, M_AACRAIDBUF);
707 
708 	/* destroy the common area */
709 	if (sc->aac_common) {
710 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
711 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
712 				sc->aac_common_dmamap);
713 	}
714 	if (sc->aac_common_dmat)
715 		bus_dma_tag_destroy(sc->aac_common_dmat);
716 
717 	/* disconnect the interrupt handler */
718 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
719 		if (sc->aac_intr[i])
720 			bus_teardown_intr(sc->aac_dev,
721 				sc->aac_irq[i], sc->aac_intr[i]);
722 		if (sc->aac_irq[i])
723 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
724 				sc->aac_irq_rid[i], sc->aac_irq[i]);
725 		else
726 			break;
727 	}
728 	if (sc->msi_enabled)
729 		pci_release_msi(sc->aac_dev);
730 
731 	/* destroy data-transfer DMA tag */
732 	if (sc->aac_buffer_dmat)
733 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
734 
735 	/* destroy the parent DMA tag */
736 	if (sc->aac_parent_dmat)
737 		bus_dma_tag_destroy(sc->aac_parent_dmat);
738 
739 	/* release the register window mapping */
740 	if (sc->aac_regs_res0 != NULL)
741 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
742 				     sc->aac_regs_rid0, sc->aac_regs_res0);
743 	if (sc->aac_regs_res1 != NULL)
744 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 				     sc->aac_regs_rid1, sc->aac_regs_res1);
746 }
747 
748 /*
749  * Disconnect from the controller completely, in preparation for unload.
750  */
751 int
752 aacraid_detach(device_t dev)
753 {
754 	struct aac_softc *sc;
755 	struct aac_container *co;
756 	struct aac_sim	*sim;
757 	int error;
758 
759 	sc = device_get_softc(dev);
760 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
761 
762 #if __FreeBSD_version >= 800000
763 	callout_drain(&sc->aac_daemontime);
764 #else
765 	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
766 #endif
767 	/* Remove the child containers */
768 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
769 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
770 		free(co, M_AACRAIDBUF);
771 	}
772 
773 	/* Remove the CAM SIMs */
774 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
775 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
776 		error = device_delete_child(dev, sim->sim_dev);
777 		if (error)
778 			return (error);
779 		free(sim, M_AACRAIDBUF);
780 	}
781 
782 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
783 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
784 		wakeup(sc->aifthread);
785 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
786 	}
787 
788 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
789 		panic("Cannot shutdown AIF thread");
790 
791 	if ((error = aacraid_shutdown(dev)))
792 		return(error);
793 
794 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
795 
796 	aacraid_free(sc);
797 
798 	mtx_destroy(&sc->aac_io_lock);
799 
800 	return(0);
801 }
802 
803 /*
804  * Bring the controller down to a dormant state and detach all child devices.
805  *
806  * This function is called before detach or system shutdown.
807  *
808  * Note that we can assume that the bioq on the controller is empty, as we won't
809  * allow shutdown if any device is open.
810  */
811 int
812 aacraid_shutdown(device_t dev)
813 {
814 	struct aac_softc *sc;
815 	struct aac_fib *fib;
816 	struct aac_close_command *cc;
817 
818 	sc = device_get_softc(dev);
819 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
820 
821 	sc->aac_state |= AAC_STATE_SUSPEND;
822 
823 	/*
824 	 * Send a Container shutdown followed by a HostShutdown FIB to the
825 	 * controller to convince it that we don't want to talk to it anymore.
826 	 * We've been closed and all I/O has completed already.
827 	 */
828 	device_printf(sc->aac_dev, "shutting down controller...");
829 
830 	mtx_lock(&sc->aac_io_lock);
831 	aac_alloc_sync_fib(sc, &fib);
832 	cc = (struct aac_close_command *)&fib->data[0];
833 
834 	bzero(cc, sizeof(struct aac_close_command));
835 	cc->Command = VM_CloseAll;
836 	cc->ContainerId = 0xfffffffe;
837 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
838 	    sizeof(struct aac_close_command)))
839 		printf("FAILED.\n");
840 	else
841 		printf("done\n");
842 
843 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
844 	aac_release_sync_fib(sc);
845 	mtx_unlock(&sc->aac_io_lock);
846 
847 	return(0);
848 }
849 
850 /*
851  * Bring the controller to a quiescent state, ready for system suspend.
852  */
853 int
854 aacraid_suspend(device_t dev)
855 {
856 	struct aac_softc *sc;
857 
858 	sc = device_get_softc(dev);
859 
860 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
861 	sc->aac_state |= AAC_STATE_SUSPEND;
862 
863 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
864 	return(0);
865 }
866 
867 /*
868  * Bring the controller back to a state ready for operation.
869  */
870 int
871 aacraid_resume(device_t dev)
872 {
873 	struct aac_softc *sc;
874 
875 	sc = device_get_softc(dev);
876 
877 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
878 	sc->aac_state &= ~AAC_STATE_SUSPEND;
879 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
880 	return(0);
881 }
882 
883 /*
884  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interfaces.
885  */
886 void
887 aacraid_new_intr_type1(void *arg)
888 {
889 	struct aac_msix_ctx *ctx;
890 	struct aac_softc *sc;
891 	int vector_no;
892 	struct aac_command *cm;
893 	struct aac_fib *fib;
894 	u_int32_t bellbits, bellbits_shifted, index, handle;
895 	int isFastResponse, isAif, noMoreAif, mode;
896 
897 	ctx = (struct aac_msix_ctx *)arg;
898 	sc = ctx->sc;
899 	vector_no = ctx->vector_no;
900 
901 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
902 	mtx_lock(&sc->aac_io_lock);
903 
904 	if (sc->msi_enabled) {
905 		mode = AAC_INT_MODE_MSI;
906 		if (vector_no == 0) {
907 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
908 			if (bellbits & 0x40000)
909 				mode |= AAC_INT_MODE_AIF;
910 			else if (bellbits & 0x1000)
911 				mode |= AAC_INT_MODE_SYNC;
912 		}
913 	} else {
914 		mode = AAC_INT_MODE_INTX;
915 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
916 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
917 			bellbits = AAC_DB_RESPONSE_SENT_NS;
918 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
919 		} else {
920 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
921 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
923 				mode |= AAC_INT_MODE_AIF;
924 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
925 				mode |= AAC_INT_MODE_SYNC;
926 		}
927 		/* ODR readback, Prep #238630 */
928 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
929 	}
930 
931 	if (mode & AAC_INT_MODE_SYNC) {
932 		if (sc->aac_sync_cm) {
933 			cm = sc->aac_sync_cm;
934 			cm->cm_flags |= AAC_CMD_COMPLETED;
935 			/* is there a completion handler? */
936 			if (cm->cm_complete != NULL) {
937 				cm->cm_complete(cm);
938 			} else {
939 				/* assume that someone is sleeping on this command */
940 				wakeup(cm);
941 			}
942 			sc->flags &= ~AAC_QUEUE_FRZN;
943 			sc->aac_sync_cm = NULL;
944 		}
945 		mode = 0;
946 	}
947 
948 	if (mode & AAC_INT_MODE_AIF) {
949 		if (mode & AAC_INT_MODE_INTX) {
950 			aac_request_aif(sc);
951 			mode = 0;
952 		}
953 	}
954 
955 	if (mode) {
956 		/* handle async. status */
957 		index = sc->aac_host_rrq_idx[vector_no];
958 		for (;;) {
959 			isFastResponse = isAif = noMoreAif = 0;
960 			/* remove toggle bit (31) */
961 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
962 			/* check fast response bit (30) */
963 			if (handle & 0x40000000)
964 				isFastResponse = 1;
965 			/* check AIF bit (23) */
966 			else if (handle & 0x00800000)
967 				isAif = TRUE;
968 			handle &= 0x0000ffff;
969 			if (handle == 0)
970 				break;
971 
972 			cm = sc->aac_commands + (handle - 1);
973 			fib = cm->cm_fib;
974 			sc->aac_rrq_outstanding[vector_no]--;
975 			if (isAif) {
976 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
977 				if (!noMoreAif)
978 					aac_handle_aif(sc, fib);
979 				aac_remove_busy(cm);
980 				aacraid_release_command(cm);
981 			} else {
982 				if (isFastResponse) {
983 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
984 					*((u_int32_t *)(fib->data)) = ST_OK;
985 					cm->cm_flags |= AAC_CMD_FASTRESP;
986 				}
987 				aac_remove_busy(cm);
988 				aac_unmap_command(cm);
989 				cm->cm_flags |= AAC_CMD_COMPLETED;
990 
991 				/* is there a completion handler? */
992 				if (cm->cm_complete != NULL) {
993 					cm->cm_complete(cm);
994 				} else {
995 					/* assume that someone is sleeping on this command */
996 					wakeup(cm);
997 				}
998 				sc->flags &= ~AAC_QUEUE_FRZN;
999 			}
1000 
1001 			sc->aac_common->ac_host_rrq[index++] = 0;
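			/* wrap the index within this vector's slice of the host RRQ */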
1002 			if (index == (vector_no + 1) * sc->aac_vector_cap)
1003 				index = vector_no * sc->aac_vector_cap;
1004 			sc->aac_host_rrq_idx[vector_no] = index;
1005 
1006 			if ((isAif && !noMoreAif) || sc->aif_pending)
1007 				aac_request_aif(sc);
1008 		}
1009 	}
1010 
1011 	if (mode & AAC_INT_MODE_AIF) {
1012 		aac_request_aif(sc);
1013 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1014 		mode = 0;
1015 	}
1016 
1017 	/* see if we can start some more I/O */
1018 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1019 		aacraid_startio(sc);
1020 	mtx_unlock(&sc->aac_io_lock);
1021 }
1022 
1023 /*
1024  * Handle notification of one or more FIBs coming from the controller.
1025  */
1026 static void
1027 aac_command_thread(struct aac_softc *sc)
1028 {
1029 	int retval;
1030 
1031 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1032 
1033 	mtx_lock(&sc->aac_io_lock);
1034 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1035 
1036 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1037 
1038 		retval = 0;
1039 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1040 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1041 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1042 
1043 		/*
1044 		 * First see if any FIBs need to be allocated.  This needs
1045 		 * to be called without the driver lock because contigmalloc
1046 		 * will grab Giant, which would result in an LOR.
1047 		 */
1048 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1049 			aac_alloc_commands(sc);
1050 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1051 			aacraid_startio(sc);
1052 		}
1053 
1054 		/*
1055 		 * While we're here, check to see if any commands are stuck.
1056 		 * This is pretty low-priority, so it's ok if it doesn't
1057 		 * always fire.
1058 		 */
1059 		if (retval == EWOULDBLOCK)
1060 			aac_timeout(sc);
1061 
1062 		/* Check the hardware printf message buffer */
1063 		if (sc->aac_common->ac_printf[0] != 0)
1064 			aac_print_printf(sc);
1065 	}
1066 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1067 	mtx_unlock(&sc->aac_io_lock);
1068 	wakeup(sc->aac_dev);
1069 
1070 	aac_kthread_exit(0);
1071 }
1072 
1073 /*
1074  * Submit a command to the controller, return when it completes.
1075  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1076  *     be stuck here forever.  At the same time, signals are not caught
1077  *     because there is a risk that a signal could wakeup the sleep before
1078  *     the card has a chance to complete the command.  Since there is no way
1079  *     to cancel a command that is in progress, we can't protect against the
1080  *     card completing a command late and spamming the command and data
1081  *     memory.  So, we are held hostage until the command completes.
1082  */
1083 int
1084 aacraid_wait_command(struct aac_command *cm)
1085 {
1086 	struct aac_softc *sc;
1087 	int error;
1088 
1089 	sc = cm->cm_sc;
1090 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1091 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1092 
1093 	/* Put the command on the ready queue and get things going */
1094 	aac_enqueue_ready(cm);
1095 	aacraid_startio(sc);
1096 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1097 	return(error);
1098 }
1099 
1100 /*
1101  * Command Buffer Management
1102  */
1103 
1104 /*
1105  * Allocate a command.
1106  */
1107 int
1108 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1109 {
1110 	struct aac_command *cm;
1111 
1112 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1113 
1114 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1115 		if (sc->total_fibs < sc->aac_max_fibs) {
1116 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1117 			wakeup(sc->aifthread);
1118 		}
1119 		return (EBUSY);
1120 	}
1121 
1122 	*cmp = cm;
1123 	return(0);
1124 }
1125 
1126 /*
1127  * Release a command back to the freelist.
1128  */
1129 void
1130 aacraid_release_command(struct aac_command *cm)
1131 {
1132 	struct aac_event *event;
1133 	struct aac_softc *sc;
1134 
1135 	sc = cm->cm_sc;
1136 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1137 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1138 
1139 	/* (re)initialize the command/FIB */
1140 	cm->cm_sgtable = NULL;
1141 	cm->cm_flags = 0;
1142 	cm->cm_complete = NULL;
1143 	cm->cm_ccb = NULL;
1144 	cm->cm_passthr_dmat = 0;
1145 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1146 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1147 	cm->cm_fib->Header.Unused = 0;
1148 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1149 
1150 	/*
1151 	 * These are duplicated in aac_start to cover the case where an
1152 	 * intermediate stage may have destroyed them.  They're left
1153 	 * initialized here for debugging purposes only.
1154 	 */
1155 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1156 	cm->cm_fib->Header.Handle = 0;
1157 
1158 	aac_enqueue_free(cm);
1159 
1160 	/*
1161 	 * Dequeue all events so that there's no risk of events getting
1162 	 * stranded.
1163 	 */
1164 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1165 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1166 		event->ev_callback(sc, event, event->ev_arg);
1167 	}
1168 }
1169 
1170 /*
1171  * Map helper for command/FIB allocation.
1172  */
1173 static void
1174 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1175 {
1176 	uint64_t	*fibphys;
1177 
1178 	fibphys = (uint64_t *)arg;
1179 
1180 	*fibphys = segs[0].ds_addr;
1181 }
1182 
1183 /*
1184  * Allocate and initialize commands/FIBs for this adapter.
1185  */
1186 static int
1187 aac_alloc_commands(struct aac_softc *sc)
1188 {
1189 	struct aac_command *cm;
1190 	struct aac_fibmap *fm;
1191 	uint64_t fibphys;
1192 	int i, error;
1193 	u_int32_t maxsize;
1194 
1195 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1196 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1197 
1198 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1199 		return (ENOMEM);
1200 
1201 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1202 	if (fm == NULL)
1203 		return (ENOMEM);
1204 
1205 	mtx_unlock(&sc->aac_io_lock);
1206 	/* allocate the FIBs in DMAable memory and load them */
1207 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1208 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1209 		device_printf(sc->aac_dev,
1210 			      "Not enough contiguous memory available.\n");
1211 		free(fm, M_AACRAIDBUF);
1212 		mtx_lock(&sc->aac_io_lock);
1213 		return (ENOMEM);
1214 	}
1215 
1216 	maxsize = sc->aac_max_fib_size + 31;
1217 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1218 		maxsize += sizeof(struct aac_fib_xporthdr);
1219 	/* Ignore errors since this doesn't bounce */
1220 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1221 			      sc->aac_max_fibs_alloc * maxsize,
1222 			      aac_map_command_helper, &fibphys, 0);
1223 	mtx_lock(&sc->aac_io_lock);
1224 
1225 	/* initialize constant fields in the command structure */
1226 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1227 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1228 		cm = sc->aac_commands + sc->total_fibs;
1229 		fm->aac_commands = cm;
1230 		cm->cm_sc = sc;
1231 		cm->cm_fib = (struct aac_fib *)
1232 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1233 		cm->cm_fibphys = fibphys + i * maxsize;
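		/*
		 * Align each FIB on a 32-byte boundary; on NEW_COMM_TYPE1
		 * controllers also leave room for the transport header in
		 * front of it.
		 */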
1234 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1235 			u_int64_t fibphys_aligned;
1236 			fibphys_aligned =
1237 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1238 			cm->cm_fib = (struct aac_fib *)
1239 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1240 			cm->cm_fibphys = fibphys_aligned;
1241 		} else {
1242 			u_int64_t fibphys_aligned;
1243 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1244 			cm->cm_fib = (struct aac_fib *)
1245 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1246 			cm->cm_fibphys = fibphys_aligned;
1247 		}
1248 		cm->cm_index = sc->total_fibs;
1249 
1250 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1251 					       &cm->cm_datamap)) != 0)
1252 			break;
1253 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1254 			aacraid_release_command(cm);
1255 		sc->total_fibs++;
1256 	}
1257 
1258 	if (i > 0) {
1259 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1260 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1261 		return (0);
1262 	}
1263 
1264 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1265 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1266 	free(fm, M_AACRAIDBUF);
1267 	return (ENOMEM);
1268 }
1269 
1270 /*
1271  * Free FIBs owned by this adapter.
1272  */
1273 static void
1274 aac_free_commands(struct aac_softc *sc)
1275 {
1276 	struct aac_fibmap *fm;
1277 	struct aac_command *cm;
1278 	int i;
1279 
1280 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1281 
1282 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1283 
1284 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1285 		/*
1286 		 * We check against total_fibs to handle partially
1287 		 * allocated blocks.
1288 		 */
1289 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1290 			cm = fm->aac_commands + i;
1291 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1292 		}
1293 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1294 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1295 		free(fm, M_AACRAIDBUF);
1296 	}
1297 }
1298 
1299 /*
1300  * Command-mapping helper function - populate this command's s/g table.
1301  */
1302 void
1303 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1304 {
1305 	struct aac_softc *sc;
1306 	struct aac_command *cm;
1307 	struct aac_fib *fib;
1308 	int i;
1309 
1310 	cm = (struct aac_command *)arg;
1311 	sc = cm->cm_sc;
1312 	fib = cm->cm_fib;
1313 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1314 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1315 
1316 	/* copy into the FIB */
1317 	if (cm->cm_sgtable != NULL) {
1318 		if (fib->Header.Command == RawIo2) {
1319 			struct aac_raw_io2 *raw;
1320 			struct aac_sge_ieee1212 *sg;
1321 			u_int32_t min_size = PAGE_SIZE, cur_size;
1322 			int conformable = TRUE;
1323 
1324 			raw = (struct aac_raw_io2 *)&fib->data[0];
1325 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1326 			raw->sgeCnt = nseg;
1327 
1328 			for (i = 0; i < nseg; i++) {
1329 				cur_size = segs[i].ds_len;
1330 				sg[i].addrHigh = 0;
1331 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1332 				sg[i].length = cur_size;
1333 				sg[i].flags = 0;
1334 				if (i == 0) {
1335 					raw->sgeFirstSize = cur_size;
1336 				} else if (i == 1) {
1337 					raw->sgeNominalSize = cur_size;
1338 					min_size = cur_size;
1339 				} else if ((i+1) < nseg &&
1340 					cur_size != raw->sgeNominalSize) {
1341 					conformable = FALSE;
1342 					if (cur_size < min_size)
1343 						min_size = cur_size;
1344 				}
1345 			}
1346 
1347 			/* not conformable: evaluate required sg elements */
1348 			if (!conformable) {
1349 				int j, err_found, nseg_new = nseg;
1350 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1351 					err_found = FALSE;
1352 					nseg_new = 2;
1353 					for (j = 1; j < nseg - 1; ++j) {
1354 						if (sg[j].length % (i*PAGE_SIZE)) {
1355 							err_found = TRUE;
1356 							break;
1357 						}
1358 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1359 					}
1360 					if (!err_found)
1361 						break;
1362 				}
1363 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1364 					!(sc->hint_flags & 4))
1365 					nseg = aac_convert_sgraw2(sc,
1366 						raw, i, nseg, nseg_new);
1367 			} else {
1368 				raw->flags |= RIO2_SGL_CONFORMANT;
1369 			}
1370 
1371 			/* update the FIB size for the s/g count */
1372 			fib->Header.Size += nseg *
1373 				sizeof(struct aac_sge_ieee1212);
1374 
1375 		} else if (fib->Header.Command == RawIo) {
1376 			struct aac_sg_tableraw *sg;
1377 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1378 			sg->SgCount = nseg;
1379 			for (i = 0; i < nseg; i++) {
1380 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1381 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1382 				sg->SgEntryRaw[i].Next = 0;
1383 				sg->SgEntryRaw[i].Prev = 0;
1384 				sg->SgEntryRaw[i].Flags = 0;
1385 			}
1386 			/* update the FIB size for the s/g count */
1387 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1388 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1389 			struct aac_sg_table *sg;
1390 			sg = cm->cm_sgtable;
1391 			sg->SgCount = nseg;
1392 			for (i = 0; i < nseg; i++) {
1393 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1394 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1395 			}
1396 			/* update the FIB size for the s/g count */
1397 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1398 		} else {
1399 			struct aac_sg_table64 *sg;
1400 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1401 			sg->SgCount = nseg;
1402 			for (i = 0; i < nseg; i++) {
1403 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1404 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1405 			}
1406 			/* update the FIB size for the s/g count */
1407 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1408 		}
1409 	}
1410 
1411 	/* Fix up the address values in the FIB.  Use the command array index
1412 	 * instead of a pointer since these fields are only 32 bits.  Shift
1413 	 * the SenderFibAddress over to make room for the fast response bit
1414 	 * and for the AIF bit
1415 	 */
1416 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1417 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1418 
1419 	/* save a pointer to the command for speedy reverse-lookup */
1420 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1421 
1422 	if (cm->cm_passthr_dmat == 0) {
1423 		if (cm->cm_flags & AAC_CMD_DATAIN)
1424 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1425 							BUS_DMASYNC_PREREAD);
1426 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1427 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1428 							BUS_DMASYNC_PREWRITE);
1429 	}
1430 
1431 	cm->cm_flags |= AAC_CMD_MAPPED;
1432 
1433 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1434 		u_int32_t wait = 0;
1435 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1436 	} else if (cm->cm_flags & AAC_CMD_WAIT) {
1437 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1438 	} else {
1439 		int count = 10000000L;
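		/*
		 * Poll until the adapter accepts the command; if it never
		 * does, unmap the command, freeze the queue and requeue it.
		 */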
1440 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1441 			if (--count == 0) {
1442 				aac_unmap_command(cm);
1443 				sc->flags |= AAC_QUEUE_FRZN;
1444 				aac_requeue_ready(cm);
1445 			}
1446 			DELAY(5);			/* wait 5 usec. */
1447 		}
1448 	}
1449 }
1450 
1451 
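/*
 * Rebuild a non-conformant RAW2 s/g list so that every intermediate element
 * is exactly 'pages' pages long, allowing the firmware's RIO2_SGL_CONFORMANT
 * fast path to be used.
 */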
1452 static int
1453 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1454 				   int pages, int nseg, int nseg_new)
1455 {
1456 	struct aac_sge_ieee1212 *sge;
1457 	int i, j, pos;
1458 	u_int32_t addr_low;
1459 
1460 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1461 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1462 	if (sge == NULL)
1463 		return nseg;
1464 
1465 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1466 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1467 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1468 			sge[pos].addrLow = addr_low;
1469 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1470 			if (addr_low < raw->sge[i].addrLow)
1471 				sge[pos].addrHigh++;
1472 			sge[pos].length = pages * PAGE_SIZE;
1473 			sge[pos].flags = 0;
1474 			pos++;
1475 		}
1476 	}
1477 	sge[pos] = raw->sge[nseg-1];
1478 	for (i = 1; i < nseg_new; ++i)
1479 		raw->sge[i] = sge[i];
1480 
1481 	free(sge, M_AACRAIDBUF);
1482 	raw->sgeCnt = nseg_new;
1483 	raw->flags |= RIO2_SGL_CONFORMANT;
1484 	raw->sgeNominalSize = pages * PAGE_SIZE;
1485 	return nseg_new;
1486 }
1487 
1488 
1489 /*
1490  * Unmap a command from controller-visible space.
1491  */
1492 static void
1493 aac_unmap_command(struct aac_command *cm)
1494 {
1495 	struct aac_softc *sc;
1496 
1497 	sc = cm->cm_sc;
1498 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1499 
1500 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1501 		return;
1502 
1503 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1504 		if (cm->cm_flags & AAC_CMD_DATAIN)
1505 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1506 					BUS_DMASYNC_POSTREAD);
1507 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1508 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1509 					BUS_DMASYNC_POSTWRITE);
1510 
1511 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1512 	}
1513 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1514 }
1515 
1516 /*
1517  * Hardware Interface
1518  */
1519 
1520 /*
1521  * Map helper for the common structure - record its bus address.
1522  */
1523 static void
1524 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1525 {
1526 	struct aac_softc *sc;
1527 
1528 	sc = (struct aac_softc *)arg;
1529 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1530 
1531 	sc->aac_common_busaddr = segs[0].ds_addr;
1532 }
1533 
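/*
 * Wait for the controller to come ready, check the firmware version and
 * supported options, and read back its preferred operating parameters.
 */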
1534 static int
1535 aac_check_firmware(struct aac_softc *sc)
1536 {
1537 	u_int32_t code, major, minor, maxsize;
1538 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1539 	time_t then;
1540 
1541 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1542 
1543 	/* check if flash update is running */
1544 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1545 		then = time_uptime;
1546 		do {
1547 			code = AAC_GET_FWSTATUS(sc);
1548 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1549 				device_printf(sc->aac_dev,
1550 						  "FATAL: controller not coming ready, "
1551 						   "status %x\n", code);
1552 				return(ENXIO);
1553 			}
1554 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1555 		/*
1556 		 * Delay 10 seconds.  The firmware is doing a soft reset right
1557 		 * now, so do not read the scratch pad register at this time.
1558 		 */
1559 		waitCount = 10 * 10000;
1560 		while (waitCount) {
1561 			DELAY(100);		/* delay 100 microseconds */
1562 			waitCount--;
1563 		}
1564 	}
1565 
1566 	/*
1567 	 * Wait for the adapter to come ready.
1568 	 */
1569 	then = time_uptime;
1570 	do {
1571 		code = AAC_GET_FWSTATUS(sc);
1572 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1573 			device_printf(sc->aac_dev,
1574 				      "FATAL: controller not coming ready, "
1575 					   "status %x\n", code);
1576 			return(ENXIO);
1577 		}
1578 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1579 
1580 	/*
1581 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1582 	 * firmware version 1.x are not compatible with this driver.
1583 	 */
1584 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1585 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1586 				     NULL, NULL)) {
1587 			device_printf(sc->aac_dev,
1588 				      "Error reading firmware version\n");
1589 			return (EIO);
1590 		}
1591 
1592 		/* These numbers are stored as ASCII! */
1593 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1594 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1595 		if (major == 1) {
1596 			device_printf(sc->aac_dev,
1597 			    "Firmware version %d.%d is not supported.\n",
1598 			    major, minor);
1599 			return (EINVAL);
1600 		}
1601 	}
1602 	/*
1603 	 * Retrieve the capabilities/supported options word so we know what
1604 	 * work-arounds to enable.  Some firmware revs don't support this
1605 	 * command.
1606 	 */
1607 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1608 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1609 			device_printf(sc->aac_dev,
1610 			     "RequestAdapterInfo failed\n");
1611 			return (EIO);
1612 		}
1613 	} else {
1614 		options = AAC_GET_MAILBOX(sc, 1);
1615 		atu_size = AAC_GET_MAILBOX(sc, 2);
1616 		sc->supported_options = options;
1617 
1618 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1619 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1620 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1621 		if (options & AAC_SUPPORTED_NONDASD)
1622 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1623 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1624 			&& (sizeof(bus_addr_t) > 4)
1625 			&& (sc->hint_flags & 0x1)) {
1626 			device_printf(sc->aac_dev,
1627 			    "Enabling 64-bit address support\n");
1628 			sc->flags |= AAC_FLAGS_SG_64BIT;
1629 		}
1630 		if (sc->aac_if.aif_send_command) {
1631 			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1632 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1633 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1634 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1635 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1636 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1637 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1638 		}
1639 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1640 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1641 	}
1642 
1643 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1644 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1645 		return (ENXIO);
1646 	}
1647 
1648 	if (sc->hint_flags & 2) {
1649 		device_printf(sc->aac_dev,
1650 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1651 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1652 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1653 		device_printf(sc->aac_dev,
1654 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1655 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1656 	}
1657 
1658 	/* Check for broken hardware that only supports a lower number of commands */
1659 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1660 
1661 	/* Remap mem. resource, if required */
1662 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1663 		bus_release_resource(
1664 			sc->aac_dev, SYS_RES_MEMORY,
1665 			sc->aac_regs_rid0, sc->aac_regs_res0);
1666 		sc->aac_regs_res0 = bus_alloc_resource(
1667 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1668 			0ul, ~0ul, atu_size, RF_ACTIVE);
1669 		if (sc->aac_regs_res0 == NULL) {
1670 			sc->aac_regs_res0 = bus_alloc_resource_any(
1671 				sc->aac_dev, SYS_RES_MEMORY,
1672 				&sc->aac_regs_rid0, RF_ACTIVE);
1673 			if (sc->aac_regs_res0 == NULL) {
1674 				device_printf(sc->aac_dev,
1675 					"couldn't allocate register window\n");
1676 				return (ENXIO);
1677 			}
1678 		}
1679 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1680 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1681 	}
1682 
1683 	/* Read preferred settings */
1684 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1685 	sc->aac_max_sectors = 128;				/* 64KB */
1686 	sc->aac_max_aif = 1;
1687 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1688 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1689 		 - sizeof(struct aac_blockwrite64))
1690 		 / sizeof(struct aac_sg_entry64);
1691 	else
1692 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 		 - sizeof(struct aac_blockwrite))
1694 		 / sizeof(struct aac_sg_entry);
1695 
1696 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1697 		options = AAC_GET_MAILBOX(sc, 1);
1698 		sc->aac_max_fib_size = (options & 0xFFFF);
1699 		sc->aac_max_sectors = (options >> 16) << 1;
1700 		options = AAC_GET_MAILBOX(sc, 2);
1701 		sc->aac_sg_tablesize = (options >> 16);
1702 		options = AAC_GET_MAILBOX(sc, 3);
1703 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1704 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1705 			sc->aac_max_fibs = (options & 0xFFFF);
1706 		options = AAC_GET_MAILBOX(sc, 4);
1707 		sc->aac_max_aif = (options & 0xFFFF);
1708 		options = AAC_GET_MAILBOX(sc, 5);
1709 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1710 	}
1711 
1712 	maxsize = sc->aac_max_fib_size + 31;
1713 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1714 		maxsize += sizeof(struct aac_fib_xporthdr);
1715 	if (maxsize > PAGE_SIZE) {
1716 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1717 		maxsize = PAGE_SIZE;
1718 	}
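	/* how many FIBs fit into a single page-sized FIB allocation */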
1719 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1720 
1721 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1722 		sc->flags |= AAC_FLAGS_RAW_IO;
1723 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1724 	}
1725 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1726 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1727 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1728 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1729 	}
1730 
1731 	aacraid_get_fw_debug_buffer(sc);
1732 	return (0);
1733 }
1734 
1735 static int
1736 aac_init(struct aac_softc *sc)
1737 {
1738 	struct aac_adapter_init	*ip;
1739 	int i, error;
1740 
1741 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1742 
1743 	/* reset rrq indexes (one slice of aac_vector_cap entries per vector) */
1744 	sc->aac_fibs_pushed_no = 0;
1745 	for (i = 0; i < sc->aac_max_msix; i++)
1746 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1747 
1748 	/*
1749 	 * Fill in the init structure.  This tells the adapter about the
1750 	 * physical location of various important shared data structures.
1751 	 */
1752 	ip = &sc->aac_common->ac_init;
1753 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1754 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1755 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1756 		sc->flags |= AAC_FLAGS_RAW_IO;
1757 	}
1758 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1759 
1760 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1761 					 offsetof(struct aac_common, ac_fibs);
1762 	ip->AdapterFibsVirtualAddress = 0;
1763 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1764 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1765 
1766 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1767 				  offsetof(struct aac_common, ac_printf);
1768 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1769 
1770 	/*
1771 	 * The adapter assumes that pages are 4K in size, except on some
1772  	 * broken firmware versions that do the page->byte conversion twice,
1773 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1774 	 * Round up since the granularity is so high.
1775 	 */
1776 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1777 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1778 		ip->HostPhysMemPages =
1779 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1780 	}
1781 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1782 
1783 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1784 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1785 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1786 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1787 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1788 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1789 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1790 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1791 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1792 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1793 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1794 	}
1795 	ip->MaxNumAif = sc->aac_max_aif;
1796 	ip->HostRRQ_AddrLow =
1797 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1798 	/* always 32-bit address */
1799 	ip->HostRRQ_AddrHigh = 0;
1800 
1801 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1802 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1803 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1804 		device_printf(sc->aac_dev, "Power Management enabled\n");
1805 	}
1806 
1807 	ip->MaxIoCommands = sc->aac_max_fibs;
1808 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1809 	ip->MaxFibSize = sc->aac_max_fib_size;
1810 
1811 	/*
1812 	 * Do controller-type-specific initialisation (clear pending outbound doorbell bits)
1813 	 */
1814 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1815 
1816 	/*
1817 	 * Give the init structure to the controller.
1818 	 */
1819 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1820 			     sc->aac_common_busaddr +
1821 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1822 			     NULL, NULL)) {
1823 		device_printf(sc->aac_dev,
1824 			      "error establishing init structure\n");
1825 		error = EIO;
1826 		goto out;
1827 	}
1828 
1829 	/*
1830 	 * Check configuration issues
1831 	 */
1832 	if ((error = aac_check_config(sc)) != 0)
1833 		goto out;
1834 
1835 	error = 0;
1836 out:
1837 	return(error);
1838 }
1839 
1840 static void
1841 aac_define_int_mode(struct aac_softc *sc)
1842 {
1843 	device_t dev;
1844 	int cap, msi_count, error = 0;
1845 	uint32_t val;
1846 
1847 	dev = sc->aac_dev;
1848 
1849 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1850 	if (sc->aac_max_msix == 0) {
1851 		sc->aac_max_msix = 1;
1852 		sc->aac_vector_cap = sc->aac_max_fibs;
1853 		return;
1854 	}
1855 
1856 	/* OS capability */
1857 	msi_count = pci_msix_count(dev);
1858 	if (msi_count > AAC_MAX_MSIX)
1859 		msi_count = AAC_MAX_MSIX;
1860 	if (msi_count > sc->aac_max_msix)
1861 		msi_count = sc->aac_max_msix;
1862 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1863 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1864 				   "will try MSI\n", msi_count, error);
1865 		pci_release_msi(dev);
1866 	} else {
1867 		sc->msi_enabled = TRUE;
1868 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1869 			msi_count);
1870 	}
1871 
1872 	if (!sc->msi_enabled) {
1873 		msi_count = 1;
1874 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1875 			device_printf(dev, "alloc msi failed - err=%d; "
1876 				           "will use INTx\n", error);
1877 			pci_release_msi(dev);
1878 		} else {
1879 			sc->msi_enabled = TRUE;
1880 			device_printf(dev, "using MSI interrupts\n");
1881 		}
1882 	}
1883 
1884 	if (sc->msi_enabled) {
1885 		/* now read controller capability from PCI config. space */
1886 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1887 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1888 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1889 			pci_release_msi(dev);
1890 			sc->msi_enabled = FALSE;
1891 		}
1892 	}
1893 
1894 	if (!sc->msi_enabled) {
1895 		device_printf(dev, "using legacy interrupts\n");
1896 		sc->aac_max_msix = 1;
1897 	} else {
1898 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1899 		if (sc->aac_max_msix > msi_count)
1900 			sc->aac_max_msix = msi_count;
1901 	}
1902 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1903 
1904 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1905 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1906 }
1907 
1908 static int
1909 aac_find_pci_capability(struct aac_softc *sc, int cap)
1910 {
1911 	device_t dev;
1912 	uint32_t status;
1913 	uint8_t ptr;
1914 
1915 	dev = sc->aac_dev;
1916 
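	/*
	 * Walk the PCI capability list: give up if the status register
	 * reports no capabilities, pick the list head according to the
	 * header type, then follow the chain until the requested capability
	 * ID is found.
	 */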
1917 	status = pci_read_config(dev, PCIR_STATUS, 2);
1918 	if (!(status & PCIM_STATUS_CAPPRESENT))
1919 		return (0);
1920 
1921 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1922 	switch (status & PCIM_HDRTYPE) {
1923 	case 0:
1924 	case 1:
1925 		ptr = PCIR_CAP_PTR;
1926 		break;
1927 	case 2:
1928 		ptr = PCIR_CAP_PTR_2;
1929 		break;
1930 	default:
1931 		return (0);
1933 	}
1934 	ptr = pci_read_config(dev, ptr, 1);
1935 
1936 	while (ptr != 0) {
1937 		int next, val;
1938 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1939 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1940 		if (val == cap)
1941 			return (ptr);
1942 		ptr = next;
1943 	}
1944 
1945 	return (0);
1946 }
1947 
1948 static int
1949 aac_setup_intr(struct aac_softc *sc)
1950 {
1951 	int i, msi_count, rid;
1952 	struct resource *res;
1953 	void *tag;
1954 
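	/*
	 * Allocate one IRQ resource and interrupt handler per vector.  With
	 * MSI/MSI-X the resource IDs start at 1; the legacy INTx resource
	 * uses rid 0.
	 */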
1955 	msi_count = sc->aac_max_msix;
1956 	rid = (sc->msi_enabled ? 1:0);
1957 
1958 	for (i = 0; i < msi_count; i++, rid++) {
1959 		if ((res = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &rid,
1960 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1961 			device_printf(sc->aac_dev, "can't allocate interrupt\n");
1962 			return (EINVAL);
1963 		}
1964 		sc->aac_irq_rid[i] = rid;
1965 		sc->aac_irq[i] = res;
1966 		if (aac_bus_setup_intr(sc->aac_dev, res,
1967 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1968 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1969 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1970 			return (EINVAL);
1971 		}
1972 		sc->aac_msix[i].vector_no = i;
1973 		sc->aac_msix[i].sc = sc;
1974 		sc->aac_intr[i] = tag;
1975 	}
1976 
1977 	return (0);
1978 }
1979 
1980 static int
1981 aac_check_config(struct aac_softc *sc)
1982 {
1983 	struct aac_fib *fib;
1984 	struct aac_cnt_config *ccfg;
1985 	struct aac_cf_status_hdr *cf_shdr;
1986 	int rval;
1987 
1988 	mtx_lock(&sc->aac_io_lock);
1989 	aac_alloc_sync_fib(sc, &fib);
1990 
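	/*
	 * Ask the controller for its configuration status.  If the required
	 * action it reports is no worse than CFACT_PAUSE, commit the
	 * configuration automatically; otherwise flag it as too dangerous
	 * to auto-commit.
	 */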
1991 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1992 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1993 	ccfg->Command = VM_ContainerConfig;
1994 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1995 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1996 
1997 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
1998 		sizeof (struct aac_cnt_config));
1999 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2000 	if (rval == 0 && ccfg->Command == ST_OK &&
2001 		ccfg->CTCommand.param[0] == CT_OK) {
2002 		if (cf_shdr->action <= CFACT_PAUSE) {
2003 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2004 			ccfg->Command = VM_ContainerConfig;
2005 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2006 
2007 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2008 				sizeof (struct aac_cnt_config));
2009 			if (rval == 0 && ccfg->Command == ST_OK &&
2010 				ccfg->CTCommand.param[0] == CT_OK) {
2011 				/* successful completion */
2012 				rval = 0;
2013 			} else {
2014 				/* auto commit aborted due to error(s) */
2015 				rval = -2;
2016 			}
2017 		} else {
2018 			/* auto commit aborted due to adapter indicating
2019 			   config. issues too dangerous to auto commit  */
2020 			rval = -3;
2021 		}
2022 	} else {
2023 		/* error */
2024 		rval = -1;
2025 	}
2026 
2027 	aac_release_sync_fib(sc);
2028 	mtx_unlock(&sc->aac_io_lock);
2029 	return(rval);
2030 }
2031 
2032 /*
2033  * Send a synchronous command to the controller and wait for a result.
2034  * Indicate if the controller completed the command with an error status.
2035  */
2036 int
2037 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2038 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2039 		 u_int32_t *sp, u_int32_t *r1)
2040 {
2041 	time_t then;
2042 	u_int32_t status;
2043 
2044 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2045 
2046 	/* populate the mailbox */
2047 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2048 
2049 	/* ensure the sync command doorbell flag is cleared */
2050 	if (!sc->msi_enabled)
2051 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2052 
2053 	/* then set it to signal the adapter */
2054 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2055 
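	/*
	 * Unless this is a SYNCFIB request whose caller passed in a zeroed
	 * status word, spin (with a timeout) until the adapter raises the
	 * sync command doorbell, then collect the status and the optional
	 * return parameter from the mailbox.
	 */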
2056 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2057 		/* spin waiting for the command to complete */
2058 		then = time_uptime;
2059 		do {
2060 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2061 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2062 				return(EIO);
2063 			}
2064 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2065 
2066 		/* clear the completion flag */
2067 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2068 
2069 		/* get the command status */
2070 		status = AAC_GET_MAILBOX(sc, 0);
2071 		if (sp != NULL)
2072 			*sp = status;
2073 
2074 		/* return parameter */
2075 		if (r1 != NULL)
2076 			*r1 = AAC_GET_MAILBOX(sc, 1);
2077 
2078 		if (status != AAC_SRB_STS_SUCCESS)
2079 			return (-1);
2080 	}
2081 	return(0);
2082 }
2083 
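/*
 * Build and issue a FIB through the synchronous command interface and wait
 * for the controller to complete it.
 */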
2084 static int
2085 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2086 		 struct aac_fib *fib, u_int16_t datasize)
2087 {
2088 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2089 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2090 
2091 	if (datasize > AAC_FIB_DATASIZE)
2092 		return(EINVAL);
2093 
2094 	/*
2095 	 * Set up the sync FIB
2096 	 */
2097 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2098 				AAC_FIBSTATE_INITIALISED |
2099 				AAC_FIBSTATE_EMPTY;
2100 	fib->Header.XferState |= xferstate;
2101 	fib->Header.Command = command;
2102 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2103 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2104 	fib->Header.SenderSize = sizeof(struct aac_fib);
2105 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2106 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2107 		offsetof(struct aac_common, ac_sync_fib);
2108 
2109 	/*
2110 	 * Give the FIB to the controller, wait for a response.
2111 	 */
2112 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2113 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2114 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2115 		return(EIO);
2116 	}
2117 
2118 	return (0);
2119 }
2120 
2121 /*
2122  * Check for commands that have been outstanding for a suspiciously long time,
2123  * and complain about them.
2124  */
2125 static void
2126 aac_timeout(struct aac_softc *sc)
2127 {
2128 	struct aac_command *cm;
2129 	time_t deadline;
2130 	int timedout;
2131 
2132 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2133 	/*
2134 	 * Traverse the busy command list and complain about late commands,
2135 	 * but only once.
2136 	 */
2137 	timedout = 0;
2138 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2139 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2140 		if (cm->cm_timestamp < deadline) {
2141 			device_printf(sc->aac_dev,
2142 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2143 				      cm, (int)(time_uptime-cm->cm_timestamp));
2144 			AAC_PRINT_FIB(sc, cm->cm_fib);
2145 			timedout++;
2146 		}
2147 	}
2148 
2149 	if (timedout)
2150 		aac_reset_adapter(sc);
2151 	aacraid_print_queues(sc);
2152 }
2153 
2154 /*
2155  * Interface Function Vectors
2156  */
2157 
2158 /*
2159  * Read the current firmware status word.
2160  */
2161 static int
2162 aac_src_get_fwstatus(struct aac_softc *sc)
2163 {
2164 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2165 
2166 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2167 }
2168 
2169 /*
2170  * Notify the controller of a change in a given queue
2171  */
2172 static void
2173 aac_src_qnotify(struct aac_softc *sc, int qbit)
2174 {
2175 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2176 
2177 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2178 }
2179 
2180 /*
2181  * Get the interrupt reason bits
2182  */
2183 static int
2184 aac_src_get_istatus(struct aac_softc *sc)
2185 {
2186 	int val;
2187 
2188 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2189 
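	/*
	 * In MSI/MSI-X mode the sync command completion is signalled through
	 * the MSI doorbell register; translate it to the generic
	 * AAC_DB_SYNC_COMMAND bit.
	 */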
2190 	if (sc->msi_enabled) {
2191 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2192 		if (val & AAC_MSI_SYNC_STATUS)
2193 			val = AAC_DB_SYNC_COMMAND;
2194 		else
2195 			val = 0;
2196 	} else {
2197 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2198 	}
2199 	return(val);
2200 }
2201 
2202 /*
2203  * Clear some interrupt reason bits
2204  */
2205 static void
2206 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2207 {
2208 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2209 
2210 	if (sc->msi_enabled) {
2211 		if (mask == AAC_DB_SYNC_COMMAND)
2212 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2213 	} else {
2214 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2215 	}
2216 }
2217 
2218 /*
2219  * Populate the mailbox and set the command word
2220  */
2221 static void
2222 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2223 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2224 {
2225 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2226 
2227 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2228 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2229 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2232 }
2233 
2234 static void
2235 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2236 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2237 {
2238 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2239 
2240 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2241 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2242 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2245 }
2246 
2247 /*
2248  * Fetch the immediate command status word
2249  */
2250 static int
2251 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2252 {
2253 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2254 
2255 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2256 }
2257 
2258 static int
2259 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2260 {
2261 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2262 
2263 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2264 }
2265 
2266 /*
2267  * Set/clear interrupt masks
2268  */
2269 static void
2270 aac_src_access_devreg(struct aac_softc *sc, int mode)
2271 {
2272 	u_int32_t val;
2273 
2274 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2275 
2276 	switch (mode) {
2277 	case AAC_ENABLE_INTERRUPT:
2278 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2279 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2280 				           AAC_INT_ENABLE_TYPE1_INTX));
2281 		break;
2282 
2283 	case AAC_DISABLE_INTERRUPT:
2284 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2285 		break;
2286 
2287 	case AAC_ENABLE_MSIX:
2288 		/* set bit 6 */
2289 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2290 		val |= 0x40;
2291 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2292 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2293 		/* unmask int. */
2294 		val = PMC_ALL_INTERRUPT_BITS;
2295 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2296 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2297 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2298 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2299 		break;
2300 
2301 	case AAC_DISABLE_MSIX:
2302 		/* reset bit 6 */
2303 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2304 		val &= ~0x40;
2305 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2306 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2307 		break;
2308 
2309 	case AAC_CLEAR_AIF_BIT:
2310 		/* set bit 5 */
2311 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2312 		val |= 0x20;
2313 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2314 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2315 		break;
2316 
2317 	case AAC_CLEAR_SYNC_BIT:
2318 		/* set bit 4 */
2319 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2320 		val |= 0x10;
2321 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2322 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2323 		break;
2324 
2325 	case AAC_ENABLE_INTX:
2326 		/* set bit 7 */
2327 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2328 		val |= 0x80;
2329 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2330 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 		/* unmask int. */
2332 		val = PMC_ALL_INTERRUPT_BITS;
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2334 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2335 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2336 			val & (~(PMC_GLOBAL_INT_BIT2)));
2337 		break;
2338 
2339 	default:
2340 		break;
2341 	}
2342 }
2343 
2344 /*
2345  * New comm. interface: Send command functions
2346  */
2347 static int
2348 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2349 {
2350 	struct aac_fib_xporthdr *pFibX;
2351 	u_int32_t fibsize, high_addr;
2352 	u_int64_t address;
2353 
2354 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2355 
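	/*
	 * With MSI-X enabled, spread non-AIF FIBs across the response
	 * vectors: starting from the vector suggested by the push counter,
	 * pick the next vector (1..n-1) that still has room in its RRQ,
	 * falling back to vector 0 when all are at capacity, and encode the
	 * chosen vector in the upper 16 bits of the FIB handle.
	 */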
2356 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2357 		sc->aac_max_msix > 1) {
2358 		u_int16_t vector_no, first_choice = 0xffff;
2359 
2360 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2361 		do {
2362 			vector_no += 1;
2363 			if (vector_no == sc->aac_max_msix)
2364 				vector_no = 1;
2365 			if (sc->aac_rrq_outstanding[vector_no] <
2366 				sc->aac_vector_cap)
2367 				break;
2368 			if (0xffff == first_choice)
2369 				first_choice = vector_no;
2370 			else if (vector_no == first_choice)
2371 				break;
2372 		} while (1);
2373 		if (vector_no == first_choice)
2374 			vector_no = 0;
2375 		sc->aac_rrq_outstanding[vector_no]++;
2376 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2377 			sc->aac_fibs_pushed_no = 0;
2378 		else
2379 			sc->aac_fibs_pushed_no++;
2380 
2381 		cm->cm_fib->Header.Handle += (vector_no << 16);
2382 	}
2383 
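	/*
	 * Encode the FIB size (in 128-byte units, minus one) and hand the
	 * FIB to the adapter.  The type2 interface carries the FIB address
	 * directly in the header; older interfaces need a transport header
	 * placed immediately in front of the FIB.
	 */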
2384 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2385 		/* Calculate the fibsize field: number of 128-byte units, minus one */
2386 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2387 		/* Fill new FIB header */
2388 		address = cm->cm_fibphys;
2389 		high_addr = (u_int32_t)(address >> 32);
2390 		if (high_addr == 0L) {
2391 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2392 			cm->cm_fib->Header.u.TimeStamp = 0L;
2393 		} else {
2394 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2395 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2396 		}
2397 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2398 	} else {
2399 		/* Calculate the fibsize field: number of 128-byte units, minus one */
2400 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2401 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2402 		/* Fill XPORT header */
2403 		pFibX = (struct aac_fib_xporthdr *)
2404 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2405 		pFibX->Handle = cm->cm_fib->Header.Handle;
2406 		pFibX->HostAddress = cm->cm_fibphys;
2407 		pFibX->Size = cm->cm_fib->Header.Size;
2408 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2409 		high_addr = (u_int32_t)(address >> 32);
2410 	}
2411 
2412 	if (fibsize > 31)
2413 		fibsize = 31;
2414 	aac_enqueue_busy(cm);
2415 	if (high_addr) {
2416 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2417 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2418 	} else {
2419 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2420 	}
2421 	return 0;
2422 }
2423 
2424 /*
2425  * New comm. interface: get, set outbound queue index
2426  */
2427 static int
2428 aac_src_get_outb_queue(struct aac_softc *sc)
2429 {
2430 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2431 
2432 	return(-1);
2433 }
2434 
2435 static void
2436 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2437 {
2438 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2439 }
2440 
2441 /*
2442  * Debugging and Diagnostics
2443  */
2444 
2445 /*
2446  * Print some information about the controller.
2447  */
2448 static void
2449 aac_describe_controller(struct aac_softc *sc)
2450 {
2451 	struct aac_fib *fib;
2452 	struct aac_adapter_info	*info;
2453 	char *adapter_type = "Adaptec RAID controller";
2454 
2455 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2456 
2457 	mtx_lock(&sc->aac_io_lock);
2458 	aac_alloc_sync_fib(sc, &fib);
2459 
2460 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2461 		fib->data[0] = 0;
2462 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2463 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2464 		else {
2465 			struct aac_supplement_adapter_info *supp_info;
2466 
2467 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2468 			adapter_type = (char *)supp_info->AdapterTypeText;
2469 			sc->aac_feature_bits = supp_info->FeatureBits;
2470 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2471 		}
2472 	}
2473 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2474 		adapter_type,
2475 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2476 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2477 
2478 	fib->data[0] = 0;
2479 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2480 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2481 		aac_release_sync_fib(sc);
2482 		mtx_unlock(&sc->aac_io_lock);
2483 		return;
2484 	}
2485 
2486 	/* save the kernel revision structure for later use */
2487 	info = (struct aac_adapter_info *)&fib->data[0];
2488 	sc->aac_revision = info->KernelRevision;
2489 
2490 	if (bootverbose) {
2491 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2492 		    "(%dMB cache, %dMB execution), %s\n",
2493 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2494 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2495 		    info->BufferMem / (1024 * 1024),
2496 		    info->ExecutionMem / (1024 * 1024),
2497 		    aac_describe_code(aac_battery_platform,
2498 		    info->batteryPlatform));
2499 
2500 		device_printf(sc->aac_dev,
2501 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2502 		    info->KernelRevision.external.comp.major,
2503 		    info->KernelRevision.external.comp.minor,
2504 		    info->KernelRevision.external.comp.dash,
2505 		    info->KernelRevision.buildNumber,
2506 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2507 
2508 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2509 			      sc->supported_options,
2510 			      "\20"
2511 			      "\1SNAPSHOT"
2512 			      "\2CLUSTERS"
2513 			      "\3WCACHE"
2514 			      "\4DATA64"
2515 			      "\5HOSTTIME"
2516 			      "\6RAID50"
2517 			      "\7WINDOW4GB"
2518 			      "\10SCSIUPGD"
2519 			      "\11SOFTERR"
2520 			      "\12NORECOND"
2521 			      "\13SGMAP64"
2522 			      "\14ALARM"
2523 			      "\15NONDASD"
2524 			      "\16SCSIMGT"
2525 			      "\17RAIDSCSI"
2526 			      "\21ADPTINFO"
2527 			      "\22NEWCOMM"
2528 			      "\23ARRAY64BIT"
2529 			      "\24HEATSENSOR");
2530 	}
2531 
2532 	aac_release_sync_fib(sc);
2533 	mtx_unlock(&sc->aac_io_lock);
2534 }
2535 
2536 /*
2537  * Look up a text description of a numeric error code and return a pointer to
2538  * same.
2539  */
2540 static char *
2541 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2542 {
2543 	int i;
2544 
2545 	for (i = 0; table[i].string != NULL; i++)
2546 		if (table[i].code == code)
2547 			return(table[i].string);
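	/* Not found; the entry after the NULL sentinel is the table's catch-all description. */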
2548 	return(table[i + 1].string);
2549 }
2550 
2551 /*
2552  * Management Interface
2553  */
2554 
2555 static int
2556 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2557 {
2558 	struct aac_softc *sc;
2559 
2560 	sc = dev->si_drv1;
2561 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2562 #if __FreeBSD_version >= 702000
2563 	device_busy(sc->aac_dev);
2564 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2565 #endif
2566 	return 0;
2567 }
2568 
2569 static int
2570 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2571 {
2572 	union aac_statrequest *as;
2573 	struct aac_softc *sc;
2574 	int error = 0;
2575 
2576 	as = (union aac_statrequest *)arg;
2577 	sc = dev->si_drv1;
2578 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2579 
2580 	switch (cmd) {
2581 	case AACIO_STATS:
2582 		switch (as->as_item) {
2583 		case AACQ_FREE:
2584 		case AACQ_READY:
2585 		case AACQ_BUSY:
2586 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2587 			      sizeof(struct aac_qstat));
2588 			break;
2589 		default:
2590 			error = ENOENT;
2591 			break;
2592 		}
2593 		break;
2594 
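	/*
	 * For the native FSACTL_* requests the argument is a pointer to the
	 * actual user buffer; dereference it and fall through to the shared
	 * Linux-compatible handler.  The same pattern repeats for the ioctl
	 * pairs below.
	 */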
2595 	case FSACTL_SENDFIB:
2596 	case FSACTL_SEND_LARGE_FIB:
2597 		arg = *(caddr_t*)arg;
2598 	case FSACTL_LNX_SENDFIB:
2599 	case FSACTL_LNX_SEND_LARGE_FIB:
2600 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2601 		error = aac_ioctl_sendfib(sc, arg);
2602 		break;
2603 	case FSACTL_SEND_RAW_SRB:
2604 		arg = *(caddr_t*)arg;
2605 	case FSACTL_LNX_SEND_RAW_SRB:
2606 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2607 		error = aac_ioctl_send_raw_srb(sc, arg);
2608 		break;
2609 	case FSACTL_AIF_THREAD:
2610 	case FSACTL_LNX_AIF_THREAD:
2611 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2612 		error = EINVAL;
2613 		break;
2614 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2615 		arg = *(caddr_t*)arg;
2616 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2617 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2618 		error = aac_open_aif(sc, arg);
2619 		break;
2620 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2621 		arg = *(caddr_t*)arg;
2622 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2623 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2624 		error = aac_getnext_aif(sc, arg);
2625 		break;
2626 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2627 		arg = *(caddr_t*)arg;
2628 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2629 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2630 		error = aac_close_aif(sc, arg);
2631 		break;
2632 	case FSACTL_MINIPORT_REV_CHECK:
2633 		arg = *(caddr_t*)arg;
2634 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2635 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2636 		error = aac_rev_check(sc, arg);
2637 		break;
2638 	case FSACTL_QUERY_DISK:
2639 		arg = *(caddr_t*)arg;
2640 	case FSACTL_LNX_QUERY_DISK:
2641 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2642 		error = aac_query_disk(sc, arg);
2643 		break;
2644 	case FSACTL_DELETE_DISK:
2645 	case FSACTL_LNX_DELETE_DISK:
2646 		/*
2647 		 * We don't trust userland to tell us when to delete a
2648 		 * container; rather, we rely on an AIF coming from the
2649 		 * controller.
2650 		 */
2651 		error = 0;
2652 		break;
2653 	case FSACTL_GET_PCI_INFO:
2654 		arg = *(caddr_t*)arg;
2655 	case FSACTL_LNX_GET_PCI_INFO:
2656 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2657 		error = aac_get_pci_info(sc, arg);
2658 		break;
2659 	case FSACTL_GET_FEATURES:
2660 		arg = *(caddr_t*)arg;
2661 	case FSACTL_LNX_GET_FEATURES:
2662 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2663 		error = aac_supported_features(sc, arg);
2664 		break;
2665 	default:
2666 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2667 		error = EINVAL;
2668 		break;
2669 	}
2670 	return(error);
2671 }
2672 
2673 static int
2674 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2675 {
2676 	struct aac_softc *sc;
2677 	struct aac_fib_context *ctx;
2678 	int revents;
2679 
2680 	sc = dev->si_drv1;
2681 	revents = 0;
2682 
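	/*
	 * Report the device readable if any registered AIF context still has
	 * unread entries, i.e. its read index trails the queue write index
	 * or the queue has wrapped past it.
	 */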
2683 	mtx_lock(&sc->aac_io_lock);
2684 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2685 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2686 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2687 				revents |= poll_events & (POLLIN | POLLRDNORM);
2688 				break;
2689 			}
2690 		}
2691 	}
2692 	mtx_unlock(&sc->aac_io_lock);
2693 
2694 	if (revents == 0) {
2695 		if (poll_events & (POLLIN | POLLRDNORM))
2696 			selrecord(td, &sc->rcv_select);
2697 	}
2698 
2699 	return (revents);
2700 }
2701 
2702 static void
2703 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2704 {
2705 
2706 	switch (event->ev_type) {
2707 	case AAC_EVENT_CMFREE:
2708 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2709 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2710 			aacraid_add_event(sc, event);
2711 			return;
2712 		}
2713 		free(event, M_AACRAIDBUF);
2714 		wakeup(arg);
2715 		break;
2716 	default:
2717 		break;
2718 	}
2719 }
2720 
2721 /*
2722  * Send a FIB supplied from userspace
2723  */
2724 static int
2725 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2726 {
2727 	struct aac_command *cm;
2728 	int size, error;
2729 
2730 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2731 
2732 	cm = NULL;
2733 
2734 	/*
2735 	 * Get a command
2736 	 */
2737 	mtx_lock(&sc->aac_io_lock);
2738 	if (aacraid_alloc_command(sc, &cm)) {
2739 		struct aac_event *event;
2740 
2741 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2742 		    M_NOWAIT | M_ZERO);
2743 		if (event == NULL) {
2744 			error = EBUSY;
2745 			mtx_unlock(&sc->aac_io_lock);
2746 			goto out;
2747 		}
2748 		event->ev_type = AAC_EVENT_CMFREE;
2749 		event->ev_callback = aac_ioctl_event;
2750 		event->ev_arg = &cm;
2751 		aacraid_add_event(sc, event);
2752 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2753 	}
2754 	mtx_unlock(&sc->aac_io_lock);
2755 
2756 	/*
2757 	 * Fetch the FIB header, then re-copy to get data as well.
2758 	 */
2759 	if ((error = copyin(ufib, cm->cm_fib,
2760 			    sizeof(struct aac_fib_header))) != 0)
2761 		goto out;
2762 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2763 	if (size > sc->aac_max_fib_size) {
2764 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2765 			      size, sc->aac_max_fib_size);
2766 		size = sc->aac_max_fib_size;
2767 	}
2768 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2769 		goto out;
2770 	cm->cm_fib->Header.Size = size;
2771 	cm->cm_timestamp = time_uptime;
2772 	cm->cm_datalen = 0;
2773 
2774 	/*
2775 	 * Pass the FIB to the controller, wait for it to complete.
2776 	 */
2777 	mtx_lock(&sc->aac_io_lock);
2778 	error = aacraid_wait_command(cm);
2779 	mtx_unlock(&sc->aac_io_lock);
2780 	if (error != 0) {
2781 		device_printf(sc->aac_dev,
2782 			      "aacraid_wait_command return %d\n", error);
2783 		goto out;
2784 	}
2785 
2786 	/*
2787 	 * Copy the FIB and data back out to the caller.
2788 	 */
2789 	size = cm->cm_fib->Header.Size;
2790 	if (size > sc->aac_max_fib_size) {
2791 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2792 			      size, sc->aac_max_fib_size);
2793 		size = sc->aac_max_fib_size;
2794 	}
2795 	error = copyout(cm->cm_fib, ufib, size);
2796 
2797 out:
2798 	if (cm != NULL) {
2799 		mtx_lock(&sc->aac_io_lock);
2800 		aacraid_release_command(cm);
2801 		mtx_unlock(&sc->aac_io_lock);
2802 	}
2803 	return(error);
2804 }
2805 
2806 /*
2807  * Send a passthrough FIB supplied from userspace
2808  */
2809 static int
2810 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2811 {
2812 	struct aac_command *cm;
2813 	struct aac_fib *fib;
2814 	struct aac_srb *srbcmd;
2815 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2816 	void *user_reply;
2817 	int error, transfer_data = 0;
2818 	bus_dmamap_t orig_map = 0;
2819 	u_int32_t fibsize = 0;
2820 	u_int64_t srb_sg_address;
2821 	u_int32_t srb_sg_bytecount;
2822 
2823 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2824 
2825 	cm = NULL;
2826 
2827 	mtx_lock(&sc->aac_io_lock);
2828 	if (aacraid_alloc_command(sc, &cm)) {
2829 		struct aac_event *event;
2830 
2831 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2832 		    M_NOWAIT | M_ZERO);
2833 		if (event == NULL) {
2834 			error = EBUSY;
2835 			mtx_unlock(&sc->aac_io_lock);
2836 			goto out;
2837 		}
2838 		event->ev_type = AAC_EVENT_CMFREE;
2839 		event->ev_callback = aac_ioctl_event;
2840 		event->ev_arg = &cm;
2841 		aacraid_add_event(sc, event);
2842 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2843 	}
2844 	mtx_unlock(&sc->aac_io_lock);
2845 
2846 	cm->cm_data = NULL;
2847 	/* save original dma map */
2848 	orig_map = cm->cm_datamap;
2849 
2850 	fib = cm->cm_fib;
2851 	srbcmd = (struct aac_srb *)fib->data;
2852 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2853 		sizeof (u_int32_t))) != 0)
2854 		goto out;
2855 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2856 		error = EINVAL;
2857 		goto out;
2858 	}
2859 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2860 		goto out;
2861 
2862 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2863 	srbcmd->retry_limit = 0;	/* obsolete */
2864 
2865 	/* only one sg element from userspace supported */
2866 	if (srbcmd->sg_map.SgCount > 1) {
2867 		error = EINVAL;
2868 		goto out;
2869 	}
2870 	/* check fibsize: it tells us whether 32- or 64-bit S/G entries follow */
2871 	if (fibsize == (sizeof(struct aac_srb) +
2872 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2873 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2874 		srb_sg_bytecount = sgp->SgByteCount;
2875 		srb_sg_address = (u_int64_t)sgp->SgAddress;
2876 	} else if (fibsize == (sizeof(struct aac_srb) +
2877 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2878 #ifdef __LP64__
2879 		struct aac_sg_entry64 *sgp =
2880 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2881 		srb_sg_bytecount = sgp->SgByteCount;
2882 		srb_sg_address = sgp->SgAddress;
2883 		if (srb_sg_address > 0xffffffffull &&
2884 			!(sc->flags & AAC_FLAGS_SG_64BIT))
2885 #endif
2886 		{
2887 			error = EINVAL;
2888 			goto out;
2889 		}
2890 	} else {
2891 		error = EINVAL;
2892 		goto out;
2893 	}
2894 	user_reply = (char *)arg + fibsize;
2895 	srbcmd->data_len = srb_sg_bytecount;
2896 	if (srbcmd->sg_map.SgCount == 1)
2897 		transfer_data = 1;
2898 
2899 	if (transfer_data) {
2900 		/*
2901 		 * Create DMA tag for the passthr. data buffer and allocate it.
2902 		 */
2903 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2904 			1, 0,			/* algnmnt, boundary */
2905 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2906 			BUS_SPACE_MAXADDR_32BIT :
2907 			0x7fffffff,		/* lowaddr */
2908 			BUS_SPACE_MAXADDR, 	/* highaddr */
2909 			NULL, NULL, 		/* filter, filterarg */
2910 			srb_sg_bytecount, 	/* size */
2911 			sc->aac_sg_tablesize,	/* nsegments */
2912 			srb_sg_bytecount, 	/* maxsegsize */
2913 			0,			/* flags */
2914 			NULL, NULL,		/* No locking needed */
2915 			&cm->cm_passthr_dmat)) {
2916 			error = ENOMEM;
2917 			goto out;
2918 		}
2919 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2920 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2921 			error = ENOMEM;
2922 			goto out;
2923 		}
2924 		/* fill some cm variables */
2925 		cm->cm_datalen = srb_sg_bytecount;
2926 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2927 			cm->cm_flags |= AAC_CMD_DATAIN;
2928 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2929 			cm->cm_flags |= AAC_CMD_DATAOUT;
2930 
2931 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2932 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2933 				cm->cm_data, cm->cm_datalen)) != 0)
2934 				goto out;
2935 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2936 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2937 				BUS_DMASYNC_PREWRITE);
2938 		}
2939 	}
2940 
2941 	/* build the FIB */
2942 	fib->Header.Size = sizeof(struct aac_fib_header) +
2943 		sizeof(struct aac_srb);
2944 	fib->Header.XferState =
2945 		AAC_FIBSTATE_HOSTOWNED   |
2946 		AAC_FIBSTATE_INITIALISED |
2947 		AAC_FIBSTATE_EMPTY	 |
2948 		AAC_FIBSTATE_FROMHOST	 |
2949 		AAC_FIBSTATE_REXPECTED   |
2950 		AAC_FIBSTATE_NORM	 |
2951 		AAC_FIBSTATE_ASYNC;
2952 
2953 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2954 		ScsiPortCommandU64 : ScsiPortCommand;
2955 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2956 
2957 	/* send command */
2958 	if (transfer_data) {
2959 		bus_dmamap_load(cm->cm_passthr_dmat,
2960 			cm->cm_datamap, cm->cm_data,
2961 			cm->cm_datalen,
2962 			aacraid_map_command_sg, cm, 0);
2963 	} else {
2964 		aacraid_map_command_sg(cm, NULL, 0, 0);
2965 	}
2966 
2967 	/* wait for completion */
2968 	mtx_lock(&sc->aac_io_lock);
2969 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2970 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2971 	mtx_unlock(&sc->aac_io_lock);
2972 
2973 	/* copy data */
2974 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2975 		if ((error = copyout(cm->cm_data,
2976 			(void *)(uintptr_t)srb_sg_address,
2977 			cm->cm_datalen)) != 0)
2978 			goto out;
2979 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2980 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2981 				BUS_DMASYNC_POSTREAD);
2982 	}
2983 
2984 	/* status */
2985 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2986 
2987 out:
2988 	if (cm && cm->cm_data) {
2989 		if (transfer_data)
2990 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2991 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2992 		cm->cm_datamap = orig_map;
2993 	}
2994 	if (cm && cm->cm_passthr_dmat)
2995 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
2996 	if (cm) {
2997 		mtx_lock(&sc->aac_io_lock);
2998 		aacraid_release_command(cm);
2999 		mtx_unlock(&sc->aac_io_lock);
3000 	}
3001 	return(error);
3002 }
3003 
3004 /*
3005  * Request an AIF from the controller (new comm. type1)
3006  */
3007 static void
3008 aac_request_aif(struct aac_softc *sc)
3009 {
3010 	struct aac_command *cm;
3011 	struct aac_fib *fib;
3012 
3013 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3014 
3015 	if (aacraid_alloc_command(sc, &cm)) {
3016 		sc->aif_pending = 1;
3017 		return;
3018 	}
3019 	sc->aif_pending = 0;
3020 
3021 	/* build the FIB */
3022 	fib = cm->cm_fib;
3023 	fib->Header.Size = sizeof(struct aac_fib);
3024 	fib->Header.XferState =
3025 		AAC_FIBSTATE_HOSTOWNED   |
3026 		AAC_FIBSTATE_INITIALISED |
3027 		AAC_FIBSTATE_EMPTY	 |
3028 		AAC_FIBSTATE_FROMHOST	 |
3029 		AAC_FIBSTATE_REXPECTED   |
3030 		AAC_FIBSTATE_NORM	 |
3031 		AAC_FIBSTATE_ASYNC;
3032 	/* set AIF marker */
3033 	fib->Header.Handle = 0x00800000;
3034 	fib->Header.Command = AifRequest;
3035 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3036 
3037 	aacraid_map_command_sg(cm, NULL, 0, 0);
3038 }
3039 
3040 
3041 #if __FreeBSD_version >= 702000
3042 /*
3043  * cdevpriv interface private destructor.
3044  */
3045 static void
3046 aac_cdevpriv_dtor(void *arg)
3047 {
3048 	struct aac_softc *sc;
3049 
3050 	sc = arg;
3051 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3052 	mtx_lock(&Giant);
3053 	device_unbusy(sc->aac_dev);
3054 	mtx_unlock(&Giant);
3055 }
3056 #else
3057 static int
3058 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3059 {
3060 	struct aac_softc *sc;
3061 
3062 	sc = dev->si_drv1;
3063 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3064 	return 0;
3065 }
3066 #endif
3067 
3068 /*
3069  * Handle an AIF sent to us by the controller; queue it for later reference.
3070  * If the queue fills up, then drop the older entries.
3071  */
3072 static void
3073 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3074 {
3075 	struct aac_aif_command *aif;
3076 	struct aac_container *co, *co_next;
3077 	struct aac_fib_context *ctx;
3078 	struct aac_fib *sync_fib;
3079 	struct aac_mntinforesp mir;
3080 	int next, current, found;
3081 	int count = 0, changed = 0, i = 0;
3082 	u_int32_t channel, uid;
3083 
3084 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3085 
3086 	aif = (struct aac_aif_command*)&fib->data[0];
3087 	aacraid_print_aif(sc, aif);
3088 
3089 	/* Is it an event that we should care about? */
3090 	switch (aif->command) {
3091 	case AifCmdEventNotify:
3092 		switch (aif->data.EN.type) {
3093 		case AifEnAddContainer:
3094 		case AifEnDeleteContainer:
3095 			/*
3096 			 * A container was added or deleted, but the message
3097 			 * doesn't tell us anything else!  Re-enumerate the
3098 			 * containers and sort things out.
3099 			 */
3100 			aac_alloc_sync_fib(sc, &sync_fib);
3101 			do {
3102 				/*
3103 				 * Ask the controller for its containers one at
3104 				 * a time.
3105 				 * XXX What if the controller's list changes
3106 				 * midway through this enumeration?
3107 				 * XXX This should be done async.
3108 				 */
3109 				if (aac_get_container_info(sc, sync_fib, i,
3110 					&mir, &uid) != 0)
3111 					continue;
3112 				if (i == 0)
3113 					count = mir.MntRespCount;
3114 				/*
3115 				 * Check the container against our list.
3116 				 * co->co_found was already set to 0 in a
3117 				 * previous run.
3118 				 */
3119 				if ((mir.Status == ST_OK) &&
3120 				    (mir.MntTable[0].VolType != CT_NONE)) {
3121 					found = 0;
3122 					TAILQ_FOREACH(co,
3123 						      &sc->aac_container_tqh,
3124 						      co_link) {
3125 						if (co->co_mntobj.ObjectId ==
3126 						    mir.MntTable[0].ObjectId) {
3127 							co->co_found = 1;
3128 							found = 1;
3129 							break;
3130 						}
3131 					}
3132 					/*
3133 					 * If the container matched, continue
3134 					 * in the list.
3135 					 */
3136 					if (found) {
3137 						i++;
3138 						continue;
3139 					}
3140 
3141 					/*
3142 					 * This is a new container.  Do all the
3143 					 * appropriate things to set it up.
3144 					 */
3145 					aac_add_container(sc, &mir, 1, uid);
3146 					changed = 1;
3147 				}
3148 				i++;
3149 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3150 			aac_release_sync_fib(sc);
3151 
3152 			/*
3153 			 * Go through our list of containers and see which ones
3154 			 * were not marked 'found'.  Since the controller didn't
3155 			 * list them they must have been deleted.  Do the
3156 			 * appropriate steps to destroy the device.  Also reset
3157 			 * the co->co_found field.
3158 			 */
3159 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3160 			while (co != NULL) {
3161 				if (co->co_found == 0) {
3162 					co_next = TAILQ_NEXT(co, co_link);
3163 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3164 						     co_link);
3165 					free(co, M_AACRAIDBUF);
3166 					changed = 1;
3167 					co = co_next;
3168 				} else {
3169 					co->co_found = 0;
3170 					co = TAILQ_NEXT(co, co_link);
3171 				}
3172 			}
3173 
3174 			/* Attach the newly created containers */
3175 			if (changed) {
3176 				if (sc->cam_rescan_cb != NULL)
3177 					sc->cam_rescan_cb(sc, 0,
3178 				    	AAC_CAM_TARGET_WILDCARD);
3179 			}
3180 
3181 			break;
3182 
3183 		case AifEnEnclosureManagement:
3184 			switch (aif->data.EN.data.EEE.eventType) {
3185 			case AIF_EM_DRIVE_INSERTION:
3186 			case AIF_EM_DRIVE_REMOVAL:
3187 				channel = aif->data.EN.data.EEE.unitID;
3188 				if (sc->cam_rescan_cb != NULL)
3189 					sc->cam_rescan_cb(sc,
3190 					    ((channel>>24) & 0xF) + 1,
3191 					    (channel & 0xFFFF));
3192 				break;
3193 			}
3194 			break;
3195 
3196 		case AifEnAddJBOD:
3197 		case AifEnDeleteJBOD:
3198 		case AifRawDeviceRemove:
3199 			channel = aif->data.EN.data.ECE.container;
3200 			if (sc->cam_rescan_cb != NULL)
3201 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3202 				    AAC_CAM_TARGET_WILDCARD);
3203 			break;
3204 
3205 		default:
3206 			break;
3207 		}
3208 
3209 	default:
3210 		break;
3211 	}
3212 
3213 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3214 	current = sc->aifq_idx;
3215 	next = (current + 1) % AAC_AIFQ_LENGTH;
3216 	if (next == 0)
3217 		sc->aifq_filled = 1;
3218 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3219 	/* modify AIF contexts */
3220 	if (sc->aifq_filled) {
3221 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3222 			if (next == ctx->ctx_idx)
3223 				ctx->ctx_wrap = 1;
3224 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3225 				ctx->ctx_idx = next;
3226 		}
3227 	}
3228 	sc->aifq_idx = next;
3229 	/* On the off chance that someone is sleeping for an aif... */
3230 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3231 		wakeup(sc->aac_aifq);
3232 	/* Wakeup any poll()ers */
3233 	selwakeuppri(&sc->rcv_select, PRIBIO);
3234 
3235 	return;
3236 }
3237 
3238 /*
3239  * Return the Revision of the driver to userspace and check to see if the
3240  * userspace app is possibly compatible.  This is extremely bogus since
3241  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3242  * returning what the card reported.
3243  */
3244 static int
3245 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3246 {
3247 	struct aac_rev_check rev_check;
3248 	struct aac_rev_check_resp rev_check_resp;
3249 	int error = 0;
3250 
3251 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3252 
3253 	/*
3254 	 * Copyin the revision struct from userspace
3255 	 */
3256 	if ((error = copyin(udata, (caddr_t)&rev_check,
3257 			sizeof(struct aac_rev_check))) != 0) {
3258 		return error;
3259 	}
3260 
3261 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3262 	      rev_check.callingRevision.buildNumber);
3263 
3264 	/*
3265 	 * Doctor up the response struct.
3266 	 */
3267 	rev_check_resp.possiblyCompatible = 1;
3268 	rev_check_resp.adapterSWRevision.external.comp.major =
3269 	    AAC_DRIVER_MAJOR_VERSION;
3270 	rev_check_resp.adapterSWRevision.external.comp.minor =
3271 	    AAC_DRIVER_MINOR_VERSION;
3272 	rev_check_resp.adapterSWRevision.external.comp.type =
3273 	    AAC_DRIVER_TYPE;
3274 	rev_check_resp.adapterSWRevision.external.comp.dash =
3275 	    AAC_DRIVER_BUGFIX_LEVEL;
3276 	rev_check_resp.adapterSWRevision.buildNumber =
3277 	    AAC_DRIVER_BUILD;
3278 
3279 	return(copyout((caddr_t)&rev_check_resp, udata,
3280 			sizeof(struct aac_rev_check_resp)));
3281 }
3282 
3283 /*
3284  * Pass the fib context to the caller
3285  */
3286 static int
3287 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3288 {
3289 	struct aac_fib_context *fibctx, *ctx;
3290 	int error = 0;
3291 
3292 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3293 
3294 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3295 	if (fibctx == NULL)
3296 		return (ENOMEM);
3297 
3298 	mtx_lock(&sc->aac_io_lock);
3299 	/* all elements are already 0, add to queue */
3300 	if (sc->fibctx == NULL)
3301 		sc->fibctx = fibctx;
3302 	else {
3303 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3304 			;
3305 		ctx->next = fibctx;
3306 		fibctx->prev = ctx;
3307 	}
3308 
3309 	/* derive a unique handle from the context pointer, bumping it on collision */
3310 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3311 	ctx = sc->fibctx;
3312 	while (ctx != fibctx) {
3313 		if (ctx->unique == fibctx->unique) {
3314 			fibctx->unique++;
3315 			ctx = sc->fibctx;
3316 		} else {
3317 			ctx = ctx->next;
3318 		}
3319 	}
3320 
3321 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3322 	mtx_unlock(&sc->aac_io_lock);
3323 	if (error)
3324 		aac_close_aif(sc, (caddr_t)ctx);
3325 	return error;
3326 }
3327 
3328 /*
3329  * Close the caller's fib context
3330  */
3331 static int
3332 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3333 {
3334 	struct aac_fib_context *ctx;
3335 
3336 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3337 
3338 	mtx_lock(&sc->aac_io_lock);
3339 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3340 		if (ctx->unique == *(uint32_t *)&arg) {
3341 			if (ctx == sc->fibctx)
3342 				sc->fibctx = NULL;
3343 			else {
3344 				ctx->prev->next = ctx->next;
3345 				if (ctx->next)
3346 					ctx->next->prev = ctx->prev;
3347 			}
3348 			break;
3349 		}
3350 	}
3351 	if (ctx)
3352 		free(ctx, M_AACRAIDBUF);
3353 
3354 	mtx_unlock(&sc->aac_io_lock);
3355 	return 0;
3356 }
3357 
3358 /*
3359  * Pass the caller the next AIF in their queue
3360  */
3361 static int
3362 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3363 {
3364 	struct get_adapter_fib_ioctl agf;
3365 	struct aac_fib_context *ctx;
3366 	int error;
3367 
3368 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3369 
3370 	mtx_lock(&sc->aac_io_lock);
3371 	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3372 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3373 			if (agf.AdapterFibContext == ctx->unique)
3374 				break;
3375 		}
3376 		if (!ctx) {
3377 			mtx_unlock(&sc->aac_io_lock);
3378 			return (EFAULT);
3379 		}
3380 
3381 		error = aac_return_aif(sc, ctx, agf.AifFib);
3382 		if (error == EAGAIN && agf.Wait) {
3383 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3384 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3385 			while (error == EAGAIN) {
3386 				mtx_unlock(&sc->aac_io_lock);
3387 				error = tsleep(sc->aac_aifq, PRIBIO |
3388 					       PCATCH, "aacaif", 0);
3389 				mtx_lock(&sc->aac_io_lock);
3390 				if (error == 0)
3391 					error = aac_return_aif(sc, ctx, agf.AifFib);
3392 			}
3393 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3394 		}
3395 	}
3396 	mtx_unlock(&sc->aac_io_lock);
3397 	return(error);
3398 }
3399 
3400 /*
3401  * Hand the next AIF off the top of the queue out to userspace.
3402  */
3403 static int
3404 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3405 {
3406 	int current, error;
3407 
3408 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3409 
3410 	current = ctx->ctx_idx;
3411 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3412 		/* empty */
3413 		return (EAGAIN);
3414 	}
3415 	error =
3416 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3417 	if (error)
3418 		device_printf(sc->aac_dev,
3419 		    "aac_return_aif: copyout returned %d\n", error);
3420 	else {
3421 		ctx->ctx_wrap = 0;
3422 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3423 	}
3424 	return(error);
3425 }
3426 
3427 static int
3428 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3429 {
3430 	struct aac_pci_info {
3431 		u_int32_t bus;
3432 		u_int32_t slot;
3433 	} pciinf;
3434 	int error;
3435 
3436 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3437 
3438 	pciinf.bus = pci_get_bus(sc->aac_dev);
3439 	pciinf.slot = pci_get_slot(sc->aac_dev);
3440 
3441 	error = copyout((caddr_t)&pciinf, uptr,
3442 			sizeof(struct aac_pci_info));
3443 
3444 	return (error);
3445 }
3446 
3447 static int
3448 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3449 {
3450 	struct aac_features f;
3451 	int error;
3452 
3453 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3454 
3455 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3456 		return (error);
3457 
3458 	/*
3459 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3460 	 * ALL zero in the featuresState, the driver will return the current
3461 	 * state of all the supported features, the data field will not be
3462 	 * valid.
3463 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3464 	 * a specific bit set in the featuresState, the driver will return the
3465 	 * current state of this specific feature and whatever data that are
3466 	 * associated with the feature in the data field or perform whatever
3467 	 * action needed indicates in the data field.
3468 	 */
3469 	 if (f.feat.fValue == 0) {
3470 		f.feat.fBits.largeLBA =
3471 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3472 		f.feat.fBits.JBODSupport = 1;
3473 		/* TODO: In the future, add other features state here as well */
3474 	} else {
3475 		if (f.feat.fBits.largeLBA)
3476 			f.feat.fBits.largeLBA =
3477 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3478 		/* TODO: Add other features state and data in the future */
3479 	}
3480 
3481 	error = copyout(&f, uptr, sizeof (f));
3482 	return (error);
3483 }
3484 
3485 /*
3486  * Give the userland some information about the container.  The AAC arch
3487  * expects the driver to be a SCSI passthrough type driver, so it expects
3488  * the containers to have b:t:l numbers.  Fake it.
3489  */
3490 static int
3491 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3492 {
3493 	struct aac_query_disk query_disk;
3494 	struct aac_container *co;
3495 	int error, id;
3496 
3497 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3498 
3499 	mtx_lock(&sc->aac_io_lock);
3500 	error = copyin(uptr, (caddr_t)&query_disk,
3501 		       sizeof(struct aac_query_disk));
3502 	if (error) {
3503 		mtx_unlock(&sc->aac_io_lock);
3504 		return (error);
3505 	}
3506 
3507 	id = query_disk.ContainerNumber;
3508 	if (id == -1) {
3509 		mtx_unlock(&sc->aac_io_lock);
3510 		return (EINVAL);
3511 	}
3512 
3513 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3514 		if (co->co_mntobj.ObjectId == id)
3515 			break;
3516 	}
3517 
3518 	if (co == NULL) {
3519 		query_disk.Valid = 0;
3520 		query_disk.Locked = 0;
3521 		query_disk.Deleted = 1;		/* XXX is this right? */
3522 	} else {
3523 		query_disk.Valid = 1;
3524 		query_disk.Locked = 1;
3525 		query_disk.Deleted = 0;
3526 		query_disk.Bus = device_get_unit(sc->aac_dev);
3527 		query_disk.Target = 0;
3528 		query_disk.Lun = 0;
3529 		query_disk.UnMapped = 0;
3530 	}
3531 
3532 	error = copyout((caddr_t)&query_disk, uptr,
3533 			sizeof(struct aac_query_disk));
3534 
3535 	mtx_unlock(&sc->aac_io_lock);
3536 	return (error);
3537 }
3538 
3539 static void
3540 aac_container_bus(struct aac_softc *sc)
3541 {
3542 	struct aac_sim *sim;
3543 	device_t child;
3544 
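	/*
	 * Create a single pseudo "container bus" SIM, attach it as an
	 * "aacraidp" child device and put it on the SIM list.
	 */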
3545 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3546 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3547 	if (sim == NULL) {
3548 		device_printf(sc->aac_dev,
3549 		    "No memory to add container bus\n");
3550 		panic("Out of memory?!");
3551 	}
3552 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3553 	if (child == NULL) {
3554 		device_printf(sc->aac_dev,
3555 		    "device_add_child failed for container bus\n");
3556 		free(sim, M_AACRAIDBUF);
3557 		panic("Out of memory?!");
3558 	}
3559 
3560 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3561 	sim->BusNumber = 0;
3562 	sim->BusType = CONTAINER_BUS;
3563 	sim->InitiatorBusId = -1;
3564 	sim->aac_sc = sc;
3565 	sim->sim_dev = child;
3566 	sim->aac_cam = NULL;
3567 
3568 	device_set_ivars(child, sim);
3569 	device_set_desc(child, "Container Bus");
3570 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3571 	/*
3572 	device_set_desc(child, aac_describe_code(aac_container_types,
3573 			mir->MntTable[0].VolType));
3574 	*/
3575 	bus_generic_attach(sc->aac_dev);
3576 }
3577 
3578 static void
3579 aac_get_bus_info(struct aac_softc *sc)
3580 {
3581 	struct aac_fib *fib;
3582 	struct aac_ctcfg *c_cmd;
3583 	struct aac_ctcfg_resp *c_resp;
3584 	struct aac_vmioctl *vmi;
3585 	struct aac_vmi_businf_resp *vmi_resp;
3586 	struct aac_getbusinf businfo;
3587 	struct aac_sim *caminf;
3588 	device_t child;
3589 	int i, error;
3590 
3591 	mtx_lock(&sc->aac_io_lock);
3592 	aac_alloc_sync_fib(sc, &fib);
3593 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3594 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3595 
3596 	c_cmd->Command = VM_ContainerConfig;
3597 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3598 	c_cmd->param = 0;
3599 
3600 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3601 	    sizeof(struct aac_ctcfg));
3602 	if (error) {
3603 		device_printf(sc->aac_dev, "Error %d sending "
3604 		    "VM_ContainerConfig command\n", error);
3605 		aac_release_sync_fib(sc);
3606 		mtx_unlock(&sc->aac_io_lock);
3607 		return;
3608 	}
3609 
3610 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3611 	if (c_resp->Status != ST_OK) {
3612 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3613 		    c_resp->Status);
3614 		aac_release_sync_fib(sc);
3615 		mtx_unlock(&sc->aac_io_lock);
3616 		return;
3617 	}
3618 
3619 	sc->scsi_method_id = c_resp->param;
3620 
3621 	vmi = (struct aac_vmioctl *)&fib->data[0];
3622 	bzero(vmi, sizeof(struct aac_vmioctl));
3623 
3624 	vmi->Command = VM_Ioctl;
3625 	vmi->ObjType = FT_DRIVE;
3626 	vmi->MethId = sc->scsi_method_id;
3627 	vmi->ObjId = 0;
3628 	vmi->IoctlCmd = GetBusInfo;
3629 
3630 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3631 	    sizeof(struct aac_vmi_businf_resp));
3632 	if (error) {
3633 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3634 		    error);
3635 		aac_release_sync_fib(sc);
3636 		mtx_unlock(&sc->aac_io_lock);
3637 		return;
3638 	}
3639 
3640 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3641 	if (vmi_resp->Status != ST_OK) {
3642 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3643 		    vmi_resp->Status);
3644 		aac_release_sync_fib(sc);
3645 		mtx_unlock(&sc->aac_io_lock);
3646 		return;
3647 	}
3648 
3649 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3650 	aac_release_sync_fib(sc);
3651 	mtx_unlock(&sc->aac_io_lock);
3652 
3653 	for (i = 0; i < businfo.BusCount; i++) {
3654 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3655 			continue;
3656 
3657 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3658 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3659 		if (caminf == NULL) {
3660 			device_printf(sc->aac_dev,
3661 			    "No memory to add passthrough bus %d\n", i);
3662 			break;
3663 		}
3664 
3665 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3666 		if (child == NULL) {
3667 			device_printf(sc->aac_dev,
3668 			    "device_add_child failed for passthrough bus %d\n",
3669 			    i);
3670 			free(caminf, M_AACRAIDBUF);
3671 			break;
3672 		}
3673 
3674 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3675 		caminf->BusNumber = i + 1;
3676 		caminf->BusType = PASSTHROUGH_BUS;
3677 		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3678 		caminf->aac_sc = sc;
3679 		caminf->sim_dev = child;
3680 		caminf->aac_cam = NULL;
3681 
3682 		device_set_ivars(child, caminf);
3683 		device_set_desc(child, "SCSI Passthrough Bus");
3684 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3685 	}
3686 }
3687 
3688 /*
3689  * Check to see if the kernel is up and running. If we are in a
3690  * BlinkLED state, return the BlinkLED code.
3691  */
3692 static u_int32_t
3693 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3694 {
3695 	u_int32_t ret;
3696 
3697 	ret = AAC_GET_FWSTATUS(sc);
3698 
3699 	if (ret & AAC_UP_AND_RUNNING)
3700 		ret = 0;
3701 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3702 		*bled = (ret >> 16) & 0xff;
3703 
3704 	return (ret);
3705 }
3706 
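/*
 * Typical use (sketch only): poll the firmware health and report a
 * BlinkLED (firmware panic) code.  A return value of 0 means the
 * adapter kernel is up and running; this fragment assumes the usual
 * driver context (a valid softc).
 *
 *	u_int8_t bled = 0;
 *	u_int32_t code = aac_check_adapter_health(sc, &bled);
 *
 *	if (code != 0 && (code & AAC_KERNEL_PANIC))
 *		device_printf(sc->aac_dev,
 *		    "firmware panic, BlinkLED code 0x%x\n", bled);
 */
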
3707 /*
3708  * After an IOP reset the card basically has to be re-initialized as
3709  * if it were coming up from a cold boot, and the driver is responsible
3710  * for any I/O that was outstanding to the adapter at the time of the
3711  * IOP reset.  To prepare for this, the init code is kept modular, with
3712  * the ability to call it from multiple places.
3713  */
3714 static int
3715 aac_reset_adapter(struct aac_softc *sc)
3716 {
3717 	struct aac_command *cm;
3718 	struct aac_fib *fib;
3719 	struct aac_pause_command *pc;
3720 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3721 	int msi_enabled_orig;
3722 
3723 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3724 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3725 
3726 	if (sc->aac_state & AAC_STATE_RESET) {
3727 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3728 		return (EINVAL);
3729 	}
3730 	sc->aac_state |= AAC_STATE_RESET;
3731 
3732 	/* disable interrupt */
3733 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3734 
3735 	/*
3736 	 * Abort all pending commands:
3737 	 * a) on the controller
3738 	 */
3739 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3740 		cm->cm_flags |= AAC_CMD_RESET;
3741 
3742 		/* is there a completion handler? */
3743 		if (cm->cm_complete != NULL) {
3744 			cm->cm_complete(cm);
3745 		} else {
3746 			/* assume that someone is sleeping on this
3747 			 * command
3748 			 */
3749 			wakeup(cm);
3750 		}
3751 	}
3752 
3753 	/* b) in the waiting queues */
3754 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3755 		cm->cm_flags |= AAC_CMD_RESET;
3756 
3757 		/* is there a completion handler? */
3758 		if (cm->cm_complete != NULL) {
3759 			cm->cm_complete(cm);
3760 		} else {
3761 			/* assume that someone is sleeping on this
3762 			 * command
3763 			 */
3764 			wakeup(cm);
3765 		}
3766 	}
3767 
3768 	/* flush drives */
3769 	if (aac_check_adapter_health(sc, NULL) == 0) {
3770 		mtx_unlock(&sc->aac_io_lock);
3771 		(void) aacraid_shutdown(sc->aac_dev);
3772 		mtx_lock(&sc->aac_io_lock);
3773 	}
3774 
3775 	/* execute IOP reset */
3776 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3777 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3778 
3779 		/* We need to wait for 5 seconds before accessing the MU again:
3780 		 * 5 * 10000 * 100us = 5,000,000us = 5000ms = 5s
3781 		 */
3782 		waitCount = 5 * 10000;
3783 		while (waitCount) {
3784 			DELAY(100);			/* delay 100 microseconds */
3785 			waitCount--;
3786 		}
3787 	} else if ((aacraid_sync_command(sc,
3788 		AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
3789 		/* call IOP_RESET for older firmware */
3790 		if ((aacraid_sync_command(sc,
3791 			AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
3792 
3793 			if (status == AAC_SRB_STS_INVALID_REQUEST)
3794 				device_printf(sc->aac_dev, "IOP_RESET not supported\n");
3795 			else
3796 				/* probably timeout */
3797 				device_printf(sc->aac_dev, "IOP_RESET failed\n");
3798 
3799 			/* unwind aac_shutdown() */
3800 			aac_alloc_sync_fib(sc, &fib);
3801 			pc = (struct aac_pause_command *)&fib->data[0];
3802 			pc->Command = VM_ContainerConfig;
3803 			pc->Type = CT_PAUSE_IO;
3804 			pc->Timeout = 1;
3805 			pc->Min = 1;
3806 			pc->NoRescan = 1;
3807 
3808 			(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
3809 				sizeof (struct aac_pause_command));
3810 			aac_release_sync_fib(sc);
3811 
3812 			goto finish;
3813 		}
3814 	} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
3815 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3816 		/*
3817 		 * We need to wait for 5 seconds before accessing the doorbell
3818 		 * again: 5 * 10000 * 100us = 5,000,000us = 5000ms = 5s
3819 		 */
3820 		waitCount = 5 * 10000;
3821 		while (waitCount) {
3822 			DELAY(100);		/* delay 100 microseconds */
3823 			waitCount--;
3824 		}
3825 	}
3826 
3827 	/*
3828 	 * Initialize the adapter.
3829 	 */
3830 	max_msix_orig = sc->aac_max_msix;
3831 	msi_enabled_orig = sc->msi_enabled;
3832 	sc->msi_enabled = FALSE;
3833 	if (aac_check_firmware(sc) != 0)
3834 		goto finish;
3835 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3836 		sc->aac_max_msix = max_msix_orig;
3837 		if (msi_enabled_orig) {
3838 			sc->msi_enabled = msi_enabled_orig;
3839 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3840 		}
3841 		mtx_unlock(&sc->aac_io_lock);
3842 		aac_init(sc);
3843 		mtx_lock(&sc->aac_io_lock);
3844 	}
3845 
3846 finish:
3847 	sc->aac_state &= ~AAC_STATE_RESET;
3848 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3849 	aacraid_startio(sc);
3850 	return (0);
3851 }
3852
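/*
 * Typical call site (sketch only): aac_reset_adapter() must be entered
 * with the I/O lock held (see the mtx_assert() above), e.g. from a
 * timeout or health-check path.  The surrounding logic here is
 * illustrative, not the driver's actual reset trigger.
 *
 *	u_int8_t bled = 0;
 *
 *	mtx_lock(&sc->aac_io_lock);
 *	if (aac_check_adapter_health(sc, &bled) & AAC_KERNEL_PANIC)
 *		aac_reset_adapter(sc);
 *	mtx_unlock(&sc->aac_io_lock);
 */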