xref: /freebsd/sys/dev/aacraid/aacraid.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
36  */
37 #define AAC_DRIVERNAME			"aacraid"
38 
39 #include "opt_aacraid.h"
40 
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/sysent.h>
50 #include <sys/poll.h>
51 #include <sys/ioccom.h>
52 
53 #include <sys/bus.h>
54 #include <sys/conf.h>
55 #include <sys/signalvar.h>
56 #include <sys/time.h>
57 #include <sys/eventhandler.h>
58 #include <sys/rman.h>
59 
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 
66 #include <dev/aacraid/aacraid_reg.h>
67 #include <sys/aac_ioctl.h>
68 #include <dev/aacraid/aacraid_debug.h>
69 #include <dev/aacraid/aacraid_var.h>
70 #include <dev/aacraid/aacraid_endian.h>
71 
72 #ifndef FILTER_HANDLED
73 #define FILTER_HANDLED	0x02
74 #endif
75 
76 static void	aac_add_container(struct aac_softc *sc,
77 				  struct aac_mntinforesp *mir, int f,
78 				  u_int32_t uid);
79 static void	aac_get_bus_info(struct aac_softc *sc);
80 static void	aac_container_bus(struct aac_softc *sc);
81 static void	aac_daemon(void *arg);
82 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
83 							  int pages, int nseg, int nseg_new);
84 
85 /* Command Processing */
86 static void	aac_timeout(struct aac_softc *sc);
87 static void	aac_command_thread(struct aac_softc *sc);
88 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
89 				     u_int32_t xferstate, struct aac_fib *fib,
90 				     u_int16_t datasize);
91 /* Command Buffer Management */
92 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
93 				       int nseg, int error);
94 static int	aac_alloc_commands(struct aac_softc *sc);
95 static void	aac_free_commands(struct aac_softc *sc);
96 static void	aac_unmap_command(struct aac_command *cm);
97 
98 /* Hardware Interface */
99 static int	aac_alloc(struct aac_softc *sc);
100 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
101 			       int error);
102 static int	aac_check_firmware(struct aac_softc *sc);
103 static void	aac_define_int_mode(struct aac_softc *sc);
104 static int	aac_init(struct aac_softc *sc);
105 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
106 static int	aac_setup_intr(struct aac_softc *sc);
107 static int	aac_check_config(struct aac_softc *sc);
108 
109 /* PMC SRC interface */
110 static int	aac_src_get_fwstatus(struct aac_softc *sc);
111 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
112 static int	aac_src_get_istatus(struct aac_softc *sc);
113 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
114 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
115 				    u_int32_t arg0, u_int32_t arg1,
116 				    u_int32_t arg2, u_int32_t arg3);
117 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
118 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
119 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
120 static int aac_src_get_outb_queue(struct aac_softc *sc);
121 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
122 
123 struct aac_interface aacraid_src_interface = {
124 	aac_src_get_fwstatus,
125 	aac_src_qnotify,
126 	aac_src_get_istatus,
127 	aac_src_clear_istatus,
128 	aac_src_set_mailbox,
129 	aac_src_get_mailbox,
130 	aac_src_access_devreg,
131 	aac_src_send_command,
132 	aac_src_get_outb_queue,
133 	aac_src_set_outb_queue
134 };
135 
136 /* PMC SRCv interface */
137 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
138 				    u_int32_t arg0, u_int32_t arg1,
139 				    u_int32_t arg2, u_int32_t arg3);
140 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
141 
142 struct aac_interface aacraid_srcv_interface = {
143 	aac_src_get_fwstatus,
144 	aac_src_qnotify,
145 	aac_src_get_istatus,
146 	aac_src_clear_istatus,
147 	aac_srcv_set_mailbox,
148 	aac_srcv_get_mailbox,
149 	aac_src_access_devreg,
150 	aac_src_send_command,
151 	aac_src_get_outb_queue,
152 	aac_src_set_outb_queue
153 };
154 
155 /* Debugging and Diagnostics */
156 static struct aac_code_lookup aac_cpu_variant[] = {
157 	{"i960JX",		CPUI960_JX},
158 	{"i960CX",		CPUI960_CX},
159 	{"i960HX",		CPUI960_HX},
160 	{"i960RX",		CPUI960_RX},
161 	{"i960 80303",		CPUI960_80303},
162 	{"StrongARM SA110",	CPUARM_SA110},
163 	{"PPC603e",		CPUPPC_603e},
164 	{"XScale 80321",	CPU_XSCALE_80321},
165 	{"MIPS 4KC",		CPU_MIPS_4KC},
166 	{"MIPS 5KC",		CPU_MIPS_5KC},
167 	{"Unknown StrongARM",	CPUARM_xxx},
168 	{"Unknown PowerPC",	CPUPPC_xxx},
169 	{NULL, 0},
170 	{"Unknown processor",	0}
171 };
172 
173 static struct aac_code_lookup aac_battery_platform[] = {
174 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
175 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
176 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
177 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
178 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
179 	{NULL, 0},
180 	{"unknown battery platform",		0}
181 };
182 static void	aac_describe_controller(struct aac_softc *sc);
183 static char	*aac_describe_code(struct aac_code_lookup *table,
184 				   u_int32_t code);
185 
186 /* Management Interface */
187 static d_open_t		aac_open;
188 static d_ioctl_t	aac_ioctl;
189 static d_poll_t		aac_poll;
190 static void		aac_cdevpriv_dtor(void *arg);
191 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
192 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
193 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
194 static void	aac_request_aif(struct aac_softc *sc);
195 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
196 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
197 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_return_aif(struct aac_softc *sc,
200 			       struct aac_fib_context *ctx, caddr_t uptr);
201 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
202 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
204 static void	aac_ioctl_event(struct aac_softc *sc,
205 				struct aac_event *event, void *arg);
206 static int	aac_reset_adapter(struct aac_softc *sc);
207 static int	aac_get_container_info(struct aac_softc *sc,
208 				       struct aac_fib *fib, int cid,
209 				       struct aac_mntinforesp *mir,
210 				       u_int32_t *uid);
211 static u_int32_t
212 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
213 
214 static struct cdevsw aacraid_cdevsw = {
215 	.d_version =	D_VERSION,
216 	.d_flags =	0,
217 	.d_open =	aac_open,
218 	.d_ioctl =	aac_ioctl,
219 	.d_poll =	aac_poll,
220 	.d_name =	"aacraid",
221 };
222 
223 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
224 
225 /* sysctl node */
226 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
227     "AACRAID driver parameters");
228 
229 /*
230  * Device Interface
231  */
232 
233 /*
234  * Initialize the controller and softc
235  */
236 int
237 aacraid_attach(struct aac_softc *sc)
238 {
239 	int error, unit;
240 	struct aac_fib *fib;
241 	struct aac_mntinforesp mir;
242 	int count = 0, i = 0;
243 	u_int32_t uid;
244 
245 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
246 	sc->hint_flags = device_get_flags(sc->aac_dev);
247 	/*
248 	 * Initialize per-controller queues.
249 	 */
250 	aac_initq_free(sc);
251 	aac_initq_ready(sc);
252 	aac_initq_busy(sc);
253 
254 	/* mark controller as suspended until we get ourselves organised */
255 	sc->aac_state |= AAC_STATE_SUSPEND;
256 
257 	/*
258 	 * Check that the firmware on the card is supported.
259 	 */
260 	sc->msi_enabled = sc->msi_tupelo = FALSE;
261 	if ((error = aac_check_firmware(sc)) != 0)
262 		return(error);
263 
264 	/*
265 	 * Initialize locks
266 	 */
267 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
268 	TAILQ_INIT(&sc->aac_container_tqh);
269 	TAILQ_INIT(&sc->aac_ev_cmfree);
270 
271 	/* Initialize the clock daemon callout. */
272 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
273 
274 	/*
275 	 * Initialize the adapter.
276 	 */
277 	if ((error = aac_alloc(sc)) != 0)
278 		return(error);
279 	aac_define_int_mode(sc);
280 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
281 		if ((error = aac_init(sc)) != 0)
282 			return(error);
283 	}
284 
285 	/*
286 	 * Allocate and connect our interrupt.
287 	 */
288 	if ((error = aac_setup_intr(sc)) != 0)
289 		return(error);
290 
291 	/*
292 	 * Print a little information about the controller.
293 	 */
294 	aac_describe_controller(sc);
295 
296 	/*
297 	 * Make the control device.
298 	 */
299 	unit = device_get_unit(sc->aac_dev);
300 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
301 				 0640, "aacraid%d", unit);
302 	sc->aac_dev_t->si_drv1 = sc;
303 
304 	/* Create the AIF thread */
305 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
306 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
307 		panic("Could not create AIF thread");
308 
309 	/* Register the shutdown method to only be called post-dump */
310 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
311 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
312 		device_printf(sc->aac_dev,
313 			      "shutdown event registration failed\n");
314 
315 	/* Find containers */
316 	mtx_lock(&sc->aac_io_lock);
317 	aac_alloc_sync_fib(sc, &fib);
318 	/* loop over possible containers */
319 	do {
320 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
321 			continue;
322 		if (i == 0)
323 			count = mir.MntRespCount;
324 		aac_add_container(sc, &mir, 0, uid);
325 		i++;
326 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
327 	aac_release_sync_fib(sc);
328 	mtx_unlock(&sc->aac_io_lock);
329 
330 	/* Register with CAM for the containers */
331 	TAILQ_INIT(&sc->aac_sim_tqh);
332 	aac_container_bus(sc);
333 	/* Register with CAM for the non-DASD devices */
334 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
335 		aac_get_bus_info(sc);
336 
337 	/* poke the bus to actually attach the child devices */
338 	bus_attach_children(sc->aac_dev);
339 
340 	/* mark the controller up */
341 	sc->aac_state &= ~AAC_STATE_SUSPEND;
342 
343 	/* enable interrupts now */
344 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
345 
346 	mtx_lock(&sc->aac_io_lock);
347 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
348 	mtx_unlock(&sc->aac_io_lock);
349 
350 	return(0);
351 }
352 
353 static void
354 aac_daemon(void *arg)
355 {
356 	struct aac_softc *sc;
357 	struct timeval tv;
358 	struct aac_command *cm;
359 	struct aac_fib *fib;
360 
361 	sc = arg;
362 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
363 
364 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
365 	if (callout_pending(&sc->aac_daemontime) ||
366 	    callout_active(&sc->aac_daemontime) == 0)
367 		return;
368 	getmicrotime(&tv);
369 
370 	if (!aacraid_alloc_command(sc, &cm)) {
371 		fib = cm->cm_fib;
372 		cm->cm_timestamp = time_uptime;
373 		cm->cm_datalen = 0;
374 		cm->cm_flags |= AAC_CMD_WAIT;
375 
376 		fib->Header.Size =
377 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
378 		fib->Header.XferState =
379 			AAC_FIBSTATE_HOSTOWNED   |
380 			AAC_FIBSTATE_INITIALISED |
381 			AAC_FIBSTATE_EMPTY	 |
382 			AAC_FIBSTATE_FROMHOST	 |
383 			AAC_FIBSTATE_REXPECTED   |
384 			AAC_FIBSTATE_NORM	 |
385 			AAC_FIBSTATE_ASYNC	 |
386 			AAC_FIBSTATE_FAST_RESPONSE;
387 		fib->Header.Command = SendHostTime;
388 		*(uint32_t *)fib->data = htole32(tv.tv_sec);
389 
390 		aacraid_map_command_sg(cm, NULL, 0, 0);
391 		aacraid_release_command(cm);
392 	}
393 
394 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
395 }
396 
397 void
398 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
399 {
400 
401 	switch (event->ev_type & AAC_EVENT_MASK) {
402 	case AAC_EVENT_CMFREE:
403 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
404 		break;
405 	default:
406 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
407 		    event->ev_type);
408 		break;
409 	}
410 
411 	return;
412 }
413 
414 /*
415  * Request information of container #cid
416  */
417 static int
418 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
419 		       struct aac_mntinforesp *mir, u_int32_t *uid)
420 {
421 	struct aac_command *cm;
422 	struct aac_fib *fib;
423 	struct aac_mntinfo *mi;
424 	struct aac_cnt_config *ccfg;
425 	int rval;
426 
427 	if (sync_fib == NULL) {
428 		if (aacraid_alloc_command(sc, &cm)) {
429 			device_printf(sc->aac_dev,
430 				"Warning, no free command available\n");
431 			return (-1);
432 		}
433 		fib = cm->cm_fib;
434 	} else {
435 		fib = sync_fib;
436 	}
437 
438 	mi = (struct aac_mntinfo *)&fib->data[0];
439 	/* 4KB support?, 64-bit LBA? */
440 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
441 		mi->Command = VM_NameServeAllBlk;
442 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
443 		mi->Command = VM_NameServe64;
444 	else
445 		mi->Command = VM_NameServe;
446 	mi->MntType = FT_FILESYS;
447 	mi->MntCount = cid;
448 	aac_mntinfo_tole(mi);
449 
450 	if (sync_fib) {
451 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
452 			 sizeof(struct aac_mntinfo))) {
453 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
454 			return (-1);
455 		}
456 	} else {
457 		cm->cm_timestamp = time_uptime;
458 		cm->cm_datalen = 0;
459 
460 		fib->Header.Size =
461 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
462 		fib->Header.XferState =
463 			AAC_FIBSTATE_HOSTOWNED   |
464 			AAC_FIBSTATE_INITIALISED |
465 			AAC_FIBSTATE_EMPTY	 |
466 			AAC_FIBSTATE_FROMHOST	 |
467 			AAC_FIBSTATE_REXPECTED   |
468 			AAC_FIBSTATE_NORM	 |
469 			AAC_FIBSTATE_ASYNC	 |
470 			AAC_FIBSTATE_FAST_RESPONSE;
471 		fib->Header.Command = ContainerCommand;
472 		if (aacraid_wait_command(cm) != 0) {
473 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
474 			aacraid_release_command(cm);
475 			return (-1);
476 		}
477 	}
478 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
479 	aac_mntinforesp_toh(mir);
480 
481 	/* UID */
482 	*uid = cid;
483 	if (mir->MntTable[0].VolType != CT_NONE &&
484 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
485 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
486 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
487 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
488 		}
489 		ccfg = (struct aac_cnt_config *)&fib->data[0];
490 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
491 		ccfg->Command = VM_ContainerConfig;
492 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
493 		ccfg->CTCommand.param[0] = cid;
494 		aac_cnt_config_tole(ccfg);
495 
496 		if (sync_fib) {
497 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
498 				sizeof(struct aac_cnt_config));
499 			aac_cnt_config_toh(ccfg);
500 			if (rval == 0 && ccfg->Command == ST_OK &&
501 				ccfg->CTCommand.param[0] == CT_OK &&
502 				mir->MntTable[0].VolType != CT_PASSTHRU)
503 				*uid = ccfg->CTCommand.param[1];
504 		} else {
505 			fib->Header.Size =
506 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
507 			fib->Header.XferState =
508 				AAC_FIBSTATE_HOSTOWNED   |
509 				AAC_FIBSTATE_INITIALISED |
510 				AAC_FIBSTATE_EMPTY	 |
511 				AAC_FIBSTATE_FROMHOST	 |
512 				AAC_FIBSTATE_REXPECTED   |
513 				AAC_FIBSTATE_NORM	 |
514 				AAC_FIBSTATE_ASYNC	 |
515 				AAC_FIBSTATE_FAST_RESPONSE;
516 			fib->Header.Command = ContainerCommand;
517 			rval = aacraid_wait_command(cm);
518 			aac_cnt_config_toh(ccfg);
519 			if (rval == 0 && ccfg->Command == ST_OK &&
520 				ccfg->CTCommand.param[0] == CT_OK &&
521 				mir->MntTable[0].VolType != CT_PASSTHRU)
522 				*uid = ccfg->CTCommand.param[1];
523 			aacraid_release_command(cm);
524 		}
525 	}
526 
527 	return (0);
528 }
529 
530 /*
531  * Create a device to represent a new container
532  */
533 static void
534 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
535 		  u_int32_t uid)
536 {
537 	struct aac_container *co;
538 
539 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
540 
541 	/*
542 	 * Check container volume type for validity.  Note that many of
543 	 * the possible types may never show up.
544 	 */
545 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
546 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
547 		       M_NOWAIT | M_ZERO);
548 		if (co == NULL) {
549 			panic("Out of memory?!");
550 		}
551 
552 		co->co_found = f;
553 		bcopy(&mir->MntTable[0], &co->co_mntobj,
554 		      sizeof(struct aac_mntobj));
555 		co->co_uid = uid;
556 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
557 	}
558 }
559 
560 /*
561  * Allocate resources associated with (sc)
562  */
563 static int
564 aac_alloc(struct aac_softc *sc)
565 {
566 	bus_size_t maxsize;
567 
568 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
569 
570 	/*
571 	 * Create DMA tag for mapping buffers into controller-addressable space.
572 	 */
573 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
574 			       1, 0, 			/* algnmnt, boundary */
575 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
576 			       BUS_SPACE_MAXADDR :
577 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
578 			       BUS_SPACE_MAXADDR, 	/* highaddr */
579 			       NULL, NULL, 		/* filter, filterarg */
580 			       AAC_MAXIO_SIZE(sc),	/* maxsize */
581 			       sc->aac_sg_tablesize,	/* nsegments */
582 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
583 			       BUS_DMA_ALLOCNOW,	/* flags */
584 			       busdma_lock_mutex,	/* lockfunc */
585 			       &sc->aac_io_lock,	/* lockfuncarg */
586 			       &sc->aac_buffer_dmat)) {
587 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
588 		return (ENOMEM);
589 	}
590 
591 	/*
592 	 * Create DMA tag for mapping FIBs into controller-addressable space..
593 	 */
594 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
595 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
596 			sizeof(struct aac_fib_xporthdr) + 31);
597 	else
598 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
599 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
600 			       1, 0, 			/* algnmnt, boundary */
601 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
602 			       BUS_SPACE_MAXADDR_32BIT :
603 			       0x7fffffff,		/* lowaddr */
604 			       BUS_SPACE_MAXADDR, 	/* highaddr */
605 			       NULL, NULL, 		/* filter, filterarg */
606 			       maxsize,  		/* maxsize */
607 			       1,			/* nsegments */
608 			       maxsize,			/* maxsize */
609 			       0,			/* flags */
610 			       NULL, NULL,		/* No locking needed */
611 			       &sc->aac_fib_dmat)) {
612 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
613 		return (ENOMEM);
614 	}
615 
616 	/*
617 	 * Create DMA tag for the common structure and allocate it.
618 	 */
619 	maxsize = sizeof(struct aac_common);
620 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
621 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
622 			       1, 0,			/* algnmnt, boundary */
623 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
624 			       BUS_SPACE_MAXADDR_32BIT :
625 			       0x7fffffff,		/* lowaddr */
626 			       BUS_SPACE_MAXADDR, 	/* highaddr */
627 			       NULL, NULL, 		/* filter, filterarg */
628 			       maxsize, 		/* maxsize */
629 			       1,			/* nsegments */
630 			       maxsize,			/* maxsegsize */
631 			       0,			/* flags */
632 			       NULL, NULL,		/* No locking needed */
633 			       &sc->aac_common_dmat)) {
634 		device_printf(sc->aac_dev,
635 			      "can't allocate common structure DMA tag\n");
636 		return (ENOMEM);
637 	}
638 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
639 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
640 		device_printf(sc->aac_dev, "can't allocate common structure\n");
641 		return (ENOMEM);
642 	}
643 
644 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
645 			sc->aac_common, maxsize,
646 			aac_common_map, sc, 0);
647 	bzero(sc->aac_common, maxsize);
648 
649 	/* Allocate some FIBs and associated command structs */
650 	TAILQ_INIT(&sc->aac_fibmap_tqh);
651 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
652 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
653 	mtx_lock(&sc->aac_io_lock);
654 	while (sc->total_fibs < sc->aac_max_fibs) {
655 		if (aac_alloc_commands(sc) != 0)
656 			break;
657 	}
658 	mtx_unlock(&sc->aac_io_lock);
659 	if (sc->total_fibs == 0)
660 		return (ENOMEM);
661 
662 	return (0);
663 }
664 
665 /*
666  * Free all of the resources associated with (sc)
667  *
668  * Should not be called if the controller is active.
669  */
670 void
671 aacraid_free(struct aac_softc *sc)
672 {
673 	int i;
674 
675 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
676 
677 	/* remove the control device */
678 	if (sc->aac_dev_t != NULL)
679 		destroy_dev(sc->aac_dev_t);
680 
681 	/* throw away any FIB buffers, discard the FIB DMA tag */
682 	aac_free_commands(sc);
683 	if (sc->aac_fib_dmat)
684 		bus_dma_tag_destroy(sc->aac_fib_dmat);
685 
686 	free(sc->aac_commands, M_AACRAIDBUF);
687 
688 	/* destroy the common area */
689 	if (sc->aac_common) {
690 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
691 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
692 				sc->aac_common_dmamap);
693 	}
694 	if (sc->aac_common_dmat)
695 		bus_dma_tag_destroy(sc->aac_common_dmat);
696 
697 	/* disconnect the interrupt handler */
698 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
699 		if (sc->aac_intr[i])
700 			bus_teardown_intr(sc->aac_dev,
701 				sc->aac_irq[i], sc->aac_intr[i]);
702 		if (sc->aac_irq[i])
703 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
704 				sc->aac_irq_rid[i], sc->aac_irq[i]);
705 		else
706 			break;
707 	}
708 	if (sc->msi_enabled || sc->msi_tupelo)
709 		pci_release_msi(sc->aac_dev);
710 
711 	/* destroy data-transfer DMA tag */
712 	if (sc->aac_buffer_dmat)
713 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
714 
715 	/* destroy the parent DMA tag */
716 	if (sc->aac_parent_dmat)
717 		bus_dma_tag_destroy(sc->aac_parent_dmat);
718 
719 	/* release the register window mapping */
720 	if (sc->aac_regs_res0 != NULL)
721 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
722 				     sc->aac_regs_rid0, sc->aac_regs_res0);
723 	if (sc->aac_regs_res1 != NULL)
724 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
725 				     sc->aac_regs_rid1, sc->aac_regs_res1);
726 }
727 
728 /*
729  * Disconnect from the controller completely, in preparation for unload.
730  */
731 int
732 aacraid_detach(device_t dev)
733 {
734 	struct aac_softc *sc;
735 	struct aac_container *co;
736 	struct aac_sim	*sim;
737 	int error;
738 
739 	sc = device_get_softc(dev);
740 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
741 
742 	error = bus_generic_detach(dev);
743 	if (error != 0)
744 		return (error);
745 
746 	callout_drain(&sc->aac_daemontime);
747 	/* Remove the child containers */
748 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
749 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
750 		free(co, M_AACRAIDBUF);
751 	}
752 
753 	/* Remove the CAM SIMs */
754 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
755 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
756 		free(sim, M_AACRAIDBUF);
757 	}
758 
759 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
760 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
761 		wakeup(sc->aifthread);
762 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
763 	}
764 
765 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
766 		panic("Cannot shutdown AIF thread");
767 
768 	if ((error = aacraid_shutdown(dev)))
769 		return(error);
770 
771 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
772 
773 	aacraid_free(sc);
774 
775 	mtx_destroy(&sc->aac_io_lock);
776 
777 	return(0);
778 }
779 
780 /*
781  * Bring the controller down to a dormant state and detach all child devices.
782  *
783  * This function is called before detach or system shutdown.
784  *
785  * Note that we can assume that the bioq on the controller is empty, as we won't
786  * allow shutdown if any device is open.
787  */
788 int
789 aacraid_shutdown(device_t dev)
790 {
791 	struct aac_softc *sc;
792 	struct aac_fib *fib;
793 	struct aac_close_command *cc;
794 
795 	sc = device_get_softc(dev);
796 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
797 
798 	sc->aac_state |= AAC_STATE_SUSPEND;
799 
800 	/*
801 	 * Send a Container shutdown followed by a HostShutdown FIB to the
802 	 * controller to convince it that we don't want to talk to it anymore.
803 	 * We've been closed and all I/O completed already
804 	 */
805 	device_printf(sc->aac_dev, "shutting down controller...");
806 
807 	mtx_lock(&sc->aac_io_lock);
808 	aac_alloc_sync_fib(sc, &fib);
809 	cc = (struct aac_close_command *)&fib->data[0];
810 
811 	bzero(cc, sizeof(struct aac_close_command));
812 	cc->Command = htole32(VM_CloseAll);
813 	cc->ContainerId = htole32(0xfffffffe);
814 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
815 	    sizeof(struct aac_close_command)))
816 		printf("FAILED.\n");
817 	else
818 		printf("done\n");
819 
820 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
821 	aac_release_sync_fib(sc);
822 	mtx_unlock(&sc->aac_io_lock);
823 
824 	return(0);
825 }
826 
827 /*
828  * Bring the controller to a quiescent state, ready for system suspend.
829  */
830 int
831 aacraid_suspend(device_t dev)
832 {
833 	struct aac_softc *sc;
834 
835 	sc = device_get_softc(dev);
836 
837 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
838 	sc->aac_state |= AAC_STATE_SUSPEND;
839 
840 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
841 	return(0);
842 }
843 
844 /*
845  * Bring the controller back to a state ready for operation.
846  */
847 int
848 aacraid_resume(device_t dev)
849 {
850 	struct aac_softc *sc;
851 
852 	sc = device_get_softc(dev);
853 
854 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
855 	sc->aac_state &= ~AAC_STATE_SUSPEND;
856 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
857 	return(0);
858 }
859 
860 /*
861  * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
862  */
863 void
864 aacraid_new_intr_type1(void *arg)
865 {
866 	struct aac_msix_ctx *ctx;
867 	struct aac_softc *sc;
868 	int vector_no;
869 	struct aac_command *cm;
870 	struct aac_fib *fib;
871 	u_int32_t bellbits, bellbits_shifted, index, handle;
872 	int isFastResponse, isAif, noMoreAif, mode;
873 
874 	ctx = (struct aac_msix_ctx *)arg;
875 	sc = ctx->sc;
876 	vector_no = ctx->vector_no;
877 
878 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
879 	mtx_lock(&sc->aac_io_lock);
880 
881 	if (sc->msi_enabled) {
882 		mode = AAC_INT_MODE_MSI;
883 		if (vector_no == 0) {
884 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
885 			if (bellbits & 0x40000)
886 				mode |= AAC_INT_MODE_AIF;
887 			else if (bellbits & 0x1000)
888 				mode |= AAC_INT_MODE_SYNC;
889 		}
890 	} else {
891 		mode = AAC_INT_MODE_INTX;
892 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
893 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
894 			bellbits = AAC_DB_RESPONSE_SENT_NS;
895 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
896 		} else {
897 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
898 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
899 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
900 				mode |= AAC_INT_MODE_AIF;
901 			if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
902 				mode |= AAC_INT_MODE_SYNC;
903 		}
904 		/* ODR readback, Prep #238630 */
905 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
906 	}
907 
908 	if (mode & AAC_INT_MODE_SYNC) {
909 		if (sc->aac_sync_cm) {
910 			cm = sc->aac_sync_cm;
911 			aac_unmap_command(cm);
912 			cm->cm_flags |= AAC_CMD_COMPLETED;
913 			aac_fib_header_toh(&cm->cm_fib->Header);
914 
915 			/* is there a completion handler? */
916 			if (cm->cm_complete != NULL) {
917 				cm->cm_complete(cm);
918 			} else {
919 				/* assume that someone is sleeping on this command */
920 				wakeup(cm);
921 			}
922 			sc->flags &= ~AAC_QUEUE_FRZN;
923 			sc->aac_sync_cm = NULL;
924 		}
925 		if (mode & AAC_INT_MODE_INTX)
926 			mode &= ~AAC_INT_MODE_SYNC;
927 		else
928 			mode = 0;
929 	}
930 
931 	if (mode & AAC_INT_MODE_AIF) {
932 		if (mode & AAC_INT_MODE_INTX) {
933 			aac_request_aif(sc);
934 			mode = 0;
935 		}
936 	}
937 
938 	if (sc->flags & AAC_FLAGS_SYNC_MODE)
939 		mode = 0;
940 
941 	if (mode) {
942 		/* handle async. status */
943 		index = sc->aac_host_rrq_idx[vector_no];
944 		for (;;) {
945 			isFastResponse = isAif = noMoreAif = 0;
946 			/* remove toggle bit (31) */
947 			handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
948 			    0x7fffffff);
949 			/* check fast response bit (30) */
950 			if (handle & 0x40000000)
951 				isFastResponse = 1;
952 			/* check AIF bit (23) */
953 			else if (handle & 0x00800000)
954 				isAif = TRUE;
955 			handle &= 0x0000ffff;
956 			if (handle == 0)
957 				break;
958 
959 			cm = sc->aac_commands + (handle - 1);
960 			fib = cm->cm_fib;
961 			aac_fib_header_toh(&fib->Header);
962 			sc->aac_rrq_outstanding[vector_no]--;
963 			if (isAif) {
964 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
965 				if (!noMoreAif)
966 					aac_handle_aif(sc, fib);
967 				aac_remove_busy(cm);
968 				aacraid_release_command(cm);
969 			} else {
970 				if (isFastResponse) {
971 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
972 					*((u_int32_t *)(fib->data)) = htole32(ST_OK);
973 					cm->cm_flags |= AAC_CMD_FASTRESP;
974 				}
975 				aac_remove_busy(cm);
976 				aac_unmap_command(cm);
977 				cm->cm_flags |= AAC_CMD_COMPLETED;
978 
979 				/* is there a completion handler? */
980 				if (cm->cm_complete != NULL) {
981 					cm->cm_complete(cm);
982 				} else {
983 					/* assume that someone is sleeping on this command */
984 					wakeup(cm);
985 				}
986 				sc->flags &= ~AAC_QUEUE_FRZN;
987 			}
988 
989 			sc->aac_common->ac_host_rrq[index++] = 0;
990 			if (index == (vector_no + 1) * sc->aac_vector_cap)
991 				index = vector_no * sc->aac_vector_cap;
992 			sc->aac_host_rrq_idx[vector_no] = index;
993 
994 			if ((isAif && !noMoreAif) || sc->aif_pending)
995 				aac_request_aif(sc);
996 		}
997 	}
998 
999 	if (mode & AAC_INT_MODE_AIF) {
1000 		aac_request_aif(sc);
1001 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1002 		mode = 0;
1003 	}
1004 
1005 	/* see if we can start some more I/O */
1006 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1007 		aacraid_startio(sc);
1008 	mtx_unlock(&sc->aac_io_lock);
1009 }
1010 
1011 /*
1012  * Handle notification of one or more FIBs coming from the controller.
1013  */
1014 static void
1015 aac_command_thread(struct aac_softc *sc)
1016 {
1017 	int retval;
1018 
1019 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1020 
1021 	mtx_lock(&sc->aac_io_lock);
1022 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1023 
1024 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1025 		retval = 0;
1026 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1027 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1028 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1029 
1030 		/*
1031 		 * First see if any FIBs need to be allocated.
1032 		 */
1033 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1034 			aac_alloc_commands(sc);
1035 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1036 			aacraid_startio(sc);
1037 		}
1038 
1039 		/*
1040 		 * While we're here, check to see if any commands are stuck.
1041 		 * This is pretty low-priority, so it's ok if it doesn't
1042 		 * always fire.
1043 		 */
1044 		if (retval == EWOULDBLOCK)
1045 			aac_timeout(sc);
1046 
1047 		/* Check the hardware printf message buffer */
1048 		if (sc->aac_common->ac_printf[0] != 0)
1049 			aac_print_printf(sc);
1050 	}
1051 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1052 	mtx_unlock(&sc->aac_io_lock);
1053 	wakeup(sc->aac_dev);
1054 
1055 	aac_kthread_exit(0);
1056 }
1057 
1058 /*
1059  * Submit a command to the controller, return when it completes.
1060  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1061  *     be stuck here forever.  At the same time, signals are not caught
1062  *     because there is a risk that a signal could wakeup the sleep before
1063  *     the card has a chance to complete the command.  Since there is no way
1064  *     to cancel a command that is in progress, we can't protect against the
1065  *     card completing a command late and spamming the command and data
1066  *     memory.  So, we are held hostage until the command completes.
1067  */
1068 int
1069 aacraid_wait_command(struct aac_command *cm)
1070 {
1071 	struct aac_softc *sc;
1072 	int error;
1073 
1074 	sc = cm->cm_sc;
1075 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1076 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1077 
1078 	/* Put the command on the ready queue and get things going */
1079 	aac_enqueue_ready(cm);
1080 	aacraid_startio(sc);
1081 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1082 	return(error);
1083 }
1084 
1085 /*
1086  *Command Buffer Management
1087  */
1088 
1089 /*
1090  * Allocate a command.
1091  */
1092 int
1093 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1094 {
1095 	struct aac_command *cm;
1096 
1097 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1098 
1099 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1100 		if (sc->total_fibs < sc->aac_max_fibs) {
1101 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1102 			wakeup(sc->aifthread);
1103 		}
1104 		return (EBUSY);
1105 	}
1106 
1107 	*cmp = cm;
1108 	return(0);
1109 }
1110 
1111 /*
1112  * Release a command back to the freelist.
1113  */
1114 void
1115 aacraid_release_command(struct aac_command *cm)
1116 {
1117 	struct aac_event *event;
1118 	struct aac_softc *sc;
1119 
1120 	sc = cm->cm_sc;
1121 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1122 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1123 
1124 	/* (re)initialize the command/FIB */
1125 	cm->cm_sgtable = NULL;
1126 	cm->cm_flags = 0;
1127 	cm->cm_complete = NULL;
1128 	cm->cm_ccb = NULL;
1129 	cm->cm_passthr_dmat = 0;
1130 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1131 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1132 	cm->cm_fib->Header.Unused = 0;
1133 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1134 
1135 	/*
1136 	 * These are duplicated in aac_start to cover the case where an
1137 	 * intermediate stage may have destroyed them.  They're left
1138 	 * initialized here for debugging purposes only.
1139 	 */
1140 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1141 	cm->cm_fib->Header.Handle = 0;
1142 
1143 	aac_enqueue_free(cm);
1144 
1145 	/*
1146 	 * Dequeue all events so that there's no risk of events getting
1147 	 * stranded.
1148 	 */
1149 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1150 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1151 		event->ev_callback(sc, event, event->ev_arg);
1152 	}
1153 }
1154 
1155 /*
1156  * Map helper for command/FIB allocation.
1157  */
1158 static void
1159 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1160 {
1161 	uint64_t	*fibphys;
1162 
1163 	fibphys = (uint64_t *)arg;
1164 
1165 	*fibphys = segs[0].ds_addr;
1166 }
1167 
1168 /*
1169  * Allocate and initialize commands/FIBs for this adapter.
1170  */
1171 static int
1172 aac_alloc_commands(struct aac_softc *sc)
1173 {
1174 	struct aac_command *cm;
1175 	struct aac_fibmap *fm;
1176 	uint64_t fibphys;
1177 	int i, error;
1178 	u_int32_t maxsize;
1179 
1180 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1181 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1182 
1183 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1184 		return (ENOMEM);
1185 
1186 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1187 	if (fm == NULL)
1188 		return (ENOMEM);
1189 
1190 	mtx_unlock(&sc->aac_io_lock);
1191 	/* allocate the FIBs in DMAable memory and load them */
1192 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1193 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1194 		device_printf(sc->aac_dev,
1195 			      "Not enough contiguous memory available.\n");
1196 		free(fm, M_AACRAIDBUF);
1197 		mtx_lock(&sc->aac_io_lock);
1198 		return (ENOMEM);
1199 	}
1200 
1201 	maxsize = sc->aac_max_fib_size + 31;
1202 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1203 		maxsize += sizeof(struct aac_fib_xporthdr);
1204 	/* Ignore errors since this doesn't bounce */
1205 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1206 			      sc->aac_max_fibs_alloc * maxsize,
1207 			      aac_map_command_helper, &fibphys, 0);
1208 	mtx_lock(&sc->aac_io_lock);
1209 
1210 	/* initialize constant fields in the command structure */
1211 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1212 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1213 		cm = sc->aac_commands + sc->total_fibs;
1214 		fm->aac_commands = cm;
1215 		cm->cm_sc = sc;
1216 		cm->cm_fib = (struct aac_fib *)
1217 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1218 		cm->cm_fibphys = fibphys + i * maxsize;
1219 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1220 			u_int64_t fibphys_aligned;
1221 			fibphys_aligned =
1222 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1223 			cm->cm_fib = (struct aac_fib *)
1224 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1225 			cm->cm_fibphys = fibphys_aligned;
1226 		} else {
1227 			u_int64_t fibphys_aligned;
1228 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1229 			cm->cm_fib = (struct aac_fib *)
1230 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1231 			cm->cm_fibphys = fibphys_aligned;
1232 		}
1233 		cm->cm_index = sc->total_fibs;
1234 
1235 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1236 					       &cm->cm_datamap)) != 0)
1237 			break;
1238 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1239 			aacraid_release_command(cm);
1240 		sc->total_fibs++;
1241 	}
1242 
1243 	if (i > 0) {
1244 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1245 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1246 		return (0);
1247 	}
1248 
1249 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1250 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1251 	free(fm, M_AACRAIDBUF);
1252 	return (ENOMEM);
1253 }
1254 
1255 /*
1256  * Free FIBs owned by this adapter.
1257  */
1258 static void
1259 aac_free_commands(struct aac_softc *sc)
1260 {
1261 	struct aac_fibmap *fm;
1262 	struct aac_command *cm;
1263 	int i;
1264 
1265 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1266 
1267 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1268 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1269 		/*
1270 		 * We check against total_fibs to handle partially
1271 		 * allocated blocks.
1272 		 */
1273 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1274 			cm = fm->aac_commands + i;
1275 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1276 		}
1277 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1278 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1279 		free(fm, M_AACRAIDBUF);
1280 	}
1281 }
1282 
1283 /*
1284  * Command-mapping helper function - populate this command's s/g table.
1285  */
1286 void
1287 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1288 {
1289 	struct aac_softc *sc;
1290 	struct aac_command *cm;
1291 	struct aac_fib *fib;
1292 	int i;
1293 
1294 	cm = (struct aac_command *)arg;
1295 	sc = cm->cm_sc;
1296 	fib = cm->cm_fib;
1297 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1298 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1299 
1300 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1301 		return;
1302 
1303 	/* copy into the FIB */
1304 	if (cm->cm_sgtable != NULL) {
1305 		if (fib->Header.Command == RawIo2) {
1306 			struct aac_raw_io2 *raw;
1307 			struct aac_sge_ieee1212 *sg;
1308 			u_int32_t min_size = PAGE_SIZE, cur_size;
1309 			int conformable = TRUE;
1310 
1311 			raw = (struct aac_raw_io2 *)&fib->data[0];
1312 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1313 			raw->sgeCnt = nseg;
1314 
1315 			for (i = 0; i < nseg; i++) {
1316 				cur_size = segs[i].ds_len;
1317 				sg[i].addrHigh = 0;
1318 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1319 				sg[i].length = cur_size;
1320 				sg[i].flags = 0;
1321 				if (i == 0) {
1322 					raw->sgeFirstSize = cur_size;
1323 				} else if (i == 1) {
1324 					raw->sgeNominalSize = cur_size;
1325 					min_size = cur_size;
1326 				} else if ((i+1) < nseg &&
1327 					cur_size != raw->sgeNominalSize) {
1328 					conformable = FALSE;
1329 					if (cur_size < min_size)
1330 						min_size = cur_size;
1331 				}
1332 			}
1333 
1334 			/* not conformable: evaluate required sg elements */
1335 			if (!conformable) {
1336 				int j, err_found, nseg_new = nseg;
1337 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1338 					err_found = FALSE;
1339 					nseg_new = 2;
1340 					for (j = 1; j < nseg - 1; ++j) {
1341 						if (sg[j].length % (i*PAGE_SIZE)) {
1342 							err_found = TRUE;
1343 							break;
1344 						}
1345 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1346 					}
1347 					if (!err_found)
1348 						break;
1349 				}
1350 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1351 					!(sc->hint_flags & 4))
1352 					nseg = aac_convert_sgraw2(sc,
1353 						raw, i, nseg, nseg_new);
1354 			} else {
1355 				raw->flags |= RIO2_SGL_CONFORMANT;
1356 			}
1357 
1358 			for (i = 0; i < nseg; i++)
1359 				aac_sge_ieee1212_tole(sg + i);
1360 			aac_raw_io2_tole(raw);
1361 
1362 			/* update the FIB size for the s/g count */
1363 			fib->Header.Size += nseg *
1364 				sizeof(struct aac_sge_ieee1212);
1365 
1366 		} else if (fib->Header.Command == RawIo) {
1367 			struct aac_sg_tableraw *sg;
1368 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1369 			sg->SgCount = htole32(nseg);
1370 			for (i = 0; i < nseg; i++) {
1371 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1372 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1373 				sg->SgEntryRaw[i].Next = 0;
1374 				sg->SgEntryRaw[i].Prev = 0;
1375 				sg->SgEntryRaw[i].Flags = 0;
1376 				aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1377 			}
1378 			aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1379 			/* update the FIB size for the s/g count */
1380 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1381 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1382 			struct aac_sg_table *sg;
1383 			sg = cm->cm_sgtable;
1384 			sg->SgCount = htole32(nseg);
1385 			for (i = 0; i < nseg; i++) {
1386 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1387 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1388 				aac_sg_entry_tole(&sg->SgEntry[i]);
1389 			}
1390 			/* update the FIB size for the s/g count */
1391 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1392 		} else {
1393 			struct aac_sg_table64 *sg;
1394 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1395 			sg->SgCount = htole32(nseg);
1396 			for (i = 0; i < nseg; i++) {
1397 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1398 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1399 				aac_sg_entry64_tole(&sg->SgEntry64[i]);
1400 			}
1401 			/* update the FIB size for the s/g count */
1402 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1403 		}
1404 	}
1405 
1406 	/* Fix up the address values in the FIB.  Use the command array index
1407 	 * instead of a pointer since these fields are only 32 bits.  Shift
1408 	 * the SenderFibAddress over to make room for the fast response bit
1409 	 * and for the AIF bit
1410 	 */
1411 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1412 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1413 
1414 	/* save a pointer to the command for speedy reverse-lookup */
1415 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1416 
1417 	if (cm->cm_passthr_dmat == 0) {
1418 		if (cm->cm_flags & AAC_CMD_DATAIN)
1419 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1420 							BUS_DMASYNC_PREREAD);
1421 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1422 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1423 							BUS_DMASYNC_PREWRITE);
1424 	}
1425 
1426 	cm->cm_flags |= AAC_CMD_MAPPED;
1427 
1428 	if (cm->cm_flags & AAC_CMD_WAIT) {
1429 		aac_fib_header_tole(&fib->Header);
1430 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1431 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1432 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1433 		u_int32_t wait = 0;
1434 		sc->aac_sync_cm = cm;
1435 		aac_fib_header_tole(&fib->Header);
1436 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1437 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1438 	} else {
1439 		int count = 10000000L;
1440 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1441 			if (--count == 0) {
1442 				aac_unmap_command(cm);
1443 				sc->flags |= AAC_QUEUE_FRZN;
1444 				aac_requeue_ready(cm);
1445 			}
1446 			DELAY(5);			/* wait 5 usec. */
1447 		}
1448 	}
1449 }
1450 
1451 static int
1452 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1453 				   int pages, int nseg, int nseg_new)
1454 {
1455 	struct aac_sge_ieee1212 *sge;
1456 	int i, j, pos;
1457 	u_int32_t addr_low;
1458 
1459 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1460 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1461 	if (sge == NULL)
1462 		return nseg;
1463 
1464 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1465 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1466 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1467 			sge[pos].addrLow = addr_low;
1468 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1469 			if (addr_low < raw->sge[i].addrLow)
1470 				sge[pos].addrHigh++;
1471 			sge[pos].length = pages * PAGE_SIZE;
1472 			sge[pos].flags = 0;
1473 			pos++;
1474 		}
1475 	}
1476 	sge[pos] = raw->sge[nseg-1];
1477 	for (i = 1; i < nseg_new; ++i)
1478 		raw->sge[i] = sge[i];
1479 
1480 	free(sge, M_AACRAIDBUF);
1481 	raw->sgeCnt = nseg_new;
1482 	raw->flags |= RIO2_SGL_CONFORMANT;
1483 	raw->sgeNominalSize = pages * PAGE_SIZE;
1484 	return nseg_new;
1485 }
1486 
1487 /*
1488  * Unmap a command from controller-visible space.
1489  */
1490 static void
1491 aac_unmap_command(struct aac_command *cm)
1492 {
1493 	struct aac_softc *sc;
1494 
1495 	sc = cm->cm_sc;
1496 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1497 
1498 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1499 		return;
1500 
1501 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1502 		if (cm->cm_flags & AAC_CMD_DATAIN)
1503 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1504 					BUS_DMASYNC_POSTREAD);
1505 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1506 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1507 					BUS_DMASYNC_POSTWRITE);
1508 
1509 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1510 	}
1511 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1512 }
1513 
1514 /*
1515  * Hardware Interface
1516  */
1517 
1518 /*
1519  * Initialize the adapter.
1520  */
1521 static void
1522 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1523 {
1524 	struct aac_softc *sc;
1525 
1526 	sc = (struct aac_softc *)arg;
1527 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1528 
1529 	sc->aac_common_busaddr = segs[0].ds_addr;
1530 }
1531 
1532 static int
1533 aac_check_firmware(struct aac_softc *sc)
1534 {
1535 	u_int32_t code, major, minor, maxsize;
1536 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1537 	time_t then;
1538 
1539 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1540 
1541 	/* check if flash update is running */
1542 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1543 		then = time_uptime;
1544 		do {
1545 			code = AAC_GET_FWSTATUS(sc);
1546 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1547 				device_printf(sc->aac_dev,
1548 						  "FATAL: controller not coming ready, "
1549 						   "status %x\n", code);
1550 				return(ENXIO);
1551 			}
1552 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1553 		/*
1554 		 * Delay 10 seconds. Because right now FW is doing a soft reset,
1555 		 * do not read scratch pad register at this time
1556 		 */
1557 		waitCount = 10 * 10000;
1558 		while (waitCount) {
1559 			DELAY(100);		/* delay 100 microseconds */
1560 			waitCount--;
1561 		}
1562 	}
1563 
1564 	/*
1565 	 * Wait for the adapter to come ready.
1566 	 */
1567 	then = time_uptime;
1568 	do {
1569 		code = AAC_GET_FWSTATUS(sc);
1570 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1571 			device_printf(sc->aac_dev,
1572 				      "FATAL: controller not coming ready, "
1573 					   "status %x\n", code);
1574 			return(ENXIO);
1575 		}
1576 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1577 
1578 	/*
1579 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1580 	 * firmware version 1.x are not compatible with this driver.
1581 	 */
1582 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1583 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1584 				     NULL, NULL)) {
1585 			device_printf(sc->aac_dev,
1586 				      "Error reading firmware version\n");
1587 			return (EIO);
1588 		}
1589 
1590 		/* These numbers are stored as ASCII! */
1591 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1592 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1593 		if (major == 1) {
1594 			device_printf(sc->aac_dev,
1595 			    "Firmware version %d.%d is not supported.\n",
1596 			    major, minor);
1597 			return (EINVAL);
1598 		}
1599 	}
1600 	/*
1601 	 * Retrieve the capabilities/supported options word so we know what
1602 	 * work-arounds to enable.  Some firmware revs don't support this
1603 	 * command.
1604 	 */
1605 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1606 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1607 			device_printf(sc->aac_dev,
1608 			     "RequestAdapterInfo failed\n");
1609 			return (EIO);
1610 		}
1611 	} else {
1612 		options = AAC_GET_MAILBOX(sc, 1);
1613 		atu_size = AAC_GET_MAILBOX(sc, 2);
1614 		sc->supported_options = options;
1615 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1616 
1617 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1618 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1619 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1620 		if (options & AAC_SUPPORTED_NONDASD)
1621 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1622 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1623 			&& (sizeof(bus_addr_t) > 4)
1624 			&& (sc->hint_flags & 0x1)) {
1625 			device_printf(sc->aac_dev,
1626 			    "Enabling 64-bit address support\n");
1627 			sc->flags |= AAC_FLAGS_SG_64BIT;
1628 		}
1629 		if (sc->aac_if.aif_send_command) {
1630 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1631 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1632 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1633 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1634 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1635 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1636 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1637 		}
1638 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1639 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1640 	}
1641 
1642 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1643 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1644 		return (ENXIO);
1645 	}
1646 
1647 	if (sc->hint_flags & 2) {
1648 		device_printf(sc->aac_dev,
1649 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1650 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1651 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1652 		device_printf(sc->aac_dev,
1653 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1654 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1655 	}
1656 
1657 	/* Check for broken hardware that does a lower number of commands */
1658 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1659 
1660 	/* Remap mem. resource, if required */
1661 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1662 		bus_release_resource(
1663 			sc->aac_dev, SYS_RES_MEMORY,
1664 			sc->aac_regs_rid0, sc->aac_regs_res0);
1665 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1666 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1667 			atu_size, RF_ACTIVE);
1668 		if (sc->aac_regs_res0 == NULL) {
1669 			sc->aac_regs_res0 = bus_alloc_resource_any(
1670 				sc->aac_dev, SYS_RES_MEMORY,
1671 				&sc->aac_regs_rid0, RF_ACTIVE);
1672 			if (sc->aac_regs_res0 == NULL) {
1673 				device_printf(sc->aac_dev,
1674 					"couldn't allocate register window\n");
1675 				return (ENXIO);
1676 			}
1677 		}
1678 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1679 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1680 	}
1681 
1682 	/* Read preferred settings */
1683 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1684 	sc->aac_max_sectors = 128;				/* 64KB */
1685 	sc->aac_max_aif = 1;
1686 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1687 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1688 		 - sizeof(struct aac_blockwrite64))
1689 		 / sizeof(struct aac_sg_entry64);
1690 	else
1691 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1692 		 - sizeof(struct aac_blockwrite))
1693 		 / sizeof(struct aac_sg_entry);
1694 
1695 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1696 		options = AAC_GET_MAILBOX(sc, 1);
1697 		sc->aac_max_fib_size = (options & 0xFFFF);
1698 		sc->aac_max_sectors = (options >> 16) << 1;
1699 		options = AAC_GET_MAILBOX(sc, 2);
1700 		sc->aac_sg_tablesize = (options >> 16);
1701 		options = AAC_GET_MAILBOX(sc, 3);
1702 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1703 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1704 			sc->aac_max_fibs = (options & 0xFFFF);
1705 		options = AAC_GET_MAILBOX(sc, 4);
1706 		sc->aac_max_aif = (options & 0xFFFF);
1707 		options = AAC_GET_MAILBOX(sc, 5);
1708 		sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1709 	}
1710 
1711 	maxsize = sc->aac_max_fib_size + 31;
1712 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1713 		maxsize += sizeof(struct aac_fib_xporthdr);
1714 	if (maxsize > PAGE_SIZE) {
1715     	sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1716 		maxsize = PAGE_SIZE;
1717 	}
1718 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1719 
1720 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1721 		sc->flags |= AAC_FLAGS_RAW_IO;
1722 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1723 	}
1724 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1725 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1726 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1727 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1728 	}
1729 
1730 #ifdef AACRAID_DEBUG
1731 	aacraid_get_fw_debug_buffer(sc);
1732 #endif
1733 	return (0);
1734 }
1735 
1736 static int
1737 aac_init(struct aac_softc *sc)
1738 {
1739 	struct aac_adapter_init	*ip;
1740 	int i, error;
1741 
1742 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1743 
1744 	/* reset rrq index */
1745 	sc->aac_fibs_pushed_no = 0;
1746 	for (i = 0; i < sc->aac_max_msix; i++)
1747 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1748 
1749 	/*
1750 	 * Fill in the init structure.  This tells the adapter about the
1751 	 * physical location of various important shared data structures.
1752 	 */
1753 	ip = &sc->aac_common->ac_init;
1754 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1755 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1756 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1757 		sc->flags |= AAC_FLAGS_RAW_IO;
1758 	}
1759 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1760 
1761 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1762 					 offsetof(struct aac_common, ac_fibs);
1763 	ip->AdapterFibsVirtualAddress = 0;
1764 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1765 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1766 
1767 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1768 				  offsetof(struct aac_common, ac_printf);
1769 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1770 
1771 	/*
1772 	 * The adapter assumes that pages are 4K in size, except on some
1773  	 * broken firmware versions that do the page->byte conversion twice,
1774 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1775 	 * Round up since the granularity is so high.
1776 	 */
1777 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1778 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1779 		ip->HostPhysMemPages =
1780 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1781 	}
1782 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1783 
1784 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1785 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1786 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1787 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1788 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1789 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1790 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1791 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1792 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1793 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1794 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1795 	}
1796 	ip->MaxNumAif = sc->aac_max_aif;
1797 	ip->HostRRQ_AddrLow =
1798 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1799 	/* always 32-bit address */
1800 	ip->HostRRQ_AddrHigh = 0;
1801 
1802 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1803 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1804 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1805 		device_printf(sc->aac_dev, "Power Management enabled\n");
1806 	}
1807 
1808 	ip->MaxIoCommands = sc->aac_max_fibs;
1809 	ip->MaxIoSize = AAC_MAXIO_SIZE(sc);
1810 	ip->MaxFibSize = sc->aac_max_fib_size;
1811 
1812 	aac_adapter_init_tole(ip);
1813 
1814 	/*
1815 	 * Do controller-type-specific initialisation
1816 	 */
1817 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1818 
1819 	/*
1820 	 * Give the init structure to the controller.
1821 	 */
1822 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1823 			     sc->aac_common_busaddr +
1824 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1825 			     NULL, NULL)) {
1826 		device_printf(sc->aac_dev,
1827 			      "error establishing init structure\n");
1828 		error = EIO;
1829 		goto out;
1830 	}
1831 
1832 	/*
1833 	 * Check configuration issues
1834 	 */
1835 	if ((error = aac_check_config(sc)) != 0)
1836 		goto out;
1837 
1838 	error = 0;
1839 out:
1840 	return(error);
1841 }
1842 
1843 static void
1844 aac_define_int_mode(struct aac_softc *sc)
1845 {
1846 	device_t dev;
1847 	int cap, msi_count, error = 0;
1848 	uint32_t val;
1849 
1850 	dev = sc->aac_dev;
1851 
1852 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1853 		device_printf(dev, "using line interrupts\n");
1854 		sc->aac_max_msix = 1;
1855 		sc->aac_vector_cap = sc->aac_max_fibs;
1856 		return;
1857 	}
1858 
1859 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1860 	if (sc->aac_max_msix == 0) {
1861 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1862 			msi_count = 1;
1863 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1864 				device_printf(dev, "alloc msi failed - err=%d; "
1865 				    "will use INTx\n", error);
1866 				pci_release_msi(dev);
1867 			} else {
1868 				sc->msi_tupelo = TRUE;
1869 			}
1870 		}
1871 		if (sc->msi_tupelo)
1872 			device_printf(dev, "using MSI interrupts\n");
1873 		else
1874 			device_printf(dev, "using line interrupts\n");
1875 
1876 		sc->aac_max_msix = 1;
1877 		sc->aac_vector_cap = sc->aac_max_fibs;
1878 		return;
1879 	}
1880 
1881 	/* OS capability */
1882 	msi_count = pci_msix_count(dev);
1883 	if (msi_count > AAC_MAX_MSIX)
1884 		msi_count = AAC_MAX_MSIX;
1885 	if (msi_count > sc->aac_max_msix)
1886 		msi_count = sc->aac_max_msix;
1887 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1888 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1889 				   "will try MSI\n", msi_count, error);
1890 		pci_release_msi(dev);
1891 	} else {
1892 		sc->msi_enabled = TRUE;
1893 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1894 			msi_count);
1895 	}
1896 
1897 	if (!sc->msi_enabled) {
1898 		msi_count = 1;
1899 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1900 			device_printf(dev, "alloc msi failed - err=%d; "
1901 				           "will use INTx\n", error);
1902 			pci_release_msi(dev);
1903 		} else {
1904 			sc->msi_enabled = TRUE;
1905 			device_printf(dev, "using MSI interrupts\n");
1906 		}
1907 	}
1908 
1909 	if (sc->msi_enabled) {
1910 		/* now read controller capability from PCI config. space */
1911 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1912 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1913 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1914 			pci_release_msi(dev);
1915 			sc->msi_enabled = FALSE;
1916 		}
1917 	}
1918 
1919 	if (!sc->msi_enabled) {
1920 		device_printf(dev, "using legacy interrupts\n");
1921 		sc->aac_max_msix = 1;
1922 	} else {
1923 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1924 		if (sc->aac_max_msix > msi_count)
1925 			sc->aac_max_msix = msi_count;
1926 	}
1927 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1928 
1929 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1930 		sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1931 }
1932 
1933 static int
1934 aac_find_pci_capability(struct aac_softc *sc, int cap)
1935 {
1936 	device_t dev;
1937 	uint32_t status;
1938 	uint8_t ptr;
1939 
1940 	dev = sc->aac_dev;
1941 
1942 	status = pci_read_config(dev, PCIR_STATUS, 2);
1943 	if (!(status & PCIM_STATUS_CAPPRESENT))
1944 		return (0);
1945 
1946 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1947 	switch (status & PCIM_HDRTYPE) {
1948 	case 0:
1949 	case 1:
1950 		ptr = PCIR_CAP_PTR;
1951 		break;
1952 	case 2:
1953 		ptr = PCIR_CAP_PTR_2;
1954 		break;
1955 	default:
1956 		return (0);
1957 		break;
1958 	}
1959 	ptr = pci_read_config(dev, ptr, 1);
1960 
1961 	while (ptr != 0) {
1962 		int next, val;
1963 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1964 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1965 		if (val == cap)
1966 			return (ptr);
1967 		ptr = next;
1968 	}
1969 
1970 	return (0);
1971 }
1972 
1973 static int
1974 aac_setup_intr(struct aac_softc *sc)
1975 {
1976 	int i, msi_count, rid;
1977 	struct resource *res;
1978 	void *tag;
1979 
1980 	msi_count = sc->aac_max_msix;
1981 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1982 
1983 	for (i = 0; i < msi_count; i++, rid++) {
1984 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1985 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1986 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1987 			return (EINVAL);
1988 		}
1989 		sc->aac_irq_rid[i] = rid;
1990 		sc->aac_irq[i] = res;
1991 		if (aac_bus_setup_intr(sc->aac_dev, res,
1992 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1993 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1994 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1995 			return (EINVAL);
1996 		}
1997 		sc->aac_msix[i].vector_no = i;
1998 		sc->aac_msix[i].sc = sc;
1999 		sc->aac_intr[i] = tag;
2000 	}
2001 
2002 	return (0);
2003 }
2004 
2005 static int
2006 aac_check_config(struct aac_softc *sc)
2007 {
2008 	struct aac_fib *fib;
2009 	struct aac_cnt_config *ccfg;
2010 	struct aac_cf_status_hdr *cf_shdr;
2011 	int rval;
2012 
2013 	mtx_lock(&sc->aac_io_lock);
2014 	aac_alloc_sync_fib(sc, &fib);
2015 
2016 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2017 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2018 	ccfg->Command = VM_ContainerConfig;
2019 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2020 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2021 
2022 	aac_cnt_config_tole(ccfg);
2023 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2024 		sizeof (struct aac_cnt_config));
2025 	aac_cnt_config_toh(ccfg);
2026 
2027 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2028 	if (rval == 0 && ccfg->Command == ST_OK &&
2029 		ccfg->CTCommand.param[0] == CT_OK) {
2030 		if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2031 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2032 			ccfg->Command = VM_ContainerConfig;
2033 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2034 
2035 			aac_cnt_config_tole(ccfg);
2036 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2037 				sizeof (struct aac_cnt_config));
2038 			aac_cnt_config_toh(ccfg);
2039 
2040 			if (rval == 0 && ccfg->Command == ST_OK &&
2041 				ccfg->CTCommand.param[0] == CT_OK) {
2042 				/* successful completion */
2043 				rval = 0;
2044 			} else {
2045 				/* auto commit aborted due to error(s) */
2046 				rval = -2;
2047 			}
2048 		} else {
2049 			/* auto commit aborted due to adapter indicating
2050 			   config. issues too dangerous to auto commit  */
2051 			rval = -3;
2052 		}
2053 	} else {
2054 		/* error */
2055 		rval = -1;
2056 	}
2057 
2058 	aac_release_sync_fib(sc);
2059 	mtx_unlock(&sc->aac_io_lock);
2060 	return(rval);
2061 }
2062 
2063 /*
2064  * Send a synchronous command to the controller and wait for a result.
2065  * Indicate if the controller completed the command with an error status.
2066  */
2067 int
2068 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2069 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2070 		 u_int32_t *sp, u_int32_t *r1)
2071 {
2072 	time_t then;
2073 	u_int32_t status;
2074 
2075 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2076 
2077 	/* populate the mailbox */
2078 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2079 
2080 	/* ensure the sync command doorbell flag is cleared */
2081 	if (!sc->msi_enabled)
2082 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2083 
2084 	/* then set it to signal the adapter */
2085 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2086 
2087 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2088 		/* spin waiting for the command to complete */
2089 		then = time_uptime;
2090 		do {
2091 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2092 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2093 				return(EIO);
2094 			}
2095 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2096 
2097 		/* clear the completion flag */
2098 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2099 
2100 		/* get the command status */
2101 		status = AAC_GET_MAILBOX(sc, 0);
2102 		if (sp != NULL)
2103 			*sp = status;
2104 
2105 		/* return parameter */
2106 		if (r1 != NULL)
2107 			*r1 = AAC_GET_MAILBOX(sc, 1);
2108 
2109 		if (status != AAC_SRB_STS_SUCCESS)
2110 			return (-1);
2111 	}
2112 	return(0);
2113 }
2114 
2115 static int
2116 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2117 		 struct aac_fib *fib, u_int16_t datasize)
2118 {
2119 	uint32_t ReceiverFibAddress;
2120 
2121 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2122 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2123 
2124 	if (datasize > AAC_FIB_DATASIZE)
2125 		return(EINVAL);
2126 
2127 	/*
2128 	 * Set up the sync FIB
2129 	 */
2130 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2131 				AAC_FIBSTATE_INITIALISED |
2132 				AAC_FIBSTATE_EMPTY;
2133 	fib->Header.XferState |= xferstate;
2134 	fib->Header.Command = command;
2135 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2136 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2137 	fib->Header.SenderSize = sizeof(struct aac_fib);
2138 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2139 	ReceiverFibAddress = sc->aac_common_busaddr +
2140 		offsetof(struct aac_common, ac_sync_fib);
2141 	fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2142 	aac_fib_header_tole(&fib->Header);
2143 
2144 	/*
2145 	 * Give the FIB to the controller, wait for a response.
2146 	 */
2147 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2148 		ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2149 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2150 		aac_fib_header_toh(&fib->Header);
2151 		return(EIO);
2152 	}
2153 
2154 	aac_fib_header_toh(&fib->Header);
2155 	return (0);
2156 }
2157 
2158 /*
2159  * Check for commands that have been outstanding for a suspiciously long time,
2160  * and complain about them.
2161  */
2162 static void
2163 aac_timeout(struct aac_softc *sc)
2164 {
2165 	struct aac_command *cm;
2166 	time_t deadline;
2167 	int timedout;
2168 
2169 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2170 	/*
2171 	 * Traverse the busy command list, bitch about late commands once
2172 	 * only.
2173 	 */
2174 	timedout = 0;
2175 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2176 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2177 		if (cm->cm_timestamp < deadline) {
2178 			device_printf(sc->aac_dev,
2179 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2180 				      cm, (int)(time_uptime-cm->cm_timestamp));
2181 			AAC_PRINT_FIB(sc, cm->cm_fib);
2182 			timedout++;
2183 		}
2184 	}
2185 
2186 	if (timedout)
2187 		aac_reset_adapter(sc);
2188 	aacraid_print_queues(sc);
2189 }
2190 
2191 /*
2192  * Interface Function Vectors
2193  */
2194 
2195 /*
2196  * Read the current firmware status word.
2197  */
2198 static int
2199 aac_src_get_fwstatus(struct aac_softc *sc)
2200 {
2201 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2202 
2203 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2204 }
2205 
2206 /*
2207  * Notify the controller of a change in a given queue
2208  */
2209 static void
2210 aac_src_qnotify(struct aac_softc *sc, int qbit)
2211 {
2212 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2213 
2214 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2215 }
2216 
2217 /*
2218  * Get the interrupt reason bits
2219  */
2220 static int
2221 aac_src_get_istatus(struct aac_softc *sc)
2222 {
2223 	int val;
2224 
2225 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2226 
2227 	if (sc->msi_enabled) {
2228 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2229 		if (val & AAC_MSI_SYNC_STATUS)
2230 			val = AAC_DB_SYNC_COMMAND;
2231 		else
2232 			val = 0;
2233 	} else {
2234 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2235 	}
2236 	return(val);
2237 }
2238 
2239 /*
2240  * Clear some interrupt reason bits
2241  */
2242 static void
2243 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2244 {
2245 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2246 
2247 	if (sc->msi_enabled) {
2248 		if (mask == AAC_DB_SYNC_COMMAND)
2249 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2250 	} else {
2251 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2252 	}
2253 }
2254 
2255 /*
2256  * Populate the mailbox and set the command word
2257  */
2258 static void
2259 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2260 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2261 {
2262 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2263 
2264 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2265 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2266 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2267 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2268 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2269 }
2270 
2271 static void
2272 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2273 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2274 {
2275 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2276 
2277 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2278 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2279 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2280 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2281 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2282 }
2283 
2284 /*
2285  * Fetch the immediate command status word
2286  */
2287 static int
2288 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2289 {
2290 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2291 
2292 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2293 }
2294 
2295 static int
2296 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2297 {
2298 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2299 
2300 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2301 }
2302 
2303 /*
2304  * Set/clear interrupt masks
2305  */
2306 static void
2307 aac_src_access_devreg(struct aac_softc *sc, int mode)
2308 {
2309 	u_int32_t val;
2310 
2311 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2312 
2313 	switch (mode) {
2314 	case AAC_ENABLE_INTERRUPT:
2315 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2316 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2317 				           AAC_INT_ENABLE_TYPE1_INTX));
2318 		break;
2319 
2320 	case AAC_DISABLE_INTERRUPT:
2321 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2322 		break;
2323 
2324 	case AAC_ENABLE_MSIX:
2325 		/* set bit 6 */
2326 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2327 		val |= 0x40;
2328 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2329 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2330 		/* unmask int. */
2331 		val = PMC_ALL_INTERRUPT_BITS;
2332 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2333 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2334 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2335 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2336 		break;
2337 
2338 	case AAC_DISABLE_MSIX:
2339 		/* reset bit 6 */
2340 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2341 		val &= ~0x40;
2342 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2343 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2344 		break;
2345 
2346 	case AAC_CLEAR_AIF_BIT:
2347 		/* set bit 5 */
2348 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2349 		val |= 0x20;
2350 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2351 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2352 		break;
2353 
2354 	case AAC_CLEAR_SYNC_BIT:
2355 		/* set bit 4 */
2356 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2357 		val |= 0x10;
2358 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2359 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2360 		break;
2361 
2362 	case AAC_ENABLE_INTX:
2363 		/* set bit 7 */
2364 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2365 		val |= 0x80;
2366 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2367 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2368 		/* unmask int. */
2369 		val = PMC_ALL_INTERRUPT_BITS;
2370 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2371 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2372 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2373 			val & (~(PMC_GLOBAL_INT_BIT2)));
2374 		break;
2375 
2376 	default:
2377 		break;
2378 	}
2379 }
2380 
2381 /*
2382  * New comm. interface: Send command functions
2383  */
2384 static int
2385 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2386 {
2387 	struct aac_fib_xporthdr *pFibX;
2388 	u_int32_t fibsize, high_addr;
2389 	u_int64_t address;
2390 
2391 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2392 
2393 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2394 		sc->aac_max_msix > 1) {
2395 		u_int16_t vector_no, first_choice = 0xffff;
2396 
2397 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2398 		do {
2399 			vector_no += 1;
2400 			if (vector_no == sc->aac_max_msix)
2401 				vector_no = 1;
2402 			if (sc->aac_rrq_outstanding[vector_no] <
2403 				sc->aac_vector_cap)
2404 				break;
2405 			if (0xffff == first_choice)
2406 				first_choice = vector_no;
2407 			else if (vector_no == first_choice)
2408 				break;
2409 		} while (1);
2410 		if (vector_no == first_choice)
2411 			vector_no = 0;
2412 		sc->aac_rrq_outstanding[vector_no]++;
2413 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2414 			sc->aac_fibs_pushed_no = 0;
2415 		else
2416 			sc->aac_fibs_pushed_no++;
2417 
2418 		cm->cm_fib->Header.Handle += (vector_no << 16);
2419 	}
2420 
2421 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2422 		/* Calculate the amount to the fibsize bits */
2423 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2424 		/* Fill new FIB header */
2425 		address = cm->cm_fibphys;
2426 		high_addr = (u_int32_t)(address >> 32);
2427 		if (high_addr == 0L) {
2428 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2429 			cm->cm_fib->Header.u.TimeStamp = 0L;
2430 		} else {
2431 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2432 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2433 		}
2434 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2435 	} else {
2436 		/* Calculate the amount to the fibsize bits */
2437 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2438 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2439 		/* Fill XPORT header */
2440 		pFibX = (struct aac_fib_xporthdr *)
2441 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2442 		pFibX->Handle = cm->cm_fib->Header.Handle;
2443 		pFibX->HostAddress = cm->cm_fibphys;
2444 		pFibX->Size = cm->cm_fib->Header.Size;
2445 		aac_fib_xporthdr_tole(pFibX);
2446 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2447 		high_addr = (u_int32_t)(address >> 32);
2448 	}
2449 
2450 	aac_fib_header_tole(&cm->cm_fib->Header);
2451 
2452 	if (fibsize > 31)
2453 		fibsize = 31;
2454 	aac_enqueue_busy(cm);
2455 	if (high_addr) {
2456 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2457 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2458 	} else {
2459 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2460 	}
2461 	return 0;
2462 }
2463 
2464 /*
2465  * New comm. interface: get, set outbound queue index
2466  */
2467 static int
2468 aac_src_get_outb_queue(struct aac_softc *sc)
2469 {
2470 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2471 
2472 	return(-1);
2473 }
2474 
2475 static void
2476 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2477 {
2478 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2479 }
2480 
2481 /*
2482  * Debugging and Diagnostics
2483  */
2484 
2485 /*
2486  * Print some information about the controller.
2487  */
2488 static void
2489 aac_describe_controller(struct aac_softc *sc)
2490 {
2491 	struct aac_fib *fib;
2492 	struct aac_adapter_info	*info;
2493 	char *adapter_type = "Adaptec RAID controller";
2494 
2495 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2496 
2497 	mtx_lock(&sc->aac_io_lock);
2498 	aac_alloc_sync_fib(sc, &fib);
2499 
2500 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2501 		fib->data[0] = 0;
2502 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2503 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2504 		else {
2505 			struct aac_supplement_adapter_info *supp_info;
2506 
2507 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2508 			adapter_type = (char *)supp_info->AdapterTypeText;
2509 			sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2510 			sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2511 		}
2512 	}
2513 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2514 		adapter_type,
2515 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2516 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2517 
2518 	fib->data[0] = 0;
2519 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2520 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2521 		aac_release_sync_fib(sc);
2522 		mtx_unlock(&sc->aac_io_lock);
2523 		return;
2524 	}
2525 
2526 	/* save the kernel revision structure for later use */
2527 	info = (struct aac_adapter_info *)&fib->data[0];
2528 	aac_adapter_info_toh(info);
2529 	sc->aac_revision = info->KernelRevision;
2530 
2531 	if (bootverbose) {
2532 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2533 		    "(%dMB cache, %dMB execution), %s\n",
2534 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2535 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2536 		    info->BufferMem / (1024 * 1024),
2537 		    info->ExecutionMem / (1024 * 1024),
2538 		    aac_describe_code(aac_battery_platform,
2539 		    info->batteryPlatform));
2540 
2541 		device_printf(sc->aac_dev,
2542 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2543 		    info->KernelRevision.external.comp.major,
2544 		    info->KernelRevision.external.comp.minor,
2545 		    info->KernelRevision.external.comp.dash,
2546 		    info->KernelRevision.buildNumber,
2547 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2548 
2549 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2550 			      sc->supported_options,
2551 			      "\20"
2552 			      "\1SNAPSHOT"
2553 			      "\2CLUSTERS"
2554 			      "\3WCACHE"
2555 			      "\4DATA64"
2556 			      "\5HOSTTIME"
2557 			      "\6RAID50"
2558 			      "\7WINDOW4GB"
2559 			      "\10SCSIUPGD"
2560 			      "\11SOFTERR"
2561 			      "\12NORECOND"
2562 			      "\13SGMAP64"
2563 			      "\14ALARM"
2564 			      "\15NONDASD"
2565 			      "\16SCSIMGT"
2566 			      "\17RAIDSCSI"
2567 			      "\21ADPTINFO"
2568 			      "\22NEWCOMM"
2569 			      "\23ARRAY64BIT"
2570 			      "\24HEATSENSOR");
2571 	}
2572 
2573 	aac_release_sync_fib(sc);
2574 	mtx_unlock(&sc->aac_io_lock);
2575 }
2576 
2577 /*
2578  * Look up a text description of a numeric error code and return a pointer to
2579  * same.
2580  */
2581 static char *
2582 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2583 {
2584 	int i;
2585 
2586 	for (i = 0; table[i].string != NULL; i++)
2587 		if (table[i].code == code)
2588 			return(table[i].string);
2589 	return(table[i + 1].string);
2590 }
2591 
2592 /*
2593  * Management Interface
2594  */
2595 
2596 static int
2597 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2598 {
2599 	struct aac_softc *sc;
2600 
2601 	sc = dev->si_drv1;
2602 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2603 	device_busy(sc->aac_dev);
2604 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2605 	return 0;
2606 }
2607 
2608 static int
2609 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2610 {
2611 	union aac_statrequest *as;
2612 	struct aac_softc *sc;
2613 	int error = 0;
2614 
2615 	as = (union aac_statrequest *)arg;
2616 	sc = dev->si_drv1;
2617 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2618 
2619 	switch (cmd) {
2620 	case AACIO_STATS:
2621 		switch (as->as_item) {
2622 		case AACQ_FREE:
2623 		case AACQ_READY:
2624 		case AACQ_BUSY:
2625 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2626 			      sizeof(struct aac_qstat));
2627 			break;
2628 		default:
2629 			error = ENOENT;
2630 			break;
2631 		}
2632 	break;
2633 
2634 	case FSACTL_SENDFIB:
2635 	case FSACTL_SEND_LARGE_FIB:
2636 		arg = *(caddr_t*)arg;
2637 	case FSACTL_LNX_SENDFIB:
2638 	case FSACTL_LNX_SEND_LARGE_FIB:
2639 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2640 		error = aac_ioctl_sendfib(sc, arg);
2641 		break;
2642 	case FSACTL_SEND_RAW_SRB:
2643 		arg = *(caddr_t*)arg;
2644 	case FSACTL_LNX_SEND_RAW_SRB:
2645 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2646 		error = aac_ioctl_send_raw_srb(sc, arg);
2647 		break;
2648 	case FSACTL_AIF_THREAD:
2649 	case FSACTL_LNX_AIF_THREAD:
2650 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2651 		error = EINVAL;
2652 		break;
2653 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2654 		arg = *(caddr_t*)arg;
2655 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2656 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2657 		error = aac_open_aif(sc, arg);
2658 		break;
2659 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2660 		arg = *(caddr_t*)arg;
2661 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2662 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2663 		error = aac_getnext_aif(sc, arg);
2664 		break;
2665 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2666 		arg = *(caddr_t*)arg;
2667 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2668 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2669 		error = aac_close_aif(sc, arg);
2670 		break;
2671 	case FSACTL_MINIPORT_REV_CHECK:
2672 		arg = *(caddr_t*)arg;
2673 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2674 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2675 		error = aac_rev_check(sc, arg);
2676 		break;
2677 	case FSACTL_QUERY_DISK:
2678 		arg = *(caddr_t*)arg;
2679 	case FSACTL_LNX_QUERY_DISK:
2680 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2681 		error = aac_query_disk(sc, arg);
2682 		break;
2683 	case FSACTL_DELETE_DISK:
2684 	case FSACTL_LNX_DELETE_DISK:
2685 		/*
2686 		 * We don't trust the underland to tell us when to delete a
2687 		 * container, rather we rely on an AIF coming from the
2688 		 * controller
2689 		 */
2690 		error = 0;
2691 		break;
2692 	case FSACTL_GET_PCI_INFO:
2693 		arg = *(caddr_t*)arg;
2694 	case FSACTL_LNX_GET_PCI_INFO:
2695 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2696 		error = aac_get_pci_info(sc, arg);
2697 		break;
2698 	case FSACTL_GET_FEATURES:
2699 		arg = *(caddr_t*)arg;
2700 	case FSACTL_LNX_GET_FEATURES:
2701 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2702 		error = aac_supported_features(sc, arg);
2703 		break;
2704 	default:
2705 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2706 		error = EINVAL;
2707 		break;
2708 	}
2709 	return(error);
2710 }
2711 
2712 static int
2713 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2714 {
2715 	struct aac_softc *sc;
2716 	struct aac_fib_context *ctx;
2717 	int revents;
2718 
2719 	sc = dev->si_drv1;
2720 	revents = 0;
2721 
2722 	mtx_lock(&sc->aac_io_lock);
2723 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2724 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2725 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2726 				revents |= poll_events & (POLLIN | POLLRDNORM);
2727 				break;
2728 			}
2729 		}
2730 	}
2731 	mtx_unlock(&sc->aac_io_lock);
2732 
2733 	if (revents == 0) {
2734 		if (poll_events & (POLLIN | POLLRDNORM))
2735 			selrecord(td, &sc->rcv_select);
2736 	}
2737 
2738 	return (revents);
2739 }
2740 
2741 static void
2742 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2743 {
2744 
2745 	switch (event->ev_type) {
2746 	case AAC_EVENT_CMFREE:
2747 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2748 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2749 			aacraid_add_event(sc, event);
2750 			return;
2751 		}
2752 		free(event, M_AACRAIDBUF);
2753 		wakeup(arg);
2754 		break;
2755 	default:
2756 		break;
2757 	}
2758 }
2759 
2760 /*
2761  * Send a FIB supplied from userspace
2762  *
2763  * Currently, sending a FIB from userspace in BE hosts is not supported.
2764  * There are several things that need to be considered in order to
2765  * support this, such as:
2766  * - At least the FIB data part from userspace should already be in LE,
2767  *   or else the kernel would need to know all FIB types to be able to
2768  *   correctly convert it to BE.
2769  * - SG tables are converted to BE by aacraid_map_command_sg(). This
2770  *   conversion should be supressed if the FIB comes from userspace.
2771  * - aacraid_wait_command() calls functions that convert the FIB header
2772  *   to LE. But if the header is already in LE, the conversion should not
2773  *   be performed.
2774  */
2775 static int
2776 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2777 {
2778 	struct aac_command *cm;
2779 	int size, error;
2780 
2781 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2782 
2783 	cm = NULL;
2784 
2785 	/*
2786 	 * Get a command
2787 	 */
2788 	mtx_lock(&sc->aac_io_lock);
2789 	if (aacraid_alloc_command(sc, &cm)) {
2790 		struct aac_event *event;
2791 
2792 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2793 		    M_NOWAIT | M_ZERO);
2794 		if (event == NULL) {
2795 			error = EBUSY;
2796 			mtx_unlock(&sc->aac_io_lock);
2797 			goto out;
2798 		}
2799 		event->ev_type = AAC_EVENT_CMFREE;
2800 		event->ev_callback = aac_ioctl_event;
2801 		event->ev_arg = &cm;
2802 		aacraid_add_event(sc, event);
2803 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2804 	}
2805 	mtx_unlock(&sc->aac_io_lock);
2806 
2807 	/*
2808 	 * Fetch the FIB header, then re-copy to get data as well.
2809 	 */
2810 	if ((error = copyin(ufib, cm->cm_fib,
2811 			    sizeof(struct aac_fib_header))) != 0)
2812 		goto out;
2813 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2814 	if (size > sc->aac_max_fib_size) {
2815 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2816 			      size, sc->aac_max_fib_size);
2817 		size = sc->aac_max_fib_size;
2818 	}
2819 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2820 		goto out;
2821 	cm->cm_fib->Header.Size = size;
2822 	cm->cm_timestamp = time_uptime;
2823 	cm->cm_datalen = 0;
2824 
2825 	/*
2826 	 * Pass the FIB to the controller, wait for it to complete.
2827 	 */
2828 	mtx_lock(&sc->aac_io_lock);
2829 	error = aacraid_wait_command(cm);
2830 	mtx_unlock(&sc->aac_io_lock);
2831 	if (error != 0) {
2832 		device_printf(sc->aac_dev,
2833 			      "aacraid_wait_command return %d\n", error);
2834 		goto out;
2835 	}
2836 
2837 	/*
2838 	 * Copy the FIB and data back out to the caller.
2839 	 */
2840 	size = cm->cm_fib->Header.Size;
2841 	if (size > sc->aac_max_fib_size) {
2842 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2843 			      size, sc->aac_max_fib_size);
2844 		size = sc->aac_max_fib_size;
2845 	}
2846 	error = copyout(cm->cm_fib, ufib, size);
2847 
2848 out:
2849 	if (cm != NULL) {
2850 		mtx_lock(&sc->aac_io_lock);
2851 		aacraid_release_command(cm);
2852 		mtx_unlock(&sc->aac_io_lock);
2853 	}
2854 	return(error);
2855 }
2856 
2857 /*
2858  * Send a passthrough FIB supplied from userspace
2859  */
2860 static int
2861 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2862 {
2863 	struct aac_command *cm;
2864 	struct aac_fib *fib;
2865 	struct aac_srb *srbcmd;
2866 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2867 	void *user_reply;
2868 	int error, transfer_data = 0;
2869 	bus_dmamap_t orig_map = 0;
2870 	u_int32_t fibsize = 0;
2871 	u_int64_t srb_sg_address;
2872 	u_int32_t srb_sg_bytecount;
2873 
2874 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2875 
2876 	cm = NULL;
2877 
2878 	mtx_lock(&sc->aac_io_lock);
2879 	if (aacraid_alloc_command(sc, &cm)) {
2880 		struct aac_event *event;
2881 
2882 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2883 		    M_NOWAIT | M_ZERO);
2884 		if (event == NULL) {
2885 			error = EBUSY;
2886 			mtx_unlock(&sc->aac_io_lock);
2887 			goto out;
2888 		}
2889 		event->ev_type = AAC_EVENT_CMFREE;
2890 		event->ev_callback = aac_ioctl_event;
2891 		event->ev_arg = &cm;
2892 		aacraid_add_event(sc, event);
2893 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2894 	}
2895 	mtx_unlock(&sc->aac_io_lock);
2896 
2897 	cm->cm_data = NULL;
2898 	/* save original dma map */
2899 	orig_map = cm->cm_datamap;
2900 
2901 	fib = cm->cm_fib;
2902 	srbcmd = (struct aac_srb *)fib->data;
2903 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2904 	    sizeof (u_int32_t))) != 0)
2905 		goto out;
2906 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2907 		error = EINVAL;
2908 		goto out;
2909 	}
2910 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2911 		goto out;
2912 
2913 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2914 	srbcmd->retry_limit = 0;	/* obsolete */
2915 
2916 	/* only one sg element from userspace supported */
2917 	if (srbcmd->sg_map.SgCount > 1) {
2918 		error = EINVAL;
2919 		goto out;
2920 	}
2921 	/* check fibsize */
2922 	if (fibsize == (sizeof(struct aac_srb) +
2923 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2924 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2925 		struct aac_sg_entry sg;
2926 
2927 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2928 			goto out;
2929 
2930 		srb_sg_bytecount = sg.SgByteCount;
2931 		srb_sg_address = (u_int64_t)sg.SgAddress;
2932 	} else if (fibsize == (sizeof(struct aac_srb) +
2933 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2934 #ifdef __LP64__
2935 		struct aac_sg_entry64 *sgp =
2936 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2937 		struct aac_sg_entry64 sg;
2938 
2939 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2940 			goto out;
2941 
2942 		srb_sg_bytecount = sg.SgByteCount;
2943 		srb_sg_address = sg.SgAddress;
2944 #else
2945 		error = EINVAL;
2946 		goto out;
2947 #endif
2948 	} else {
2949 		error = EINVAL;
2950 		goto out;
2951 	}
2952 	user_reply = (char *)arg + fibsize;
2953 	srbcmd->data_len = srb_sg_bytecount;
2954 	if (srbcmd->sg_map.SgCount == 1)
2955 		transfer_data = 1;
2956 
2957 	if (transfer_data) {
2958 		/*
2959 		 * Create DMA tag for the passthr. data buffer and allocate it.
2960 		 */
2961 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2962 			1, 0,			/* algnmnt, boundary */
2963 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2964 			BUS_SPACE_MAXADDR_32BIT :
2965 			0x7fffffff,		/* lowaddr */
2966 			BUS_SPACE_MAXADDR, 	/* highaddr */
2967 			NULL, NULL, 		/* filter, filterarg */
2968 			srb_sg_bytecount, 	/* size */
2969 			sc->aac_sg_tablesize,	/* nsegments */
2970 			srb_sg_bytecount, 	/* maxsegsize */
2971 			0,			/* flags */
2972 			NULL, NULL,		/* No locking needed */
2973 			&cm->cm_passthr_dmat)) {
2974 			error = ENOMEM;
2975 			goto out;
2976 		}
2977 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2978 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2979 			error = ENOMEM;
2980 			goto out;
2981 		}
2982 		/* fill some cm variables */
2983 		cm->cm_datalen = srb_sg_bytecount;
2984 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2985 			cm->cm_flags |= AAC_CMD_DATAIN;
2986 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2987 			cm->cm_flags |= AAC_CMD_DATAOUT;
2988 
2989 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2990 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2991 				cm->cm_data, cm->cm_datalen)) != 0)
2992 				goto out;
2993 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2994 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2995 				BUS_DMASYNC_PREWRITE);
2996 		}
2997 	}
2998 
2999 	/* build the FIB */
3000 	fib->Header.Size = sizeof(struct aac_fib_header) +
3001 		sizeof(struct aac_srb);
3002 	fib->Header.XferState =
3003 		AAC_FIBSTATE_HOSTOWNED   |
3004 		AAC_FIBSTATE_INITIALISED |
3005 		AAC_FIBSTATE_EMPTY	 |
3006 		AAC_FIBSTATE_FROMHOST	 |
3007 		AAC_FIBSTATE_REXPECTED   |
3008 		AAC_FIBSTATE_NORM	 |
3009 		AAC_FIBSTATE_ASYNC;
3010 
3011 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3012 		ScsiPortCommandU64 : ScsiPortCommand;
3013 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3014 
3015 	aac_srb_tole(srbcmd);
3016 
3017 	/* send command */
3018 	if (transfer_data) {
3019 		bus_dmamap_load(cm->cm_passthr_dmat,
3020 			cm->cm_datamap, cm->cm_data,
3021 			cm->cm_datalen,
3022 			aacraid_map_command_sg, cm, 0);
3023 	} else {
3024 		aacraid_map_command_sg(cm, NULL, 0, 0);
3025 	}
3026 
3027 	/* wait for completion */
3028 	mtx_lock(&sc->aac_io_lock);
3029 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3030 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3031 	mtx_unlock(&sc->aac_io_lock);
3032 
3033 	/* copy data */
3034 	if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3035 		if ((error = copyout(cm->cm_data,
3036 			(void *)(uintptr_t)srb_sg_address,
3037 			cm->cm_datalen)) != 0)
3038 			goto out;
3039 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3040 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3041 				BUS_DMASYNC_POSTREAD);
3042 	}
3043 
3044 	/* status */
3045 	aac_srb_response_toh((struct aac_srb_response *)fib->data);
3046 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3047 
3048 out:
3049 	if (cm && cm->cm_data) {
3050 		if (transfer_data)
3051 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3052 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3053 		cm->cm_datamap = orig_map;
3054 	}
3055 	if (cm && cm->cm_passthr_dmat)
3056 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3057 	if (cm) {
3058 		mtx_lock(&sc->aac_io_lock);
3059 		aacraid_release_command(cm);
3060 		mtx_unlock(&sc->aac_io_lock);
3061 	}
3062 	return(error);
3063 }
3064 
3065 /*
3066  * Request an AIF from the controller (new comm. type1)
3067  */
3068 static void
3069 aac_request_aif(struct aac_softc *sc)
3070 {
3071 	struct aac_command *cm;
3072 	struct aac_fib *fib;
3073 
3074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3075 
3076 	if (aacraid_alloc_command(sc, &cm)) {
3077 		sc->aif_pending = 1;
3078 		return;
3079 	}
3080 	sc->aif_pending = 0;
3081 
3082 	/* build the FIB */
3083 	fib = cm->cm_fib;
3084 	fib->Header.Size = sizeof(struct aac_fib);
3085 	fib->Header.XferState =
3086         AAC_FIBSTATE_HOSTOWNED   |
3087         AAC_FIBSTATE_INITIALISED |
3088         AAC_FIBSTATE_EMPTY	 |
3089         AAC_FIBSTATE_FROMHOST	 |
3090         AAC_FIBSTATE_REXPECTED   |
3091         AAC_FIBSTATE_NORM	 |
3092         AAC_FIBSTATE_ASYNC;
3093 	/* set AIF marker */
3094 	fib->Header.Handle = 0x00800000;
3095 	fib->Header.Command = AifRequest;
3096 	((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
3097 
3098 	aacraid_map_command_sg(cm, NULL, 0, 0);
3099 }
3100 
3101 /*
3102  * cdevpriv interface private destructor.
3103  */
3104 static void
3105 aac_cdevpriv_dtor(void *arg)
3106 {
3107 	struct aac_softc *sc;
3108 
3109 	sc = arg;
3110 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3111 	device_unbusy(sc->aac_dev);
3112 }
3113 
3114 /*
3115  * Handle an AIF sent to us by the controller; queue it for later reference.
3116  * If the queue fills up, then drop the older entries.
3117  */
3118 static void
3119 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3120 {
3121 	struct aac_aif_command *aif;
3122 	struct aac_container *co, *co_next;
3123 	struct aac_fib_context *ctx;
3124 	struct aac_fib *sync_fib;
3125 	struct aac_mntinforesp mir;
3126 	int next, current, found;
3127 	int count = 0, changed = 0, i = 0;
3128 	u_int32_t channel, uid;
3129 
3130 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3131 
3132 	aif = (struct aac_aif_command*)&fib->data[0];
3133 	aacraid_print_aif(sc, aif);
3134 
3135 	/* Is it an event that we should care about? */
3136 	switch (le32toh(aif->command)) {
3137 	case AifCmdEventNotify:
3138 		switch (le32toh(aif->data.EN.type)) {
3139 		case AifEnAddContainer:
3140 		case AifEnDeleteContainer:
3141 			/*
3142 			 * A container was added or deleted, but the message
3143 			 * doesn't tell us anything else!  Re-enumerate the
3144 			 * containers and sort things out.
3145 			 */
3146 			aac_alloc_sync_fib(sc, &sync_fib);
3147 			do {
3148 				/*
3149 				 * Ask the controller for its containers one at
3150 				 * a time.
3151 				 * XXX What if the controller's list changes
3152 				 * midway through this enumaration?
3153 				 * XXX This should be done async.
3154 				 */
3155 				if (aac_get_container_info(sc, sync_fib, i,
3156 					&mir, &uid) != 0)
3157 					continue;
3158 				if (i == 0)
3159 					count = mir.MntRespCount;
3160 				/*
3161 				 * Check the container against our list.
3162 				 * co->co_found was already set to 0 in a
3163 				 * previous run.
3164 				 */
3165 				if ((mir.Status == ST_OK) &&
3166 				    (mir.MntTable[0].VolType != CT_NONE)) {
3167 					found = 0;
3168 					TAILQ_FOREACH(co,
3169 						      &sc->aac_container_tqh,
3170 						      co_link) {
3171 						if (co->co_mntobj.ObjectId ==
3172 						    mir.MntTable[0].ObjectId) {
3173 							co->co_found = 1;
3174 							found = 1;
3175 							break;
3176 						}
3177 					}
3178 					/*
3179 					 * If the container matched, continue
3180 					 * in the list.
3181 					 */
3182 					if (found) {
3183 						i++;
3184 						continue;
3185 					}
3186 
3187 					/*
3188 					 * This is a new container.  Do all the
3189 					 * appropriate things to set it up.
3190 					 */
3191 					aac_add_container(sc, &mir, 1, uid);
3192 					changed = 1;
3193 				}
3194 				i++;
3195 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3196 			aac_release_sync_fib(sc);
3197 
3198 			/*
3199 			 * Go through our list of containers and see which ones
3200 			 * were not marked 'found'.  Since the controller didn't
3201 			 * list them they must have been deleted.  Do the
3202 			 * appropriate steps to destroy the device.  Also reset
3203 			 * the co->co_found field.
3204 			 */
3205 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3206 			while (co != NULL) {
3207 				if (co->co_found == 0) {
3208 					co_next = TAILQ_NEXT(co, co_link);
3209 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3210 						     co_link);
3211 					free(co, M_AACRAIDBUF);
3212 					changed = 1;
3213 					co = co_next;
3214 				} else {
3215 					co->co_found = 0;
3216 					co = TAILQ_NEXT(co, co_link);
3217 				}
3218 			}
3219 
3220 			/* Attach the newly created containers */
3221 			if (changed) {
3222 				if (sc->cam_rescan_cb != NULL)
3223 					sc->cam_rescan_cb(sc, 0,
3224 				    	AAC_CAM_TARGET_WILDCARD);
3225 			}
3226 
3227 			break;
3228 
3229 		case AifEnEnclosureManagement:
3230 			switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3231 			case AIF_EM_DRIVE_INSERTION:
3232 			case AIF_EM_DRIVE_REMOVAL:
3233 				channel = le32toh(aif->data.EN.data.EEE.unitID);
3234 				if (sc->cam_rescan_cb != NULL)
3235 					sc->cam_rescan_cb(sc,
3236 					    ((channel>>24) & 0xF) + 1,
3237 					    (channel & 0xFFFF));
3238 				break;
3239 			}
3240 			break;
3241 
3242 		case AifEnAddJBOD:
3243 		case AifEnDeleteJBOD:
3244 		case AifRawDeviceRemove:
3245 			channel = le32toh(aif->data.EN.data.ECE.container);
3246 			if (sc->cam_rescan_cb != NULL)
3247 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3248 				    AAC_CAM_TARGET_WILDCARD);
3249 			break;
3250 
3251 		default:
3252 			break;
3253 		}
3254 
3255 	default:
3256 		break;
3257 	}
3258 
3259 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3260 	current = sc->aifq_idx;
3261 	next = (current + 1) % AAC_AIFQ_LENGTH;
3262 	if (next == 0)
3263 		sc->aifq_filled = 1;
3264 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3265 	/* Make aifq's FIB header and data LE */
3266 	aac_fib_header_tole(&sc->aac_aifq[current].Header);
3267 	/* modify AIF contexts */
3268 	if (sc->aifq_filled) {
3269 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3270 			if (next == ctx->ctx_idx)
3271 				ctx->ctx_wrap = 1;
3272 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3273 				ctx->ctx_idx = next;
3274 		}
3275 	}
3276 	sc->aifq_idx = next;
3277 	/* On the off chance that someone is sleeping for an aif... */
3278 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3279 		wakeup(sc->aac_aifq);
3280 	/* Wakeup any poll()ers */
3281 	selwakeuppri(&sc->rcv_select, PRIBIO);
3282 
3283 	return;
3284 }
3285 
3286 /*
3287  * Return the Revision of the driver to userspace and check to see if the
3288  * userspace app is possibly compatible.  This is extremely bogus since
3289  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3290  * returning what the card reported.
3291  */
3292 static int
3293 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3294 {
3295 	struct aac_rev_check rev_check;
3296 	struct aac_rev_check_resp rev_check_resp;
3297 	int error = 0;
3298 
3299 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3300 
3301 	/*
3302 	 * Copyin the revision struct from userspace
3303 	 */
3304 	if ((error = copyin(udata, (caddr_t)&rev_check,
3305 			sizeof(struct aac_rev_check))) != 0) {
3306 		return error;
3307 	}
3308 
3309 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3310 	      rev_check.callingRevision.buildNumber);
3311 
3312 	/*
3313 	 * Doctor up the response struct.
3314 	 */
3315 	rev_check_resp.possiblyCompatible = 1;
3316 	rev_check_resp.adapterSWRevision.external.comp.major =
3317 	    AAC_DRIVER_MAJOR_VERSION;
3318 	rev_check_resp.adapterSWRevision.external.comp.minor =
3319 	    AAC_DRIVER_MINOR_VERSION;
3320 	rev_check_resp.adapterSWRevision.external.comp.type =
3321 	    AAC_DRIVER_TYPE;
3322 	rev_check_resp.adapterSWRevision.external.comp.dash =
3323 	    AAC_DRIVER_BUGFIX_LEVEL;
3324 	rev_check_resp.adapterSWRevision.buildNumber =
3325 	    AAC_DRIVER_BUILD;
3326 
3327 	return(copyout((caddr_t)&rev_check_resp, udata,
3328 			sizeof(struct aac_rev_check_resp)));
3329 }
3330 
3331 /*
3332  * Pass the fib context to the caller
3333  */
3334 static int
3335 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3336 {
3337 	struct aac_fib_context *fibctx, *ctx;
3338 	int error = 0;
3339 
3340 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3341 
3342 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3343 	if (fibctx == NULL)
3344 		return (ENOMEM);
3345 
3346 	mtx_lock(&sc->aac_io_lock);
3347 	/* all elements are already 0, add to queue */
3348 	if (sc->fibctx == NULL)
3349 		sc->fibctx = fibctx;
3350 	else {
3351 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3352 			;
3353 		ctx->next = fibctx;
3354 		fibctx->prev = ctx;
3355 	}
3356 
3357 	/* evaluate unique value */
3358 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3359 	ctx = sc->fibctx;
3360 	while (ctx != fibctx) {
3361 		if (ctx->unique == fibctx->unique) {
3362 			fibctx->unique++;
3363 			ctx = sc->fibctx;
3364 		} else {
3365 			ctx = ctx->next;
3366 		}
3367 	}
3368 
3369 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3370 	mtx_unlock(&sc->aac_io_lock);
3371 	if (error)
3372 		aac_close_aif(sc, (caddr_t)ctx);
3373 	return error;
3374 }
3375 
3376 /*
3377  * Close the caller's fib context
3378  */
3379 static int
3380 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3381 {
3382 	struct aac_fib_context *ctx;
3383 
3384 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3385 
3386 	mtx_lock(&sc->aac_io_lock);
3387 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3388 		if (ctx->unique == *(uint32_t *)&arg) {
3389 			if (ctx == sc->fibctx)
3390 				sc->fibctx = NULL;
3391 			else {
3392 				ctx->prev->next = ctx->next;
3393 				if (ctx->next)
3394 					ctx->next->prev = ctx->prev;
3395 			}
3396 			break;
3397 		}
3398 	}
3399 	if (ctx)
3400 		free(ctx, M_AACRAIDBUF);
3401 
3402 	mtx_unlock(&sc->aac_io_lock);
3403 	return 0;
3404 }
3405 
3406 /*
3407  * Pass the caller the next AIF in their queue
3408  */
3409 static int
3410 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3411 {
3412 	struct get_adapter_fib_ioctl agf;
3413 	struct aac_fib_context *ctx;
3414 	int error;
3415 
3416 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3417 
3418 	mtx_lock(&sc->aac_io_lock);
3419 #ifdef COMPAT_FREEBSD32
3420 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3421 		struct get_adapter_fib_ioctl32 agf32;
3422 		error = copyin(arg, &agf32, sizeof(agf32));
3423 		if (error == 0) {
3424 			agf.AdapterFibContext = agf32.AdapterFibContext;
3425 			agf.Wait = agf32.Wait;
3426 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3427 		}
3428 	} else
3429 #endif
3430 		error = copyin(arg, &agf, sizeof(agf));
3431 	if (error == 0) {
3432 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3433 			if (agf.AdapterFibContext == ctx->unique)
3434 				break;
3435 		}
3436 		if (!ctx) {
3437 			mtx_unlock(&sc->aac_io_lock);
3438 			return (EFAULT);
3439 		}
3440 
3441 		error = aac_return_aif(sc, ctx, agf.AifFib);
3442 		if (error == EAGAIN && agf.Wait) {
3443 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3444 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3445 			while (error == EAGAIN) {
3446 				mtx_unlock(&sc->aac_io_lock);
3447 				error = tsleep(sc->aac_aifq, PRIBIO |
3448 					       PCATCH, "aacaif", 0);
3449 				mtx_lock(&sc->aac_io_lock);
3450 				if (error == 0)
3451 					error = aac_return_aif(sc, ctx, agf.AifFib);
3452 			}
3453 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3454 		}
3455 	}
3456 	mtx_unlock(&sc->aac_io_lock);
3457 	return(error);
3458 }
3459 
3460 /*
3461  * Hand the next AIF off the top of the queue out to userspace.
3462  */
3463 static int
3464 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3465 {
3466 	int current, error;
3467 
3468 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3469 
3470 	current = ctx->ctx_idx;
3471 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3472 		/* empty */
3473 		return (EAGAIN);
3474 	}
3475 	error =
3476 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3477 	if (error)
3478 		device_printf(sc->aac_dev,
3479 		    "aac_return_aif: copyout returned %d\n", error);
3480 	else {
3481 		ctx->ctx_wrap = 0;
3482 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3483 	}
3484 	return(error);
3485 }
3486 
3487 static int
3488 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3489 {
3490 	struct aac_pci_info {
3491 		u_int32_t bus;
3492 		u_int32_t slot;
3493 	} pciinf;
3494 	int error;
3495 
3496 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3497 
3498 	pciinf.bus = pci_get_bus(sc->aac_dev);
3499 	pciinf.slot = pci_get_slot(sc->aac_dev);
3500 
3501 	error = copyout((caddr_t)&pciinf, uptr,
3502 			sizeof(struct aac_pci_info));
3503 
3504 	return (error);
3505 }
3506 
3507 static int
3508 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3509 {
3510 	struct aac_features f;
3511 	int error;
3512 
3513 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3514 
3515 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3516 		return (error);
3517 
3518 	/*
3519 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3520 	 * ALL zero in the featuresState, the driver will return the current
3521 	 * state of all the supported features, the data field will not be
3522 	 * valid.
3523 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3524 	 * a specific bit set in the featuresState, the driver will return the
3525 	 * current state of this specific feature and whatever data that are
3526 	 * associated with the feature in the data field or perform whatever
3527 	 * action needed indicates in the data field.
3528 	 */
3529 	 if (f.feat.fValue == 0) {
3530 		f.feat.fBits.largeLBA =
3531 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3532 		f.feat.fBits.JBODSupport = 1;
3533 		/* TODO: In the future, add other features state here as well */
3534 	} else {
3535 		if (f.feat.fBits.largeLBA)
3536 			f.feat.fBits.largeLBA =
3537 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3538 		/* TODO: Add other features state and data in the future */
3539 	}
3540 
3541 	error = copyout(&f, uptr, sizeof (f));
3542 	return (error);
3543 }
3544 
3545 /*
3546  * Give the userland some information about the container.  The AAC arch
3547  * expects the driver to be a SCSI passthrough type driver, so it expects
3548  * the containers to have b:t:l numbers.  Fake it.
3549  */
3550 static int
3551 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3552 {
3553 	struct aac_query_disk query_disk;
3554 	struct aac_container *co;
3555 	int error, id;
3556 
3557 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3558 
3559 	mtx_lock(&sc->aac_io_lock);
3560 	error = copyin(uptr, (caddr_t)&query_disk,
3561 		       sizeof(struct aac_query_disk));
3562 	if (error) {
3563 		mtx_unlock(&sc->aac_io_lock);
3564 		return (error);
3565 	}
3566 
3567 	id = query_disk.ContainerNumber;
3568 	if (id == -1) {
3569 		mtx_unlock(&sc->aac_io_lock);
3570 		return (EINVAL);
3571 	}
3572 
3573 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3574 		if (co->co_mntobj.ObjectId == id)
3575 			break;
3576 		}
3577 
3578 	if (co == NULL) {
3579 			query_disk.Valid = 0;
3580 			query_disk.Locked = 0;
3581 			query_disk.Deleted = 1;		/* XXX is this right? */
3582 	} else {
3583 		query_disk.Valid = 1;
3584 		query_disk.Locked = 1;
3585 		query_disk.Deleted = 0;
3586 		query_disk.Bus = device_get_unit(sc->aac_dev);
3587 		query_disk.Target = 0;
3588 		query_disk.Lun = 0;
3589 		query_disk.UnMapped = 0;
3590 	}
3591 
3592 	error = copyout((caddr_t)&query_disk, uptr,
3593 			sizeof(struct aac_query_disk));
3594 
3595 	mtx_unlock(&sc->aac_io_lock);
3596 	return (error);
3597 }
3598 
3599 static void
3600 aac_container_bus(struct aac_softc *sc)
3601 {
3602 	struct aac_sim *sim;
3603 	device_t child;
3604 
3605 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3606 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3607 	if (sim == NULL) {
3608 		device_printf(sc->aac_dev,
3609 	    	"No memory to add container bus\n");
3610 		panic("Out of memory?!");
3611 	}
3612 	child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY);
3613 	if (child == NULL) {
3614 		device_printf(sc->aac_dev,
3615 	    	"device_add_child failed for container bus\n");
3616 		free(sim, M_AACRAIDBUF);
3617 		panic("Out of memory?!");
3618 	}
3619 
3620 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3621 	sim->BusNumber = 0;
3622 	sim->BusType = CONTAINER_BUS;
3623 	sim->InitiatorBusId = -1;
3624 	sim->aac_sc = sc;
3625 	sim->sim_dev = child;
3626 	sim->aac_cam = NULL;
3627 
3628 	device_set_ivars(child, sim);
3629 	device_set_desc(child, "Container Bus");
3630 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3631 	/*
3632 	device_set_desc(child, aac_describe_code(aac_container_types,
3633 			mir->MntTable[0].VolType));
3634 	*/
3635 	bus_attach_children(sc->aac_dev);
3636 }
3637 
3638 static void
3639 aac_get_bus_info(struct aac_softc *sc)
3640 {
3641 	struct aac_fib *fib;
3642 	struct aac_ctcfg *c_cmd;
3643 	struct aac_ctcfg_resp *c_resp;
3644 	struct aac_vmioctl *vmi;
3645 	struct aac_vmi_businf_resp *vmi_resp;
3646 	struct aac_getbusinf businfo;
3647 	struct aac_sim *caminf;
3648 	device_t child;
3649 	int i, error;
3650 
3651 	mtx_lock(&sc->aac_io_lock);
3652 	aac_alloc_sync_fib(sc, &fib);
3653 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3654 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3655 
3656 	c_cmd->Command = VM_ContainerConfig;
3657 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3658 	c_cmd->param = 0;
3659 
3660 	aac_ctcfg_tole(c_cmd);
3661 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3662 	    sizeof(struct aac_ctcfg));
3663 	if (error) {
3664 		device_printf(sc->aac_dev, "Error %d sending "
3665 		    "VM_ContainerConfig command\n", error);
3666 		aac_release_sync_fib(sc);
3667 		mtx_unlock(&sc->aac_io_lock);
3668 		return;
3669 	}
3670 
3671 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3672 	aac_ctcfg_resp_toh(c_resp);
3673 	if (c_resp->Status != ST_OK) {
3674 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3675 		    c_resp->Status);
3676 		aac_release_sync_fib(sc);
3677 		mtx_unlock(&sc->aac_io_lock);
3678 		return;
3679 	}
3680 
3681 	sc->scsi_method_id = c_resp->param;
3682 
3683 	vmi = (struct aac_vmioctl *)&fib->data[0];
3684 	bzero(vmi, sizeof(struct aac_vmioctl));
3685 
3686 	vmi->Command = VM_Ioctl;
3687 	vmi->ObjType = FT_DRIVE;
3688 	vmi->MethId = sc->scsi_method_id;
3689 	vmi->ObjId = 0;
3690 	vmi->IoctlCmd = GetBusInfo;
3691 
3692 	aac_vmioctl_tole(vmi);
3693 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3694 	    sizeof(struct aac_vmi_businf_resp));
3695 	if (error) {
3696 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3697 		    error);
3698 		aac_release_sync_fib(sc);
3699 		mtx_unlock(&sc->aac_io_lock);
3700 		return;
3701 	}
3702 
3703 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3704 	aac_vmi_businf_resp_toh(vmi_resp);
3705 	if (vmi_resp->Status != ST_OK) {
3706 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3707 		    vmi_resp->Status);
3708 		aac_release_sync_fib(sc);
3709 		mtx_unlock(&sc->aac_io_lock);
3710 		return;
3711 	}
3712 
3713 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3714 	aac_release_sync_fib(sc);
3715 	mtx_unlock(&sc->aac_io_lock);
3716 
3717 	for (i = 0; i < businfo.BusCount; i++) {
3718 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3719 			continue;
3720 
3721 		caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3722 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3723 		if (caminf == NULL) {
3724 			device_printf(sc->aac_dev,
3725 			    "No memory to add passthrough bus %d\n", i);
3726 			break;
3727 		}
3728 
3729 		child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY);
3730 		if (child == NULL) {
3731 			device_printf(sc->aac_dev,
3732 			    "device_add_child failed for passthrough bus %d\n",
3733 			    i);
3734 			free(caminf, M_AACRAIDBUF);
3735 			break;
3736 		}
3737 
3738 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3739 		caminf->BusNumber = i+1;
3740 		caminf->BusType = PASSTHROUGH_BUS;
3741 		caminf->InitiatorBusId = -1;
3742 		caminf->aac_sc = sc;
3743 		caminf->sim_dev = child;
3744 		caminf->aac_cam = NULL;
3745 
3746 		device_set_ivars(child, caminf);
3747 		device_set_desc(child, "SCSI Passthrough Bus");
3748 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3749 	}
3750 }
3751 
3752 /*
3753  * Check to see if the kernel is up and running. If we are in a
3754  * BlinkLED state, return the BlinkLED code.
3755  */
3756 static u_int32_t
3757 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3758 {
3759 	u_int32_t ret;
3760 
3761 	ret = AAC_GET_FWSTATUS(sc);
3762 
3763 	if (ret & AAC_UP_AND_RUNNING)
3764 		ret = 0;
3765 	else if (ret & AAC_KERNEL_PANIC && bled)
3766 		*bled = (ret >> 16) & 0xff;
3767 
3768 	return (ret);
3769 }
3770 
3771 /*
3772  * Once do an IOP reset, basically have to re-initialize the card as
3773  * if coming up from a cold boot, and the driver is responsible for
3774  * any IO that was outstanding to the adapter at the time of the IOP
3775  * RESET. And prepare the driver for IOP RESET by making the init code
3776  * modular with the ability to call it from multiple places.
3777  */
3778 static int
3779 aac_reset_adapter(struct aac_softc *sc)
3780 {
3781 	struct aac_command *cm;
3782 	struct aac_fib *fib;
3783 	struct aac_pause_command *pc;
3784 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3785 	int ret, msi_enabled_orig;
3786 
3787 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3788 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3789 
3790 	if (sc->aac_state & AAC_STATE_RESET) {
3791 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3792 		return (EINVAL);
3793 	}
3794 	sc->aac_state |= AAC_STATE_RESET;
3795 
3796 	/* disable interrupt */
3797 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3798 
3799 	/*
3800 	 * Abort all pending commands:
3801 	 * a) on the controller
3802 	 */
3803 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3804 		cm->cm_flags |= AAC_CMD_RESET;
3805 
3806 		/* is there a completion handler? */
3807 		if (cm->cm_complete != NULL) {
3808 			cm->cm_complete(cm);
3809 		} else {
3810 			/* assume that someone is sleeping on this
3811 			 * command
3812 			 */
3813 			wakeup(cm);
3814 		}
3815 	}
3816 
3817 	/* b) in the waiting queues */
3818 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3819 		cm->cm_flags |= AAC_CMD_RESET;
3820 
3821 		/* is there a completion handler? */
3822 		if (cm->cm_complete != NULL) {
3823 			cm->cm_complete(cm);
3824 		} else {
3825 			/* assume that someone is sleeping on this
3826 			 * command
3827 			 */
3828 			wakeup(cm);
3829 		}
3830 	}
3831 
3832 	/* flush drives */
3833 	if (aac_check_adapter_health(sc, NULL) == 0) {
3834 		mtx_unlock(&sc->aac_io_lock);
3835 		(void) aacraid_shutdown(sc->aac_dev);
3836 		mtx_lock(&sc->aac_io_lock);
3837 	}
3838 
3839 	/* execute IOP reset */
3840 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3841 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3842 
3843 		/* We need to wait for 5 seconds before accessing the MU again
3844 		 * 10000 * 100us = 1000,000us = 1000ms = 1s
3845 		 */
3846 		waitCount = 5 * 10000;
3847 		while (waitCount) {
3848 			DELAY(100);			/* delay 100 microseconds */
3849 			waitCount--;
3850 		}
3851 	} else {
3852 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3853 			0, 0, 0, 0, &status, &reset_mask);
3854 		if (ret && !sc->doorbell_mask) {
3855 			/* call IOP_RESET for older firmware */
3856 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3857 			    &status, NULL)) != 0) {
3858 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3859 					device_printf(sc->aac_dev,
3860 					    "IOP_RESET not supported\n");
3861 				} else {
3862 					/* probably timeout */
3863 					device_printf(sc->aac_dev,
3864 					    "IOP_RESET failed\n");
3865 				}
3866 
3867 				/* unwind aac_shutdown() */
3868 				aac_alloc_sync_fib(sc, &fib);
3869 				pc = (struct aac_pause_command *)&fib->data[0];
3870 				pc->Command = VM_ContainerConfig;
3871 				pc->Type = CT_PAUSE_IO;
3872 				pc->Timeout = 1;
3873 				pc->Min = 1;
3874 				pc->NoRescan = 1;
3875 
3876 				aac_pause_command_tole(pc);
3877 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3878 				    fib, sizeof (struct aac_pause_command));
3879 				aac_release_sync_fib(sc);
3880 
3881 				goto finish;
3882 			}
3883 		} else if (sc->doorbell_mask) {
3884 			ret = 0;
3885 			reset_mask = sc->doorbell_mask;
3886 		}
3887 		if (!ret &&
3888 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3889 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3890 			/*
3891 			 * We need to wait for 5 seconds before accessing the
3892 			 * doorbell again;
3893 			 * 10000 * 100us = 1000,000us = 1000ms = 1s
3894 			 */
3895 			waitCount = 5 * 10000;
3896 			while (waitCount) {
3897 				DELAY(100);	/* delay 100 microseconds */
3898 				waitCount--;
3899 			}
3900 		}
3901 	}
3902 
3903 	/*
3904 	 * Initialize the adapter.
3905 	 */
3906 	max_msix_orig = sc->aac_max_msix;
3907 	msi_enabled_orig = sc->msi_enabled;
3908 	sc->msi_enabled = FALSE;
3909 	if (aac_check_firmware(sc) != 0)
3910 		goto finish;
3911 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3912 		sc->aac_max_msix = max_msix_orig;
3913 		if (msi_enabled_orig) {
3914 			sc->msi_enabled = msi_enabled_orig;
3915 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3916 		}
3917 		mtx_unlock(&sc->aac_io_lock);
3918 		aac_init(sc);
3919 		mtx_lock(&sc->aac_io_lock);
3920 	}
3921 
3922 finish:
3923 	sc->aac_state &= ~AAC_STATE_RESET;
3924 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3925 	aacraid_startio(sc);
3926 	return (0);
3927 }
3928